1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
44 * Configuration and status register (CSR) type definitions for
47 * This file is auto generated. Do not edit.
52 #ifndef __CVMX_POW_TYPEDEFS_H__
53 #define __CVMX_POW_TYPEDEFS_H__
/*
 * CSR address definitions for the POW (Packet Order / Work) unit.
 * Indexed registers get an inline accessor when
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING is set, which calls cvmx_warn() when
 * the offset is invalid for the running chip model; otherwise a plain
 * macro computes the same address.
 * NOTE(review): this listing has numbering gaps, so some physical lines
 * (function braces, #else/#endif of each checking block) are not visible
 * here — the trailing #define of each accessor is the unchecked fallback.
 */
55 #define CVMX_POW_BIST_STAT (CVMX_ADD_IO_SEG(0x00016700000003F8ull))
56 #define CVMX_POW_DS_PC (CVMX_ADD_IO_SEG(0x0001670000000398ull))
57 #define CVMX_POW_ECC_ERR (CVMX_ADD_IO_SEG(0x0001670000000218ull))
58 #define CVMX_POW_INT_CTL (CVMX_ADD_IO_SEG(0x0001670000000220ull))
59 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* POW_IQ_CNT(0..7): one input-queue count register per QOS level; all
 * listed models accept offsets 0-7, and the address math masks with 7. */
60 static inline uint64_t CVMX_POW_IQ_CNTX(unsigned long offset)
63 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
64 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
65 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
66 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
67 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
68 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
69 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
70 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7)))))
71 cvmx_warn("CVMX_POW_IQ_CNTX(%lu) is invalid on this chip\n", offset);
72 return CVMX_ADD_IO_SEG(0x0001670000000340ull) + ((offset) & 7) * 8;
75 #define CVMX_POW_IQ_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000000340ull) + ((offset) & 7) * 8)
77 #define CVMX_POW_IQ_COM_CNT (CVMX_ADD_IO_SEG(0x0001670000000388ull))
78 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* POW_IQ_INT and POW_IQ_INT_EN exist only on CN52XX/CN56XX/CN63XX; the
 * checked accessors warn on other models but still return the address. */
79 #define CVMX_POW_IQ_INT CVMX_POW_IQ_INT_FUNC()
80 static inline uint64_t CVMX_POW_IQ_INT_FUNC(void)
82 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
83 cvmx_warn("CVMX_POW_IQ_INT not supported on this chip\n");
84 return CVMX_ADD_IO_SEG(0x0001670000000238ull);
87 #define CVMX_POW_IQ_INT (CVMX_ADD_IO_SEG(0x0001670000000238ull))
89 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
90 #define CVMX_POW_IQ_INT_EN CVMX_POW_IQ_INT_EN_FUNC()
91 static inline uint64_t CVMX_POW_IQ_INT_EN_FUNC(void)
93 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
94 cvmx_warn("CVMX_POW_IQ_INT_EN not supported on this chip\n");
95 return CVMX_ADD_IO_SEG(0x0001670000000240ull);
98 #define CVMX_POW_IQ_INT_EN (CVMX_ADD_IO_SEG(0x0001670000000240ull))
100 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* POW_IQ_THR(0..7): per-QOS input-queue threshold; only on the three
 * models that also have POW_IQ_INT. */
101 static inline uint64_t CVMX_POW_IQ_THRX(unsigned long offset)
104 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
105 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
106 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7)))))
107 cvmx_warn("CVMX_POW_IQ_THRX(%lu) is invalid on this chip\n", offset);
108 return CVMX_ADD_IO_SEG(0x00016700000003A0ull) + ((offset) & 7) * 8;
111 #define CVMX_POW_IQ_THRX(offset) (CVMX_ADD_IO_SEG(0x00016700000003A0ull) + ((offset) & 7) * 8)
113 #define CVMX_POW_NOS_CNT (CVMX_ADD_IO_SEG(0x0001670000000228ull))
114 #define CVMX_POW_NW_TIM (CVMX_ADD_IO_SEG(0x0001670000000210ull))
115 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* POW_PF_RST_MSK: prefetch-engine reset mask, CN5XXX/CN63XX only. */
116 #define CVMX_POW_PF_RST_MSK CVMX_POW_PF_RST_MSK_FUNC()
117 static inline uint64_t CVMX_POW_PF_RST_MSK_FUNC(void)
119 if (!(OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
120 cvmx_warn("CVMX_POW_PF_RST_MSK not supported on this chip\n");
121 return CVMX_ADD_IO_SEG(0x0001670000000230ull);
124 #define CVMX_POW_PF_RST_MSK (CVMX_ADD_IO_SEG(0x0001670000000230ull))
126 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* POW_PP_GRP_MSK(x): one register per physical PP (core); the valid
 * offset range therefore differs per model (e.g. 1 PP on CN30XX, 16 on
 * CN38XX/CN58XX).  The unchecked address math masks the offset with 15. */
127 static inline uint64_t CVMX_POW_PP_GRP_MSKX(unsigned long offset)
130 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
131 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 1))) ||
132 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
133 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 1))) ||
134 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
135 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 11))) ||
136 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
137 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 5)))))
138 cvmx_warn("CVMX_POW_PP_GRP_MSKX(%lu) is invalid on this chip\n", offset);
139 return CVMX_ADD_IO_SEG(0x0001670000000000ull) + ((offset) & 15) * 8;
142 #define CVMX_POW_PP_GRP_MSKX(offset) (CVMX_ADD_IO_SEG(0x0001670000000000ull) + ((offset) & 15) * 8)
144 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* POW_QOS_RND(0..7): QOS round definitions, valid on all listed models. */
145 static inline uint64_t CVMX_POW_QOS_RNDX(unsigned long offset)
148 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
149 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
150 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
151 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
152 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
153 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
154 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
155 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7)))))
156 cvmx_warn("CVMX_POW_QOS_RNDX(%lu) is invalid on this chip\n", offset);
157 return CVMX_ADD_IO_SEG(0x00016700000001C0ull) + ((offset) & 7) * 8;
160 #define CVMX_POW_QOS_RNDX(offset) (CVMX_ADD_IO_SEG(0x00016700000001C0ull) + ((offset) & 7) * 8)
162 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* POW_QOS_THR(0..7): per-QOS-level threshold registers. */
163 static inline uint64_t CVMX_POW_QOS_THRX(unsigned long offset)
166 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
167 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
168 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
169 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
170 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
171 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
172 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
173 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7)))))
174 cvmx_warn("CVMX_POW_QOS_THRX(%lu) is invalid on this chip\n", offset);
175 return CVMX_ADD_IO_SEG(0x0001670000000180ull) + ((offset) & 7) * 8;
178 #define CVMX_POW_QOS_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000180ull) + ((offset) & 7) * 8)
180 #define CVMX_POW_TS_PC (CVMX_ADD_IO_SEG(0x0001670000000390ull))
181 #define CVMX_POW_WA_COM_PC (CVMX_ADD_IO_SEG(0x0001670000000380ull))
182 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* POW_WA_PC(0..7): per-QOS work-add performance counters. */
183 static inline uint64_t CVMX_POW_WA_PCX(unsigned long offset)
186 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
187 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
188 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
189 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
190 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
191 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
192 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
193 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7)))))
194 cvmx_warn("CVMX_POW_WA_PCX(%lu) is invalid on this chip\n", offset);
195 return CVMX_ADD_IO_SEG(0x0001670000000300ull) + ((offset) & 7) * 8;
198 #define CVMX_POW_WA_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000300ull) + ((offset) & 7) * 8)
200 #define CVMX_POW_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000000200ull))
201 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* POW_WQ_INT_CNT(0..15): per-group work-queue interrupt counters. */
202 static inline uint64_t CVMX_POW_WQ_INT_CNTX(unsigned long offset)
205 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 15))) ||
206 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 15))) ||
207 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
208 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 15))) ||
209 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 15))) ||
210 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 15))) ||
211 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
212 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 15)))))
213 cvmx_warn("CVMX_POW_WQ_INT_CNTX(%lu) is invalid on this chip\n", offset);
214 return CVMX_ADD_IO_SEG(0x0001670000000100ull) + ((offset) & 15) * 8;
217 #define CVMX_POW_WQ_INT_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000000100ull) + ((offset) & 15) * 8)
219 #define CVMX_POW_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000000208ull))
220 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* POW_WQ_INT_THR(0..15): per-group work-queue interrupt thresholds. */
221 static inline uint64_t CVMX_POW_WQ_INT_THRX(unsigned long offset)
224 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 15))) ||
225 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 15))) ||
226 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
227 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 15))) ||
228 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 15))) ||
229 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 15))) ||
230 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
231 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 15)))))
232 cvmx_warn("CVMX_POW_WQ_INT_THRX(%lu) is invalid on this chip\n", offset);
233 return CVMX_ADD_IO_SEG(0x0001670000000080ull) + ((offset) & 15) * 8;
236 #define CVMX_POW_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000080ull) + ((offset) & 15) * 8)
238 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* POW_WS_PC(0..15): per-group work-schedule performance counters. */
239 static inline uint64_t CVMX_POW_WS_PCX(unsigned long offset)
242 (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 15))) ||
243 (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 15))) ||
244 (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
245 (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 15))) ||
246 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 15))) ||
247 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 15))) ||
248 (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
249 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 15)))))
250 cvmx_warn("CVMX_POW_WS_PCX(%lu) is invalid on this chip\n", offset);
251 return CVMX_ADD_IO_SEG(0x0001670000000280ull) + ((offset) & 15) * 8;
254 #define CVMX_POW_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000280ull) + ((offset) & 15) * 8)
260 * POW_BIST_STAT = POW BIST Status Register
262 * Contains the BIST status for the POW memories ('0' = pass, '1' = fail).
264 * Also contains the BIST status for the PP's. Each bit in the PP field is the OR of all BIST
265 * results for the corresponding physical PP ('0' = pass, '1' = fail).
/*
 * Union overlaying the raw 64-bit CSR value with per-chip-model bitfield
 * layouts.  Each #if __BYTE_ORDER == __BIG_ENDIAN section lists fields
 * MSB-first; the trailing duplicate field names belong to the (elided
 * #else) little-endian ordering of the same struct.
 */
267 union cvmx_pow_bist_stat
270 struct cvmx_pow_bist_stat_s
272 #if __BYTE_ORDER == __BIG_ENDIAN
273 uint64_t reserved_32_63 : 32;
274 uint64_t pp : 16; /**< Physical PP BIST status */
275 uint64_t reserved_0_15 : 16;
277 uint64_t reserved_0_15 : 16;
279 uint64_t reserved_32_63 : 32;
282 struct cvmx_pow_bist_stat_cn30xx
284 #if __BYTE_ORDER == __BIG_ENDIAN
285 uint64_t reserved_17_63 : 47;
286 uint64_t pp : 1; /**< Physical PP BIST status */
287 uint64_t reserved_9_15 : 7;
288 uint64_t cam : 1; /**< POW CAM BIST status */
289 uint64_t nbt1 : 1; /**< NCB transmitter memory 1 BIST status */
290 uint64_t nbt0 : 1; /**< NCB transmitter memory 0 BIST status */
291 uint64_t index : 1; /**< Index memory BIST status */
292 uint64_t fidx : 1; /**< Forward index memory BIST status */
293 uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */
294 uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */
295 uint64_t pend : 1; /**< Pending switch memory BIST status */
296 uint64_t adr : 1; /**< Address memory BIST status */
307 uint64_t reserved_9_15 : 7;
309 uint64_t reserved_17_63 : 47;
312 struct cvmx_pow_bist_stat_cn31xx
314 #if __BYTE_ORDER == __BIG_ENDIAN
315 uint64_t reserved_18_63 : 46;
316 uint64_t pp : 2; /**< Physical PP BIST status */
317 uint64_t reserved_9_15 : 7;
318 uint64_t cam : 1; /**< POW CAM BIST status */
319 uint64_t nbt1 : 1; /**< NCB transmitter memory 1 BIST status */
320 uint64_t nbt0 : 1; /**< NCB transmitter memory 0 BIST status */
321 uint64_t index : 1; /**< Index memory BIST status */
322 uint64_t fidx : 1; /**< Forward index memory BIST status */
323 uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */
324 uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */
325 uint64_t pend : 1; /**< Pending switch memory BIST status */
326 uint64_t adr : 1; /**< Address memory BIST status */
337 uint64_t reserved_9_15 : 7;
339 uint64_t reserved_18_63 : 46;
342 struct cvmx_pow_bist_stat_cn38xx
344 #if __BYTE_ORDER == __BIG_ENDIAN
345 uint64_t reserved_32_63 : 32;
346 uint64_t pp : 16; /**< Physical PP BIST status */
347 uint64_t reserved_10_15 : 6;
348 uint64_t cam : 1; /**< POW CAM BIST status */
349 uint64_t nbt : 1; /**< NCB transmitter memory BIST status */
350 uint64_t index : 1; /**< Index memory BIST status */
351 uint64_t fidx : 1; /**< Forward index memory BIST status */
352 uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */
353 uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */
354 uint64_t pend1 : 1; /**< Pending switch memory 1 BIST status */
355 uint64_t pend0 : 1; /**< Pending switch memory 0 BIST status */
356 uint64_t adr1 : 1; /**< Address memory 1 BIST status */
357 uint64_t adr0 : 1; /**< Address memory 0 BIST status */
369 uint64_t reserved_10_15 : 6;
371 uint64_t reserved_32_63 : 32;
374 struct cvmx_pow_bist_stat_cn38xx cn38xxp2;
375 struct cvmx_pow_bist_stat_cn31xx cn50xx;
376 struct cvmx_pow_bist_stat_cn52xx
378 #if __BYTE_ORDER == __BIG_ENDIAN
379 uint64_t reserved_20_63 : 44;
380 uint64_t pp : 4; /**< Physical PP BIST status */
381 uint64_t reserved_9_15 : 7;
382 uint64_t cam : 1; /**< POW CAM BIST status */
383 uint64_t nbt1 : 1; /**< NCB transmitter memory 1 BIST status */
384 uint64_t nbt0 : 1; /**< NCB transmitter memory 0 BIST status */
385 uint64_t index : 1; /**< Index memory BIST status */
386 uint64_t fidx : 1; /**< Forward index memory BIST status */
387 uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */
388 uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */
389 uint64_t pend : 1; /**< Pending switch memory BIST status */
390 uint64_t adr : 1; /**< Address memory BIST status */
401 uint64_t reserved_9_15 : 7;
403 uint64_t reserved_20_63 : 44;
406 struct cvmx_pow_bist_stat_cn52xx cn52xxp1;
407 struct cvmx_pow_bist_stat_cn56xx
409 #if __BYTE_ORDER == __BIG_ENDIAN
410 uint64_t reserved_28_63 : 36;
411 uint64_t pp : 12; /**< Physical PP BIST status */
412 uint64_t reserved_10_15 : 6;
413 uint64_t cam : 1; /**< POW CAM BIST status */
414 uint64_t nbt : 1; /**< NCB transmitter memory BIST status */
415 uint64_t index : 1; /**< Index memory BIST status */
416 uint64_t fidx : 1; /**< Forward index memory BIST status */
417 uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */
418 uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */
419 uint64_t pend1 : 1; /**< Pending switch memory 1 BIST status */
420 uint64_t pend0 : 1; /**< Pending switch memory 0 BIST status */
421 uint64_t adr1 : 1; /**< Address memory 1 BIST status */
422 uint64_t adr0 : 1; /**< Address memory 0 BIST status */
434 uint64_t reserved_10_15 : 6;
436 uint64_t reserved_28_63 : 36;
439 struct cvmx_pow_bist_stat_cn56xx cn56xxp1;
440 struct cvmx_pow_bist_stat_cn38xx cn58xx;
441 struct cvmx_pow_bist_stat_cn38xx cn58xxp1;
442 struct cvmx_pow_bist_stat_cn63xx
444 #if __BYTE_ORDER == __BIG_ENDIAN
445 uint64_t reserved_22_63 : 42;
446 uint64_t pp : 6; /**< Physical PP BIST status */
447 uint64_t reserved_12_15 : 4;
448 uint64_t cam : 1; /**< POW CAM BIST status */
449 uint64_t nbr : 3; /**< NCB receiver memory BIST status */
450 uint64_t nbt : 4; /**< NCB transmitter memory BIST status */
451 uint64_t index : 1; /**< Index memory BIST status */
452 uint64_t fidx : 1; /**< Forward index memory BIST status */
453 uint64_t pend : 1; /**< Pending switch memory BIST status */
454 uint64_t adr : 1; /**< Address memory BIST status */
463 uint64_t reserved_12_15 : 4;
465 uint64_t reserved_22_63 : 42;
468 struct cvmx_pow_bist_stat_cn63xx cn63xxp1;
470 typedef union cvmx_pow_bist_stat cvmx_pow_bist_stat_t;
475 * POW_DS_PC = POW De-Schedule Performance Counter
477 * Counts the number of de-schedule requests. Write to clear.
/* All chip models share the common layout, so every per-model member
 * aliases struct cvmx_pow_ds_pc_s (32-bit counter in the low word). */
482 struct cvmx_pow_ds_pc_s
484 #if __BYTE_ORDER == __BIG_ENDIAN
485 uint64_t reserved_32_63 : 32;
486 uint64_t ds_pc : 32; /**< De-schedule performance counter */
489 uint64_t reserved_32_63 : 32;
492 struct cvmx_pow_ds_pc_s cn30xx;
493 struct cvmx_pow_ds_pc_s cn31xx;
494 struct cvmx_pow_ds_pc_s cn38xx;
495 struct cvmx_pow_ds_pc_s cn38xxp2;
496 struct cvmx_pow_ds_pc_s cn50xx;
497 struct cvmx_pow_ds_pc_s cn52xx;
498 struct cvmx_pow_ds_pc_s cn52xxp1;
499 struct cvmx_pow_ds_pc_s cn56xx;
500 struct cvmx_pow_ds_pc_s cn56xxp1;
501 struct cvmx_pow_ds_pc_s cn58xx;
502 struct cvmx_pow_ds_pc_s cn58xxp1;
503 struct cvmx_pow_ds_pc_s cn63xx;
504 struct cvmx_pow_ds_pc_s cn63xxp1;
506 typedef union cvmx_pow_ds_pc cvmx_pow_ds_pc_t;
511 * POW_ECC_ERR = POW ECC Error Register
513 * Contains the single and double error bits and the corresponding interrupt enables for the ECC-
514 * protected POW index memory. Also contains the syndrome value in the event of an ECC error.
516 * Also contains the remote pointer error bit and interrupt enable. RPE is set when the POW detected
517 * corruption on one or more of the input queue lists in L2/DRAM (POW's local copy of the tail pointer
518 * for the L2/DRAM input queue did not match the last entry on the list). This is caused by
519 * L2/DRAM corruption, and is generally a fatal error because it likely caused POW to load bad work
522 * This register also contains the illegal operation error bits and the corresponding interrupt
523 * enables as follows:
525 * <0> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP from PP in NULL_NULL state
526 * <1> Received SWTAG/SWTAG_DESCH/DESCH/UPD_WQP from PP in NULL state
527 * <2> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/GET_WORK from PP with pending tag switch to ORDERED or ATOMIC
528 * <3> Received SWTAG/SWTAG_FULL/SWTAG_DESCH from PP with tag specified as NULL_NULL
529 * <4> Received SWTAG_FULL/SWTAG_DESCH from PP with tag specified as NULL
530 * <5> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/NULL_RD from PP with GET_WORK pending
531 * <6> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/NULL_RD from PP with NULL_RD pending
532 * <7> Received CLR_NSCHED from PP with SWTAG_DESCH/DESCH/CLR_NSCHED pending
533 * <8> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/NULL_RD from PP with CLR_NSCHED pending
534 * <9> Received illegal opcode
535 * <10> Received ADD_WORK with tag specified as NULL_NULL
536 * <11> Received DBG load from PP with DBG load pending
537 * <12> Received CSR load from PP with CSR load pending
/* The common layout has the full IOP/RPE/ECC fields; the cn31xx variant
 * (also used by cn38xxp2) lacks the illegal-operation (IOP) fields. */
539 union cvmx_pow_ecc_err
542 struct cvmx_pow_ecc_err_s
544 #if __BYTE_ORDER == __BIG_ENDIAN
545 uint64_t reserved_45_63 : 19;
546 uint64_t iop_ie : 13; /**< Illegal operation interrupt enables */
547 uint64_t reserved_29_31 : 3;
548 uint64_t iop : 13; /**< Illegal operation errors */
549 uint64_t reserved_14_15 : 2;
550 uint64_t rpe_ie : 1; /**< Remote pointer error interrupt enable */
551 uint64_t rpe : 1; /**< Remote pointer error */
552 uint64_t reserved_9_11 : 3;
553 uint64_t syn : 5; /**< Syndrome value (only valid when DBE or SBE is set) */
554 uint64_t dbe_ie : 1; /**< Double bit error interrupt enable */
555 uint64_t sbe_ie : 1; /**< Single bit error interrupt enable */
556 uint64_t dbe : 1; /**< Double bit error */
557 uint64_t sbe : 1; /**< Single bit error */
564 uint64_t reserved_9_11 : 3;
567 uint64_t reserved_14_15 : 2;
569 uint64_t reserved_29_31 : 3;
570 uint64_t iop_ie : 13;
571 uint64_t reserved_45_63 : 19;
574 struct cvmx_pow_ecc_err_s cn30xx;
575 struct cvmx_pow_ecc_err_cn31xx
577 #if __BYTE_ORDER == __BIG_ENDIAN
578 uint64_t reserved_14_63 : 50;
579 uint64_t rpe_ie : 1; /**< Remote pointer error interrupt enable */
580 uint64_t rpe : 1; /**< Remote pointer error */
581 uint64_t reserved_9_11 : 3;
582 uint64_t syn : 5; /**< Syndrome value (only valid when DBE or SBE is set) */
583 uint64_t dbe_ie : 1; /**< Double bit error interrupt enable */
584 uint64_t sbe_ie : 1; /**< Single bit error interrupt enable */
585 uint64_t dbe : 1; /**< Double bit error */
586 uint64_t sbe : 1; /**< Single bit error */
593 uint64_t reserved_9_11 : 3;
596 uint64_t reserved_14_63 : 50;
599 struct cvmx_pow_ecc_err_s cn38xx;
600 struct cvmx_pow_ecc_err_cn31xx cn38xxp2;
601 struct cvmx_pow_ecc_err_s cn50xx;
602 struct cvmx_pow_ecc_err_s cn52xx;
603 struct cvmx_pow_ecc_err_s cn52xxp1;
604 struct cvmx_pow_ecc_err_s cn56xx;
605 struct cvmx_pow_ecc_err_s cn56xxp1;
606 struct cvmx_pow_ecc_err_s cn58xx;
607 struct cvmx_pow_ecc_err_s cn58xxp1;
608 struct cvmx_pow_ecc_err_s cn63xx;
609 struct cvmx_pow_ecc_err_s cn63xxp1;
611 typedef union cvmx_pow_ecc_err cvmx_pow_ecc_err_t;
616 * POW_INT_CTL = POW Internal Control Register
618 * Contains POW internal control values (for internal use, not typically for customer use):
620 * PFR_DIS = Disable high-performance pre-fetch reset mode.
622 * NBR_THR = Assert ncb__busy when the number of remaining coherent bus NBR credits equals is less
623 * than or equal to this value.
625 union cvmx_pow_int_ctl
628 struct cvmx_pow_int_ctl_s
630 #if __BYTE_ORDER == __BIG_ENDIAN
631 uint64_t reserved_6_63 : 58;
632 uint64_t pfr_dis : 1; /**< High-perf pre-fetch reset mode disable */
633 uint64_t nbr_thr : 5; /**< NBR busy threshold */
635 uint64_t nbr_thr : 5;
636 uint64_t pfr_dis : 1;
637 uint64_t reserved_6_63 : 58;
/* Layout is identical on every supported model. */
640 struct cvmx_pow_int_ctl_s cn30xx;
641 struct cvmx_pow_int_ctl_s cn31xx;
642 struct cvmx_pow_int_ctl_s cn38xx;
643 struct cvmx_pow_int_ctl_s cn38xxp2;
644 struct cvmx_pow_int_ctl_s cn50xx;
645 struct cvmx_pow_int_ctl_s cn52xx;
646 struct cvmx_pow_int_ctl_s cn52xxp1;
647 struct cvmx_pow_int_ctl_s cn56xx;
648 struct cvmx_pow_int_ctl_s cn56xxp1;
649 struct cvmx_pow_int_ctl_s cn58xx;
650 struct cvmx_pow_int_ctl_s cn58xxp1;
651 struct cvmx_pow_int_ctl_s cn63xx;
652 struct cvmx_pow_int_ctl_s cn63xxp1;
654 typedef union cvmx_pow_int_ctl cvmx_pow_int_ctl_t;
659 * POW_IQ_CNTX = POW Input Queue Count Register (1 per QOS level)
661 * Contains a read-only count of the number of work queue entries for each QOS level.
663 union cvmx_pow_iq_cntx
666 struct cvmx_pow_iq_cntx_s
668 #if __BYTE_ORDER == __BIG_ENDIAN
669 uint64_t reserved_32_63 : 32;
670 uint64_t iq_cnt : 32; /**< Input queue count for QOS level X */
672 uint64_t iq_cnt : 32;
673 uint64_t reserved_32_63 : 32;
/* Layout is identical on every supported model. */
676 struct cvmx_pow_iq_cntx_s cn30xx;
677 struct cvmx_pow_iq_cntx_s cn31xx;
678 struct cvmx_pow_iq_cntx_s cn38xx;
679 struct cvmx_pow_iq_cntx_s cn38xxp2;
680 struct cvmx_pow_iq_cntx_s cn50xx;
681 struct cvmx_pow_iq_cntx_s cn52xx;
682 struct cvmx_pow_iq_cntx_s cn52xxp1;
683 struct cvmx_pow_iq_cntx_s cn56xx;
684 struct cvmx_pow_iq_cntx_s cn56xxp1;
685 struct cvmx_pow_iq_cntx_s cn58xx;
686 struct cvmx_pow_iq_cntx_s cn58xxp1;
687 struct cvmx_pow_iq_cntx_s cn63xx;
688 struct cvmx_pow_iq_cntx_s cn63xxp1;
690 typedef union cvmx_pow_iq_cntx cvmx_pow_iq_cntx_t;
693 * cvmx_pow_iq_com_cnt
695 * POW_IQ_COM_CNT = POW Input Queue Combined Count Register
697 * Contains a read-only count of the total number of work queue entries in all QOS levels.
699 union cvmx_pow_iq_com_cnt
702 struct cvmx_pow_iq_com_cnt_s
704 #if __BYTE_ORDER == __BIG_ENDIAN
705 uint64_t reserved_32_63 : 32;
706 uint64_t iq_cnt : 32; /**< Input queue combined count */
708 uint64_t iq_cnt : 32;
709 uint64_t reserved_32_63 : 32;
/* Layout is identical on every supported model. */
712 struct cvmx_pow_iq_com_cnt_s cn30xx;
713 struct cvmx_pow_iq_com_cnt_s cn31xx;
714 struct cvmx_pow_iq_com_cnt_s cn38xx;
715 struct cvmx_pow_iq_com_cnt_s cn38xxp2;
716 struct cvmx_pow_iq_com_cnt_s cn50xx;
717 struct cvmx_pow_iq_com_cnt_s cn52xx;
718 struct cvmx_pow_iq_com_cnt_s cn52xxp1;
719 struct cvmx_pow_iq_com_cnt_s cn56xx;
720 struct cvmx_pow_iq_com_cnt_s cn56xxp1;
721 struct cvmx_pow_iq_com_cnt_s cn58xx;
722 struct cvmx_pow_iq_com_cnt_s cn58xxp1;
723 struct cvmx_pow_iq_com_cnt_s cn63xx;
724 struct cvmx_pow_iq_com_cnt_s cn63xxp1;
726 typedef union cvmx_pow_iq_com_cnt cvmx_pow_iq_com_cnt_t;
731 * POW_IQ_INT = POW Input Queue Interrupt Register
733 * Contains the bits (1 per QOS level) that can trigger the input queue interrupt. An IQ_INT bit
734 * will be set if POW_IQ_CNT#QOS# changes and the resulting value is equal to POW_IQ_THR#QOS#.
736 union cvmx_pow_iq_int
739 struct cvmx_pow_iq_int_s
741 #if __BYTE_ORDER == __BIG_ENDIAN
742 uint64_t reserved_8_63 : 56;
743 uint64_t iq_int : 8; /**< Input queue interrupt bits */
746 uint64_t reserved_8_63 : 56;
/* Register exists only on the models listed below. */
749 struct cvmx_pow_iq_int_s cn52xx;
750 struct cvmx_pow_iq_int_s cn52xxp1;
751 struct cvmx_pow_iq_int_s cn56xx;
752 struct cvmx_pow_iq_int_s cn56xxp1;
753 struct cvmx_pow_iq_int_s cn63xx;
754 struct cvmx_pow_iq_int_s cn63xxp1;
756 typedef union cvmx_pow_iq_int cvmx_pow_iq_int_t;
761 * POW_IQ_INT_EN = POW Input Queue Interrupt Enable Register
763 * Contains the bits (1 per QOS level) that enable the input queue interrupt.
765 union cvmx_pow_iq_int_en
768 struct cvmx_pow_iq_int_en_s
770 #if __BYTE_ORDER == __BIG_ENDIAN
771 uint64_t reserved_8_63 : 56;
772 uint64_t int_en : 8; /**< Input queue interrupt enable bits */
775 uint64_t reserved_8_63 : 56;
/* Register exists only on the models listed below. */
778 struct cvmx_pow_iq_int_en_s cn52xx;
779 struct cvmx_pow_iq_int_en_s cn52xxp1;
780 struct cvmx_pow_iq_int_en_s cn56xx;
781 struct cvmx_pow_iq_int_en_s cn56xxp1;
782 struct cvmx_pow_iq_int_en_s cn63xx;
783 struct cvmx_pow_iq_int_en_s cn63xxp1;
785 typedef union cvmx_pow_iq_int_en cvmx_pow_iq_int_en_t;
790 * POW_IQ_THRX = POW Input Queue Threshold Register (1 per QOS level)
792 * Threshold value for triggering input queue interrupts.
794 union cvmx_pow_iq_thrx
797 struct cvmx_pow_iq_thrx_s
799 #if __BYTE_ORDER == __BIG_ENDIAN
800 uint64_t reserved_32_63 : 32;
801 uint64_t iq_thr : 32; /**< Input queue threshold for QOS level X */
803 uint64_t iq_thr : 32;
804 uint64_t reserved_32_63 : 32;
/* Register exists only on the models listed below. */
807 struct cvmx_pow_iq_thrx_s cn52xx;
808 struct cvmx_pow_iq_thrx_s cn52xxp1;
809 struct cvmx_pow_iq_thrx_s cn56xx;
810 struct cvmx_pow_iq_thrx_s cn56xxp1;
811 struct cvmx_pow_iq_thrx_s cn63xx;
812 struct cvmx_pow_iq_thrx_s cn63xxp1;
814 typedef union cvmx_pow_iq_thrx cvmx_pow_iq_thrx_t;
819 * POW_NOS_CNT = POW No-schedule Count Register
821 * Contains the number of work queue entries on the no-schedule list.
/* The NOS_CNT field width varies per model (7, 9, 10, 11 or 12 bits),
 * hence the per-model struct variants below. */
823 union cvmx_pow_nos_cnt
826 struct cvmx_pow_nos_cnt_s
828 #if __BYTE_ORDER == __BIG_ENDIAN
829 uint64_t reserved_12_63 : 52;
830 uint64_t nos_cnt : 12; /**< # of work queue entries on the no-schedule list */
832 uint64_t nos_cnt : 12;
833 uint64_t reserved_12_63 : 52;
836 struct cvmx_pow_nos_cnt_cn30xx
838 #if __BYTE_ORDER == __BIG_ENDIAN
839 uint64_t reserved_7_63 : 57;
840 uint64_t nos_cnt : 7; /**< # of work queue entries on the no-schedule list */
842 uint64_t nos_cnt : 7;
843 uint64_t reserved_7_63 : 57;
846 struct cvmx_pow_nos_cnt_cn31xx
848 #if __BYTE_ORDER == __BIG_ENDIAN
849 uint64_t reserved_9_63 : 55;
850 uint64_t nos_cnt : 9; /**< # of work queue entries on the no-schedule list */
852 uint64_t nos_cnt : 9;
853 uint64_t reserved_9_63 : 55;
856 struct cvmx_pow_nos_cnt_s cn38xx;
857 struct cvmx_pow_nos_cnt_s cn38xxp2;
858 struct cvmx_pow_nos_cnt_cn31xx cn50xx;
859 struct cvmx_pow_nos_cnt_cn52xx
861 #if __BYTE_ORDER == __BIG_ENDIAN
862 uint64_t reserved_10_63 : 54;
863 uint64_t nos_cnt : 10; /**< # of work queue entries on the no-schedule list */
865 uint64_t nos_cnt : 10;
866 uint64_t reserved_10_63 : 54;
869 struct cvmx_pow_nos_cnt_cn52xx cn52xxp1;
870 struct cvmx_pow_nos_cnt_s cn56xx;
871 struct cvmx_pow_nos_cnt_s cn56xxp1;
872 struct cvmx_pow_nos_cnt_s cn58xx;
873 struct cvmx_pow_nos_cnt_s cn58xxp1;
874 struct cvmx_pow_nos_cnt_cn63xx
876 #if __BYTE_ORDER == __BIG_ENDIAN
877 uint64_t reserved_11_63 : 53;
878 uint64_t nos_cnt : 11; /**< # of work queue entries on the no-schedule list */
880 uint64_t nos_cnt : 11;
881 uint64_t reserved_11_63 : 53;
884 struct cvmx_pow_nos_cnt_cn63xx cn63xxp1;
886 typedef union cvmx_pow_nos_cnt cvmx_pow_nos_cnt_t;
891 * POW_NW_TIM = POW New Work Timer Period Register
893 * Sets the minimum period for a new work request timeout. Period is specified in n-1 notation
894 * where the increment value is 1024 clock cycles. Thus, a value of 0x0 in this register translates
895 * to 1024 cycles, 0x1 translates to 2048 cycles, 0x2 translates to 3072 cycles, etc... Note: the
896 * maximum period for a new work request timeout is 2 times the minimum period. Note: the new work
897 * request timeout counter is reset when this register is written.
899 * There are two new work request timeout cases:
901 * - WAIT bit clear. The new work request can timeout if the timer expires before the pre-fetch
902 * engine has reached the end of all work queues. This can occur if the executable work queue
903 * entry is deep in the queue and the pre-fetch engine is subject to many resets (i.e. high switch,
904 * de-schedule, or new work load from other PP's). Thus, it is possible for a PP to receive a work
905 * response with the NO_WORK bit set even though there was at least one executable entry in the
906 * work queues. The other (and typical) scenario for receiving a NO_WORK response with the WAIT
907 * bit clear is that the pre-fetch engine has reached the end of all work queues without finding
910 * - WAIT bit set. The new work request can timeout if the timer expires before the pre-fetch
911 * engine has found executable work. In this case, the only scenario where the PP will receive a
912 * work response with the NO_WORK bit set is if the timer expires. Note: it is still possible for
913 * a PP to receive a NO_WORK response even though there was at least one executable entry in the
916 * In either case, it's important to note that switches and de-schedules are higher priority
917 * operations that can cause the pre-fetch engine to reset. Thus in a system with many switches or
918 * de-schedules occurring, it's possible for the new work timer to expire (resulting in NO_WORK
919 * responses) before the pre-fetch engine is able to get very deep into the work queues.
921 union cvmx_pow_nw_tim
924 struct cvmx_pow_nw_tim_s
926 #if __BYTE_ORDER == __BIG_ENDIAN
927 uint64_t reserved_10_63 : 54;
928 uint64_t nw_tim : 10; /**< New work timer period */
930 uint64_t nw_tim : 10;
931 uint64_t reserved_10_63 : 54;
/* Layout is identical on every supported model. */
934 struct cvmx_pow_nw_tim_s cn30xx;
935 struct cvmx_pow_nw_tim_s cn31xx;
936 struct cvmx_pow_nw_tim_s cn38xx;
937 struct cvmx_pow_nw_tim_s cn38xxp2;
938 struct cvmx_pow_nw_tim_s cn50xx;
939 struct cvmx_pow_nw_tim_s cn52xx;
940 struct cvmx_pow_nw_tim_s cn52xxp1;
941 struct cvmx_pow_nw_tim_s cn56xx;
942 struct cvmx_pow_nw_tim_s cn56xxp1;
943 struct cvmx_pow_nw_tim_s cn58xx;
944 struct cvmx_pow_nw_tim_s cn58xxp1;
945 struct cvmx_pow_nw_tim_s cn63xx;
946 struct cvmx_pow_nw_tim_s cn63xxp1;
948 typedef union cvmx_pow_nw_tim cvmx_pow_nw_tim_t;
951 * cvmx_pow_pf_rst_msk
953 * POW_PF_RST_MSK = POW Prefetch Reset Mask
955 * Resets the work prefetch engine when work is stored in an internal buffer (either when the add
956 * work arrives or when the work is reloaded from an external buffer) for an enabled QOS level
957 * (1 bit per QOS level).
/* POW_PF_RST_MSK: prefetch-engine reset mask, one bit per QOS level
   (see the register description above). */
959 union cvmx_pow_pf_rst_msk
962 struct cvmx_pow_pf_rst_msk_s
964 #if __BYTE_ORDER == __BIG_ENDIAN
965 uint64_t reserved_8_63 : 56;
966 uint64_t rst_msk : 8; /**< Prefetch engine reset mask */
/* Little-endian bit order: same fields as above, declared in reverse. */
968 uint64_t rst_msk : 8;
969 uint64_t reserved_8_63 : 56;
/* Per-chip views; every listed chip uses the common layout. */
972 struct cvmx_pow_pf_rst_msk_s cn50xx;
973 struct cvmx_pow_pf_rst_msk_s cn52xx;
974 struct cvmx_pow_pf_rst_msk_s cn52xxp1;
975 struct cvmx_pow_pf_rst_msk_s cn56xx;
976 struct cvmx_pow_pf_rst_msk_s cn56xxp1;
977 struct cvmx_pow_pf_rst_msk_s cn58xx;
978 struct cvmx_pow_pf_rst_msk_s cn58xxp1;
979 struct cvmx_pow_pf_rst_msk_s cn63xx;
980 struct cvmx_pow_pf_rst_msk_s cn63xxp1;
982 typedef union cvmx_pow_pf_rst_msk cvmx_pow_pf_rst_msk_t;
985 * cvmx_pow_pp_grp_msk#
987 * POW_PP_GRP_MSKX = POW PP Group Mask Register (1 per PP)
989 * Selects which group(s) a PP belongs to. A '1' in any bit position sets the PP's membership in
990 * the corresponding group. A value of 0x0 will prevent the PP from receiving new work. Note:
991 * disabled or non-existent PP's should have this field set to 0xffff (the reset value) in order to
992 * maximize POW performance.
994 * Also contains the QOS level priorities for each PP. 0x0 is highest priority, and 0x7 the lowest.
995 * Setting the priority to 0xf will prevent that PP from receiving work from that QOS level.
996 * Priority values 0x8 through 0xe are reserved and should not be used. For a given PP, priorities
997 * should begin at 0x0 and remain contiguous throughout the range.
/* POW_PP_GRP_MSKX (one per PP): group-membership mask plus per-QOS-level
   priorities for that PP (see the register description above). */
999 union cvmx_pow_pp_grp_mskx
1002 struct cvmx_pow_pp_grp_mskx_s
1004 #if __BYTE_ORDER == __BIG_ENDIAN
1005 uint64_t reserved_48_63 : 16;
1006 uint64_t qos7_pri : 4; /**< PPX priority for QOS level 7 */
1007 uint64_t qos6_pri : 4; /**< PPX priority for QOS level 6 */
1008 uint64_t qos5_pri : 4; /**< PPX priority for QOS level 5 */
1009 uint64_t qos4_pri : 4; /**< PPX priority for QOS level 4 */
1010 uint64_t qos3_pri : 4; /**< PPX priority for QOS level 3 */
1011 uint64_t qos2_pri : 4; /**< PPX priority for QOS level 2 */
1012 uint64_t qos1_pri : 4; /**< PPX priority for QOS level 1 */
1013 uint64_t qos0_pri : 4; /**< PPX priority for QOS level 0 */
1014 uint64_t grp_msk : 16; /**< PPX group mask */
/* Little-endian bit order: same fields as above, declared in reverse. */
1016 uint64_t grp_msk : 16;
1017 uint64_t qos0_pri : 4;
1018 uint64_t qos1_pri : 4;
1019 uint64_t qos2_pri : 4;
1020 uint64_t qos3_pri : 4;
1021 uint64_t qos4_pri : 4;
1022 uint64_t qos5_pri : 4;
1023 uint64_t qos6_pri : 4;
1024 uint64_t qos7_pri : 4;
1025 uint64_t reserved_48_63 : 16;
/* CN3xxx-family layout: group mask only, no per-QOS priority fields. */
1028 struct cvmx_pow_pp_grp_mskx_cn30xx
1030 #if __BYTE_ORDER == __BIG_ENDIAN
1031 uint64_t reserved_16_63 : 48;
1032 uint64_t grp_msk : 16; /**< PPX group mask */
1034 uint64_t grp_msk : 16;
1035 uint64_t reserved_16_63 : 48;
/* Per-chip views: CN3xxx parts use the mask-only layout, later parts the full one. */
1038 struct cvmx_pow_pp_grp_mskx_cn30xx cn31xx;
1039 struct cvmx_pow_pp_grp_mskx_cn30xx cn38xx;
1040 struct cvmx_pow_pp_grp_mskx_cn30xx cn38xxp2;
1041 struct cvmx_pow_pp_grp_mskx_s cn50xx;
1042 struct cvmx_pow_pp_grp_mskx_s cn52xx;
1043 struct cvmx_pow_pp_grp_mskx_s cn52xxp1;
1044 struct cvmx_pow_pp_grp_mskx_s cn56xx;
1045 struct cvmx_pow_pp_grp_mskx_s cn56xxp1;
1046 struct cvmx_pow_pp_grp_mskx_s cn58xx;
1047 struct cvmx_pow_pp_grp_mskx_s cn58xxp1;
1048 struct cvmx_pow_pp_grp_mskx_s cn63xx;
1049 struct cvmx_pow_pp_grp_mskx_s cn63xxp1;
1051 typedef union cvmx_pow_pp_grp_mskx cvmx_pow_pp_grp_mskx_t;
1056 * POW_QOS_RNDX = POW QOS Issue Round Register (4 rounds per register x 8 registers = 32 rounds)
1058 * Contains the round definitions for issuing new work. Each round consists of 8 bits with each bit
1059 * corresponding to a QOS level. There are 4 rounds contained in each register for a total of 32
1060 * rounds. The issue logic traverses through the rounds sequentially (lowest round to highest round)
1061 * in an attempt to find new work for each PP. Within each round, the issue logic traverses through
1062 * the QOS levels sequentially (highest QOS to lowest QOS) skipping over each QOS level with a clear
1063 * bit in the round mask. Note: setting a QOS level to all zeroes in all issue round registers will
1064 * prevent work from being issued from that QOS level.
/* POW_QOS_RNDX: four QOS issue-round masks per register, one bit per QOS
   level within each round (see the register description above). */
1066 union cvmx_pow_qos_rndx
1069 struct cvmx_pow_qos_rndx_s
1071 #if __BYTE_ORDER == __BIG_ENDIAN
1072 uint64_t reserved_32_63 : 32;
1073 uint64_t rnd_p3 : 8; /**< Round mask for round Xx4+3 */
1074 uint64_t rnd_p2 : 8; /**< Round mask for round Xx4+2 */
1075 uint64_t rnd_p1 : 8; /**< Round mask for round Xx4+1 */
1076 uint64_t rnd : 8; /**< Round mask for round Xx4 */
/* Little-endian bit order: same fields as above, declared in reverse. */
1079 uint64_t rnd_p1 : 8;
1080 uint64_t rnd_p2 : 8;
1081 uint64_t rnd_p3 : 8;
1082 uint64_t reserved_32_63 : 32;
/* Per-chip views; every listed chip uses the common layout. */
1085 struct cvmx_pow_qos_rndx_s cn30xx;
1086 struct cvmx_pow_qos_rndx_s cn31xx;
1087 struct cvmx_pow_qos_rndx_s cn38xx;
1088 struct cvmx_pow_qos_rndx_s cn38xxp2;
1089 struct cvmx_pow_qos_rndx_s cn50xx;
1090 struct cvmx_pow_qos_rndx_s cn52xx;
1091 struct cvmx_pow_qos_rndx_s cn52xxp1;
1092 struct cvmx_pow_qos_rndx_s cn56xx;
1093 struct cvmx_pow_qos_rndx_s cn56xxp1;
1094 struct cvmx_pow_qos_rndx_s cn58xx;
1095 struct cvmx_pow_qos_rndx_s cn58xxp1;
1096 struct cvmx_pow_qos_rndx_s cn63xx;
1097 struct cvmx_pow_qos_rndx_s cn63xxp1;
1099 typedef union cvmx_pow_qos_rndx cvmx_pow_qos_rndx_t;
1104 * POW_QOS_THRX = POW QOS Threshold Register (1 per QOS level)
1106 * Contains the thresholds for allocating POW internal storage buffers. If the number of remaining
1107 * free buffers drops below the minimum threshold (MIN_THR) or the number of allocated buffers for
1108 * this QOS level rises above the maximum threshold (MAX_THR), future incoming work queue entries
1109 * will be buffered externally rather than internally. This register also contains a read-only count
1110 * of the current number of free buffers (FREE_CNT), the number of internal buffers currently
1111 * allocated to this QOS level (BUF_CNT), and the total number of buffers on the de-schedule list
1112 * (DES_CNT) (which is not the same as the total number of de-scheduled buffers).
/* POW_QOS_THRX (one per QOS level): min/max internal-buffer allocation
   thresholds plus read-only free/allocated/de-schedule buffer counts
   (see the register description above). Per-chip variants below differ
   only in field widths. */
1114 union cvmx_pow_qos_thrx
1117 struct cvmx_pow_qos_thrx_s
1119 #if __BYTE_ORDER == __BIG_ENDIAN
1120 uint64_t reserved_60_63 : 4;
1121 uint64_t des_cnt : 12; /**< # of buffers on de-schedule list */
1122 uint64_t buf_cnt : 12; /**< # of internal buffers allocated to QOS level X */
1123 uint64_t free_cnt : 12; /**< # of total free buffers */
1124 uint64_t reserved_23_23 : 1;
1125 uint64_t max_thr : 11; /**< Max threshold for QOS level X */
1126 uint64_t reserved_11_11 : 1;
1127 uint64_t min_thr : 11; /**< Min threshold for QOS level X */
/* Little-endian bit order: same fields as above, declared in reverse. */
1129 uint64_t min_thr : 11;
1130 uint64_t reserved_11_11 : 1;
1131 uint64_t max_thr : 11;
1132 uint64_t reserved_23_23 : 1;
1133 uint64_t free_cnt : 12;
1134 uint64_t buf_cnt : 12;
1135 uint64_t des_cnt : 12;
1136 uint64_t reserved_60_63 : 4;
/* CN30xx layout: 7-bit counts and 6-bit thresholds. */
1139 struct cvmx_pow_qos_thrx_cn30xx
1141 #if __BYTE_ORDER == __BIG_ENDIAN
1142 uint64_t reserved_55_63 : 9;
1143 uint64_t des_cnt : 7; /**< # of buffers on de-schedule list */
1144 uint64_t reserved_43_47 : 5;
1145 uint64_t buf_cnt : 7; /**< # of internal buffers allocated to QOS level X */
1146 uint64_t reserved_31_35 : 5;
1147 uint64_t free_cnt : 7; /**< # of total free buffers */
1148 uint64_t reserved_18_23 : 6;
1149 uint64_t max_thr : 6; /**< Max threshold for QOS level X */
1150 uint64_t reserved_6_11 : 6;
1151 uint64_t min_thr : 6; /**< Min threshold for QOS level X */
1153 uint64_t min_thr : 6;
1154 uint64_t reserved_6_11 : 6;
1155 uint64_t max_thr : 6;
1156 uint64_t reserved_18_23 : 6;
1157 uint64_t free_cnt : 7;
1158 uint64_t reserved_31_35 : 5;
1159 uint64_t buf_cnt : 7;
1160 uint64_t reserved_43_47 : 5;
1161 uint64_t des_cnt : 7;
1162 uint64_t reserved_55_63 : 9;
/* CN31xx layout (also used by CN50xx): 9-bit counts and 8-bit thresholds. */
1165 struct cvmx_pow_qos_thrx_cn31xx
1167 #if __BYTE_ORDER == __BIG_ENDIAN
1168 uint64_t reserved_57_63 : 7;
1169 uint64_t des_cnt : 9; /**< # of buffers on de-schedule list */
1170 uint64_t reserved_45_47 : 3;
1171 uint64_t buf_cnt : 9; /**< # of internal buffers allocated to QOS level X */
1172 uint64_t reserved_33_35 : 3;
1173 uint64_t free_cnt : 9; /**< # of total free buffers */
1174 uint64_t reserved_20_23 : 4;
1175 uint64_t max_thr : 8; /**< Max threshold for QOS level X */
1176 uint64_t reserved_8_11 : 4;
1177 uint64_t min_thr : 8; /**< Min threshold for QOS level X */
1179 uint64_t min_thr : 8;
1180 uint64_t reserved_8_11 : 4;
1181 uint64_t max_thr : 8;
1182 uint64_t reserved_20_23 : 4;
1183 uint64_t free_cnt : 9;
1184 uint64_t reserved_33_35 : 3;
1185 uint64_t buf_cnt : 9;
1186 uint64_t reserved_45_47 : 3;
1187 uint64_t des_cnt : 9;
1188 uint64_t reserved_57_63 : 7;
1191 struct cvmx_pow_qos_thrx_s cn38xx;
1192 struct cvmx_pow_qos_thrx_s cn38xxp2;
1193 struct cvmx_pow_qos_thrx_cn31xx cn50xx;
/* CN52xx layout: 10-bit counts and 9-bit thresholds. */
1194 struct cvmx_pow_qos_thrx_cn52xx
1196 #if __BYTE_ORDER == __BIG_ENDIAN
1197 uint64_t reserved_58_63 : 6;
1198 uint64_t des_cnt : 10; /**< # of buffers on de-schedule list */
1199 uint64_t reserved_46_47 : 2;
1200 uint64_t buf_cnt : 10; /**< # of internal buffers allocated to QOS level X */
1201 uint64_t reserved_34_35 : 2;
1202 uint64_t free_cnt : 10; /**< # of total free buffers */
1203 uint64_t reserved_21_23 : 3;
1204 uint64_t max_thr : 9; /**< Max threshold for QOS level X */
1205 uint64_t reserved_9_11 : 3;
1206 uint64_t min_thr : 9; /**< Min threshold for QOS level X */
1208 uint64_t min_thr : 9;
1209 uint64_t reserved_9_11 : 3;
1210 uint64_t max_thr : 9;
1211 uint64_t reserved_21_23 : 3;
1212 uint64_t free_cnt : 10;
1213 uint64_t reserved_34_35 : 2;
1214 uint64_t buf_cnt : 10;
1215 uint64_t reserved_46_47 : 2;
1216 uint64_t des_cnt : 10;
1217 uint64_t reserved_58_63 : 6;
1220 struct cvmx_pow_qos_thrx_cn52xx cn52xxp1;
1221 struct cvmx_pow_qos_thrx_s cn56xx;
1222 struct cvmx_pow_qos_thrx_s cn56xxp1;
1223 struct cvmx_pow_qos_thrx_s cn58xx;
1224 struct cvmx_pow_qos_thrx_s cn58xxp1;
/* CN63xx layout: 11-bit counts and 10-bit thresholds. */
1225 struct cvmx_pow_qos_thrx_cn63xx
1227 #if __BYTE_ORDER == __BIG_ENDIAN
1228 uint64_t reserved_59_63 : 5;
1229 uint64_t des_cnt : 11; /**< # of buffers on de-schedule list */
1230 uint64_t reserved_47_47 : 1;
1231 uint64_t buf_cnt : 11; /**< # of internal buffers allocated to QOS level X */
1232 uint64_t reserved_35_35 : 1;
1233 uint64_t free_cnt : 11; /**< # of total free buffers */
1234 uint64_t reserved_22_23 : 2;
1235 uint64_t max_thr : 10; /**< Max threshold for QOS level X */
1236 uint64_t reserved_10_11 : 2;
1237 uint64_t min_thr : 10; /**< Min threshold for QOS level X */
1239 uint64_t min_thr : 10;
1240 uint64_t reserved_10_11 : 2;
1241 uint64_t max_thr : 10;
1242 uint64_t reserved_22_23 : 2;
1243 uint64_t free_cnt : 11;
1244 uint64_t reserved_35_35 : 1;
1245 uint64_t buf_cnt : 11;
1246 uint64_t reserved_47_47 : 1;
1247 uint64_t des_cnt : 11;
1248 uint64_t reserved_59_63 : 5;
1251 struct cvmx_pow_qos_thrx_cn63xx cn63xxp1;
1253 typedef union cvmx_pow_qos_thrx cvmx_pow_qos_thrx_t;
1258 * POW_TS_PC = POW Tag Switch Performance Counter
1260 * Counts the number of tag switch requests. Write to clear.
/* POW_TS_PC: tag-switch request performance counter; write to clear
   (see the register description above). */
1262 union cvmx_pow_ts_pc
1265 struct cvmx_pow_ts_pc_s
1267 #if __BYTE_ORDER == __BIG_ENDIAN
1268 uint64_t reserved_32_63 : 32;
1269 uint64_t ts_pc : 32; /**< Tag switch performance counter */
/* Little-endian bit order: same fields as above, declared in reverse. */
1271 uint64_t ts_pc : 32;
1272 uint64_t reserved_32_63 : 32;
/* Per-chip views; every listed chip uses the common layout. */
1275 struct cvmx_pow_ts_pc_s cn30xx;
1276 struct cvmx_pow_ts_pc_s cn31xx;
1277 struct cvmx_pow_ts_pc_s cn38xx;
1278 struct cvmx_pow_ts_pc_s cn38xxp2;
1279 struct cvmx_pow_ts_pc_s cn50xx;
1280 struct cvmx_pow_ts_pc_s cn52xx;
1281 struct cvmx_pow_ts_pc_s cn52xxp1;
1282 struct cvmx_pow_ts_pc_s cn56xx;
1283 struct cvmx_pow_ts_pc_s cn56xxp1;
1284 struct cvmx_pow_ts_pc_s cn58xx;
1285 struct cvmx_pow_ts_pc_s cn58xxp1;
1286 struct cvmx_pow_ts_pc_s cn63xx;
1287 struct cvmx_pow_ts_pc_s cn63xxp1;
1289 typedef union cvmx_pow_ts_pc cvmx_pow_ts_pc_t;
1292 * cvmx_pow_wa_com_pc
1294 * POW_WA_COM_PC = POW Work Add Combined Performance Counter
1296 * Counts the number of add new work requests for all QOS levels. Write to clear.
/* POW_WA_COM_PC: add-new-work counter combined across all QOS levels;
   write to clear (see the register description above). */
1298 union cvmx_pow_wa_com_pc
1301 struct cvmx_pow_wa_com_pc_s
1303 #if __BYTE_ORDER == __BIG_ENDIAN
1304 uint64_t reserved_32_63 : 32;
1305 uint64_t wa_pc : 32; /**< Work add combined performance counter */
/* Little-endian bit order: same fields as above, declared in reverse. */
1307 uint64_t wa_pc : 32;
1308 uint64_t reserved_32_63 : 32;
/* Per-chip views; every listed chip uses the common layout. */
1311 struct cvmx_pow_wa_com_pc_s cn30xx;
1312 struct cvmx_pow_wa_com_pc_s cn31xx;
1313 struct cvmx_pow_wa_com_pc_s cn38xx;
1314 struct cvmx_pow_wa_com_pc_s cn38xxp2;
1315 struct cvmx_pow_wa_com_pc_s cn50xx;
1316 struct cvmx_pow_wa_com_pc_s cn52xx;
1317 struct cvmx_pow_wa_com_pc_s cn52xxp1;
1318 struct cvmx_pow_wa_com_pc_s cn56xx;
1319 struct cvmx_pow_wa_com_pc_s cn56xxp1;
1320 struct cvmx_pow_wa_com_pc_s cn58xx;
1321 struct cvmx_pow_wa_com_pc_s cn58xxp1;
1322 struct cvmx_pow_wa_com_pc_s cn63xx;
1323 struct cvmx_pow_wa_com_pc_s cn63xxp1;
1325 typedef union cvmx_pow_wa_com_pc cvmx_pow_wa_com_pc_t;
1330 * POW_WA_PCX = POW Work Add Performance Counter (1 per QOS level)
1332 * Counts the number of add new work requests for each QOS level. Write to clear.
/* POW_WA_PCX (one per QOS level): add-new-work counter for that QOS level;
   write to clear (see the register description above). */
1334 union cvmx_pow_wa_pcx
1337 struct cvmx_pow_wa_pcx_s
1339 #if __BYTE_ORDER == __BIG_ENDIAN
1340 uint64_t reserved_32_63 : 32;
1341 uint64_t wa_pc : 32; /**< Work add performance counter for QOS level X */
/* Little-endian bit order: same fields as above, declared in reverse. */
1343 uint64_t wa_pc : 32;
1344 uint64_t reserved_32_63 : 32;
/* Per-chip views; every listed chip uses the common layout. */
1347 struct cvmx_pow_wa_pcx_s cn30xx;
1348 struct cvmx_pow_wa_pcx_s cn31xx;
1349 struct cvmx_pow_wa_pcx_s cn38xx;
1350 struct cvmx_pow_wa_pcx_s cn38xxp2;
1351 struct cvmx_pow_wa_pcx_s cn50xx;
1352 struct cvmx_pow_wa_pcx_s cn52xx;
1353 struct cvmx_pow_wa_pcx_s cn52xxp1;
1354 struct cvmx_pow_wa_pcx_s cn56xx;
1355 struct cvmx_pow_wa_pcx_s cn56xxp1;
1356 struct cvmx_pow_wa_pcx_s cn58xx;
1357 struct cvmx_pow_wa_pcx_s cn58xxp1;
1358 struct cvmx_pow_wa_pcx_s cn63xx;
1359 struct cvmx_pow_wa_pcx_s cn63xxp1;
1361 typedef union cvmx_pow_wa_pcx cvmx_pow_wa_pcx_t;
1366 * POW_WQ_INT = POW Work Queue Interrupt Register
1368 * Contains the bits (1 per group) that set work queue interrupts and are used to clear these
1369 * interrupts. Also contains the input queue interrupt temporary disable bits (1 per group). For
1370 * more information regarding this register, see the interrupt section.
/* POW_WQ_INT: per-group work-queue interrupt bits (WQ_INT, write-1-to-clear
   semantics per the description above) and input-queue interrupt temporary
   disables (IQ_DIS), 1 bit per group. */
1372 union cvmx_pow_wq_int
1375 struct cvmx_pow_wq_int_s
1377 #if __BYTE_ORDER == __BIG_ENDIAN
1378 uint64_t reserved_32_63 : 32;
1379 uint64_t iq_dis : 16; /**< Input queue interrupt temporary disable mask
1380 Corresponding WQ_INT<*> bit cannot be set due to
1381 IQ_CNT/IQ_THR check when this bit is set.
1382 Corresponding IQ_DIS bit is cleared by HW whenever:
1383 - POW_WQ_INT_CNT*[IQ_CNT] is zero, or
1384 - POW_WQ_INT_CNT*[TC_CNT]==1 when periodic
1385 counter POW_WQ_INT_PC[PC]==0 */
1386 uint64_t wq_int : 16; /**< Work queue interrupt bits
1387 Corresponding WQ_INT bit is set by HW whenever:
1388 - POW_WQ_INT_CNT*[IQ_CNT] >=
1389 POW_WQ_INT_THR*[IQ_THR] and the threshold
1390 interrupt is not disabled.
1391 IQ_DIS<*>==1 disables the interrupt.
1392 POW_WQ_INT_THR*[IQ_THR]==0 disables the int.
1393 - POW_WQ_INT_CNT*[DS_CNT] >=
1394 POW_WQ_INT_THR*[DS_THR] and the threshold
1395 interrupt is not disabled
1396 POW_WQ_INT_THR*[DS_THR]==0 disables the int.
1397 - POW_WQ_INT_CNT*[TC_CNT]==1 when periodic
1398 counter POW_WQ_INT_PC[PC]==0 and
1399 POW_WQ_INT_THR*[TC_EN]==1 and at least one of:
1400 - POW_WQ_INT_CNT*[IQ_CNT] > 0
1401 - POW_WQ_INT_CNT*[DS_CNT] > 0 */
/* Little-endian bit order: same fields as above, declared in reverse. */
1403 uint64_t wq_int : 16;
1404 uint64_t iq_dis : 16;
1405 uint64_t reserved_32_63 : 32;
/* Per-chip views; every listed chip uses the common layout. */
1408 struct cvmx_pow_wq_int_s cn30xx;
1409 struct cvmx_pow_wq_int_s cn31xx;
1410 struct cvmx_pow_wq_int_s cn38xx;
1411 struct cvmx_pow_wq_int_s cn38xxp2;
1412 struct cvmx_pow_wq_int_s cn50xx;
1413 struct cvmx_pow_wq_int_s cn52xx;
1414 struct cvmx_pow_wq_int_s cn52xxp1;
1415 struct cvmx_pow_wq_int_s cn56xx;
1416 struct cvmx_pow_wq_int_s cn56xxp1;
1417 struct cvmx_pow_wq_int_s cn58xx;
1418 struct cvmx_pow_wq_int_s cn58xxp1;
1419 struct cvmx_pow_wq_int_s cn63xx;
1420 struct cvmx_pow_wq_int_s cn63xxp1;
1422 typedef union cvmx_pow_wq_int cvmx_pow_wq_int_t;
1425 * cvmx_pow_wq_int_cnt#
1427 * POW_WQ_INT_CNTX = POW Work Queue Interrupt Count Register (1 per group)
1429 * Contains a read-only copy of the counts used to trigger work queue interrupts. For more
1430 * information regarding this register, see the interrupt section.
/* POW_WQ_INT_CNTX (one per group): read-only copies of the counts used to
   trigger work-queue interrupts (time counter, de-schedule count, input-queue
   count). Per-chip variants below differ only in DS_CNT/IQ_CNT field widths. */
1432 union cvmx_pow_wq_int_cntx
1435 struct cvmx_pow_wq_int_cntx_s
1437 #if __BYTE_ORDER == __BIG_ENDIAN
1438 uint64_t reserved_28_63 : 36;
1439 uint64_t tc_cnt : 4; /**< Time counter current value for group X
1440 HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever:
1441 - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and
1442 corresponding POW_WQ_INT_CNT*[DS_CNT]==0
1443 - corresponding POW_WQ_INT[WQ_INT<*>] is written
1445 - corresponding POW_WQ_INT[IQ_DIS<*>] is written
1447 - corresponding POW_WQ_INT_THR* is written by SW
1448 - TC_CNT==1 and periodic counter
1449 POW_WQ_INT_PC[PC]==0
1450 Otherwise, HW decrements TC_CNT whenever the
1451 periodic counter POW_WQ_INT_PC[PC]==0.
1452 TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */
1453 uint64_t ds_cnt : 12; /**< De-schedule executable count for group X */
1454 uint64_t iq_cnt : 12; /**< Input queue executable count for group X */
/* Little-endian bit order: same fields as above, declared in reverse. */
1456 uint64_t iq_cnt : 12;
1457 uint64_t ds_cnt : 12;
1458 uint64_t tc_cnt : 4;
1459 uint64_t reserved_28_63 : 36;
/* CN30xx layout: 7-bit DS_CNT/IQ_CNT. */
1462 struct cvmx_pow_wq_int_cntx_cn30xx
1464 #if __BYTE_ORDER == __BIG_ENDIAN
1465 uint64_t reserved_28_63 : 36;
1466 uint64_t tc_cnt : 4; /**< Time counter current value for group X
1467 HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever:
1468 - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and
1469 corresponding POW_WQ_INT_CNT*[DS_CNT]==0
1470 - corresponding POW_WQ_INT[WQ_INT<*>] is written
1472 - corresponding POW_WQ_INT[IQ_DIS<*>] is written
1474 - corresponding POW_WQ_INT_THR* is written by SW
1475 - TC_CNT==1 and periodic counter
1476 POW_WQ_INT_PC[PC]==0
1477 Otherwise, HW decrements TC_CNT whenever the
1478 periodic counter POW_WQ_INT_PC[PC]==0.
1479 TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */
1480 uint64_t reserved_19_23 : 5;
1481 uint64_t ds_cnt : 7; /**< De-schedule executable count for group X */
1482 uint64_t reserved_7_11 : 5;
1483 uint64_t iq_cnt : 7; /**< Input queue executable count for group X */
1485 uint64_t iq_cnt : 7;
1486 uint64_t reserved_7_11 : 5;
1487 uint64_t ds_cnt : 7;
1488 uint64_t reserved_19_23 : 5;
1489 uint64_t tc_cnt : 4;
1490 uint64_t reserved_28_63 : 36;
/* CN31xx layout (also used by CN50xx): 9-bit DS_CNT/IQ_CNT. */
1493 struct cvmx_pow_wq_int_cntx_cn31xx
1495 #if __BYTE_ORDER == __BIG_ENDIAN
1496 uint64_t reserved_28_63 : 36;
1497 uint64_t tc_cnt : 4; /**< Time counter current value for group X
1498 HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever:
1499 - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and
1500 corresponding POW_WQ_INT_CNT*[DS_CNT]==0
1501 - corresponding POW_WQ_INT[WQ_INT<*>] is written
1503 - corresponding POW_WQ_INT[IQ_DIS<*>] is written
1505 - corresponding POW_WQ_INT_THR* is written by SW
1506 - TC_CNT==1 and periodic counter
1507 POW_WQ_INT_PC[PC]==0
1508 Otherwise, HW decrements TC_CNT whenever the
1509 periodic counter POW_WQ_INT_PC[PC]==0.
1510 TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */
1511 uint64_t reserved_21_23 : 3;
1512 uint64_t ds_cnt : 9; /**< De-schedule executable count for group X */
1513 uint64_t reserved_9_11 : 3;
1514 uint64_t iq_cnt : 9; /**< Input queue executable count for group X */
1516 uint64_t iq_cnt : 9;
1517 uint64_t reserved_9_11 : 3;
1518 uint64_t ds_cnt : 9;
1519 uint64_t reserved_21_23 : 3;
1520 uint64_t tc_cnt : 4;
1521 uint64_t reserved_28_63 : 36;
1524 struct cvmx_pow_wq_int_cntx_s cn38xx;
1525 struct cvmx_pow_wq_int_cntx_s cn38xxp2;
1526 struct cvmx_pow_wq_int_cntx_cn31xx cn50xx;
/* CN52xx layout: 10-bit DS_CNT/IQ_CNT. */
1527 struct cvmx_pow_wq_int_cntx_cn52xx
1529 #if __BYTE_ORDER == __BIG_ENDIAN
1530 uint64_t reserved_28_63 : 36;
1531 uint64_t tc_cnt : 4; /**< Time counter current value for group X
1532 HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever:
1533 - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and
1534 corresponding POW_WQ_INT_CNT*[DS_CNT]==0
1535 - corresponding POW_WQ_INT[WQ_INT<*>] is written
1537 - corresponding POW_WQ_INT[IQ_DIS<*>] is written
1539 - corresponding POW_WQ_INT_THR* is written by SW
1540 - TC_CNT==1 and periodic counter
1541 POW_WQ_INT_PC[PC]==0
1542 Otherwise, HW decrements TC_CNT whenever the
1543 periodic counter POW_WQ_INT_PC[PC]==0.
1544 TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */
1545 uint64_t reserved_22_23 : 2;
1546 uint64_t ds_cnt : 10; /**< De-schedule executable count for group X */
1547 uint64_t reserved_10_11 : 2;
1548 uint64_t iq_cnt : 10; /**< Input queue executable count for group X */
1550 uint64_t iq_cnt : 10;
1551 uint64_t reserved_10_11 : 2;
1552 uint64_t ds_cnt : 10;
1553 uint64_t reserved_22_23 : 2;
1554 uint64_t tc_cnt : 4;
1555 uint64_t reserved_28_63 : 36;
1558 struct cvmx_pow_wq_int_cntx_cn52xx cn52xxp1;
1559 struct cvmx_pow_wq_int_cntx_s cn56xx;
1560 struct cvmx_pow_wq_int_cntx_s cn56xxp1;
1561 struct cvmx_pow_wq_int_cntx_s cn58xx;
1562 struct cvmx_pow_wq_int_cntx_s cn58xxp1;
/* CN63xx layout: 11-bit DS_CNT/IQ_CNT. */
1563 struct cvmx_pow_wq_int_cntx_cn63xx
1565 #if __BYTE_ORDER == __BIG_ENDIAN
1566 uint64_t reserved_28_63 : 36;
1567 uint64_t tc_cnt : 4; /**< Time counter current value for group X
1568 HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever:
1569 - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and
1570 corresponding POW_WQ_INT_CNT*[DS_CNT]==0
1571 - corresponding POW_WQ_INT[WQ_INT<*>] is written
1573 - corresponding POW_WQ_INT[IQ_DIS<*>] is written
1575 - corresponding POW_WQ_INT_THR* is written by SW
1576 - TC_CNT==1 and periodic counter
1577 POW_WQ_INT_PC[PC]==0
1578 Otherwise, HW decrements TC_CNT whenever the
1579 periodic counter POW_WQ_INT_PC[PC]==0.
1580 TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */
1581 uint64_t reserved_23_23 : 1;
1582 uint64_t ds_cnt : 11; /**< De-schedule executable count for group X */
1583 uint64_t reserved_11_11 : 1;
1584 uint64_t iq_cnt : 11; /**< Input queue executable count for group X */
1586 uint64_t iq_cnt : 11;
1587 uint64_t reserved_11_11 : 1;
1588 uint64_t ds_cnt : 11;
1589 uint64_t reserved_23_23 : 1;
1590 uint64_t tc_cnt : 4;
1591 uint64_t reserved_28_63 : 36;
1594 struct cvmx_pow_wq_int_cntx_cn63xx cn63xxp1;
1596 typedef union cvmx_pow_wq_int_cntx cvmx_pow_wq_int_cntx_t;
1599 * cvmx_pow_wq_int_pc
1601 * POW_WQ_INT_PC = POW Work Queue Interrupt Periodic Counter Register
1603 * Contains the threshold value for the work queue interrupt periodic counter and also a read-only
1604 * copy of the periodic counter. For more information regarding this register, see the interrupt
/* POW_WQ_INT_PC: work-queue interrupt periodic counter (read-only PC) and
   its threshold (PC_THR), per the description above. */
1607 union cvmx_pow_wq_int_pc
1610 struct cvmx_pow_wq_int_pc_s
1612 #if __BYTE_ORDER == __BIG_ENDIAN
1613 uint64_t reserved_60_63 : 4;
1614 uint64_t pc : 28; /**< Work queue interrupt periodic counter */
1615 uint64_t reserved_28_31 : 4;
1616 uint64_t pc_thr : 20; /**< Work queue interrupt periodic counter threshold */
1617 uint64_t reserved_0_7 : 8;
/* Little-endian bit order: same fields as above, declared in reverse. */
1619 uint64_t reserved_0_7 : 8;
1620 uint64_t pc_thr : 20;
1621 uint64_t reserved_28_31 : 4;
1623 uint64_t reserved_60_63 : 4;
/* Per-chip views; every listed chip uses the common layout. */
1626 struct cvmx_pow_wq_int_pc_s cn30xx;
1627 struct cvmx_pow_wq_int_pc_s cn31xx;
1628 struct cvmx_pow_wq_int_pc_s cn38xx;
1629 struct cvmx_pow_wq_int_pc_s cn38xxp2;
1630 struct cvmx_pow_wq_int_pc_s cn50xx;
1631 struct cvmx_pow_wq_int_pc_s cn52xx;
1632 struct cvmx_pow_wq_int_pc_s cn52xxp1;
1633 struct cvmx_pow_wq_int_pc_s cn56xx;
1634 struct cvmx_pow_wq_int_pc_s cn56xxp1;
1635 struct cvmx_pow_wq_int_pc_s cn58xx;
1636 struct cvmx_pow_wq_int_pc_s cn58xxp1;
1637 struct cvmx_pow_wq_int_pc_s cn63xx;
1638 struct cvmx_pow_wq_int_pc_s cn63xxp1;
1640 typedef union cvmx_pow_wq_int_pc cvmx_pow_wq_int_pc_t;
1643 * cvmx_pow_wq_int_thr#
1645 * POW_WQ_INT_THRX = POW Work Queue Interrupt Threshold Register (1 per group)
1647 * Contains the thresholds for enabling and setting work queue interrupts. For more information
1648 * regarding this register, see the interrupt section.
1650 * Note: Up to 8 of the POW's internal storage buffers can be allocated for hardware use and are
1651 * therefore not available for incoming work queue entries. Additionally, any PP that is not in the
1652 * NULL_NULL state consumes a buffer. Thus in a 6 PP system, it is not advisable to set either
1653 * IQ_THR or DS_THR to greater than 1024 - 8 - 6 = 1010. Doing so may prevent the interrupt from
/* POW_WQ_INT_THRX (one per group): thresholds for enabling/setting work-queue
   interrupts (time-counter enable/threshold, de-schedule threshold, input-queue
   threshold). Per-chip variants below differ only in DS_THR/IQ_THR widths. */
1656 union cvmx_pow_wq_int_thrx
1659 struct cvmx_pow_wq_int_thrx_s
1661 #if __BYTE_ORDER == __BIG_ENDIAN
1662 uint64_t reserved_29_63 : 35;
1663 uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
1664 TC_EN must be zero when TC_THR==0 */
1665 uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
1666 When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */
1667 uint64_t reserved_23_23 : 1;
1668 uint64_t ds_thr : 11; /**< De-schedule count threshold for group X
1669 DS_THR==0 disables the threshold interrupt */
1670 uint64_t reserved_11_11 : 1;
1671 uint64_t iq_thr : 11; /**< Input queue count threshold for group X
1672 IQ_THR==0 disables the threshold interrupt */
/* Little-endian bit order: same fields as above, declared in reverse. */
1674 uint64_t iq_thr : 11;
1675 uint64_t reserved_11_11 : 1;
1676 uint64_t ds_thr : 11;
1677 uint64_t reserved_23_23 : 1;
1678 uint64_t tc_thr : 4;
1680 uint64_t reserved_29_63 : 35;
/* CN30xx layout: 6-bit DS_THR/IQ_THR. */
1683 struct cvmx_pow_wq_int_thrx_cn30xx
1685 #if __BYTE_ORDER == __BIG_ENDIAN
1686 uint64_t reserved_29_63 : 35;
1687 uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
1688 TC_EN must be zero when TC_THR==0 */
1689 uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
1690 When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */
1691 uint64_t reserved_18_23 : 6;
1692 uint64_t ds_thr : 6; /**< De-schedule count threshold for group X
1693 DS_THR==0 disables the threshold interrupt */
1694 uint64_t reserved_6_11 : 6;
1695 uint64_t iq_thr : 6; /**< Input queue count threshold for group X
1696 IQ_THR==0 disables the threshold interrupt */
1698 uint64_t iq_thr : 6;
1699 uint64_t reserved_6_11 : 6;
1700 uint64_t ds_thr : 6;
1701 uint64_t reserved_18_23 : 6;
1702 uint64_t tc_thr : 4;
1704 uint64_t reserved_29_63 : 35;
/* CN31xx layout (also used by CN50xx): 8-bit DS_THR/IQ_THR. */
1707 struct cvmx_pow_wq_int_thrx_cn31xx
1709 #if __BYTE_ORDER == __BIG_ENDIAN
1710 uint64_t reserved_29_63 : 35;
1711 uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
1712 TC_EN must be zero when TC_THR==0 */
1713 uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
1714 When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */
1715 uint64_t reserved_20_23 : 4;
1716 uint64_t ds_thr : 8; /**< De-schedule count threshold for group X
1717 DS_THR==0 disables the threshold interrupt */
1718 uint64_t reserved_8_11 : 4;
1719 uint64_t iq_thr : 8; /**< Input queue count threshold for group X
1720 IQ_THR==0 disables the threshold interrupt */
1722 uint64_t iq_thr : 8;
1723 uint64_t reserved_8_11 : 4;
1724 uint64_t ds_thr : 8;
1725 uint64_t reserved_20_23 : 4;
1726 uint64_t tc_thr : 4;
1728 uint64_t reserved_29_63 : 35;
1731 struct cvmx_pow_wq_int_thrx_s cn38xx;
1732 struct cvmx_pow_wq_int_thrx_s cn38xxp2;
1733 struct cvmx_pow_wq_int_thrx_cn31xx cn50xx;
/* CN52xx layout: 9-bit DS_THR/IQ_THR. */
1734 struct cvmx_pow_wq_int_thrx_cn52xx
1736 #if __BYTE_ORDER == __BIG_ENDIAN
1737 uint64_t reserved_29_63 : 35;
1738 uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
1739 TC_EN must be zero when TC_THR==0 */
1740 uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
1741 When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */
1742 uint64_t reserved_21_23 : 3;
1743 uint64_t ds_thr : 9; /**< De-schedule count threshold for group X
1744 DS_THR==0 disables the threshold interrupt */
1745 uint64_t reserved_9_11 : 3;
1746 uint64_t iq_thr : 9; /**< Input queue count threshold for group X
1747 IQ_THR==0 disables the threshold interrupt */
1749 uint64_t iq_thr : 9;
1750 uint64_t reserved_9_11 : 3;
1751 uint64_t ds_thr : 9;
1752 uint64_t reserved_21_23 : 3;
1753 uint64_t tc_thr : 4;
1755 uint64_t reserved_29_63 : 35;
1758 struct cvmx_pow_wq_int_thrx_cn52xx cn52xxp1;
1759 struct cvmx_pow_wq_int_thrx_s cn56xx;
1760 struct cvmx_pow_wq_int_thrx_s cn56xxp1;
1761 struct cvmx_pow_wq_int_thrx_s cn58xx;
1762 struct cvmx_pow_wq_int_thrx_s cn58xxp1;
/* CN63xx layout: 10-bit DS_THR/IQ_THR. */
1763 struct cvmx_pow_wq_int_thrx_cn63xx
1765 #if __BYTE_ORDER == __BIG_ENDIAN
1766 uint64_t reserved_29_63 : 35;
1767 uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
1768 TC_EN must be zero when TC_THR==0 */
1769 uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
1770 When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */
1771 uint64_t reserved_22_23 : 2;
1772 uint64_t ds_thr : 10; /**< De-schedule count threshold for group X
1773 DS_THR==0 disables the threshold interrupt */
1774 uint64_t reserved_10_11 : 2;
1775 uint64_t iq_thr : 10; /**< Input queue count threshold for group X
1776 IQ_THR==0 disables the threshold interrupt */
1778 uint64_t iq_thr : 10;
1779 uint64_t reserved_10_11 : 2;
1780 uint64_t ds_thr : 10;
1781 uint64_t reserved_22_23 : 2;
1782 uint64_t tc_thr : 4;
1784 uint64_t reserved_29_63 : 35;
1787 struct cvmx_pow_wq_int_thrx_cn63xx cn63xxp1;
1789 typedef union cvmx_pow_wq_int_thrx cvmx_pow_wq_int_thrx_t;
1794 * POW_WS_PCX = POW Work Schedule Performance Counter (1 per group)
1796 * Counts the number of work schedules for each group. Write to clear.
/* POW_WS_PCX (one per group): work-schedule performance counter; write to
   clear (see the register description above). */
1798 union cvmx_pow_ws_pcx
1801 struct cvmx_pow_ws_pcx_s
1803 #if __BYTE_ORDER == __BIG_ENDIAN
1804 uint64_t reserved_32_63 : 32;
1805 uint64_t ws_pc : 32; /**< Work schedule performance counter for group X */
/* Little-endian bit order: same fields as above, declared in reverse. */
1807 uint64_t ws_pc : 32;
1808 uint64_t reserved_32_63 : 32;
/* Per-chip views; every listed chip uses the common layout. */
1811 struct cvmx_pow_ws_pcx_s cn30xx;
1812 struct cvmx_pow_ws_pcx_s cn31xx;
1813 struct cvmx_pow_ws_pcx_s cn38xx;
1814 struct cvmx_pow_ws_pcx_s cn38xxp2;
1815 struct cvmx_pow_ws_pcx_s cn50xx;
1816 struct cvmx_pow_ws_pcx_s cn52xx;
1817 struct cvmx_pow_ws_pcx_s cn52xxp1;
1818 struct cvmx_pow_ws_pcx_s cn56xx;
1819 struct cvmx_pow_ws_pcx_s cn56xxp1;
1820 struct cvmx_pow_ws_pcx_s cn58xx;
1821 struct cvmx_pow_ws_pcx_s cn58xxp1;
1822 struct cvmx_pow_ws_pcx_s cn63xx;
1823 struct cvmx_pow_ws_pcx_s cn63xxp1;
1825 typedef union cvmx_pow_ws_pcx cvmx_pow_ws_pcx_t;