1 /***********************license start***************
2 * Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
44 * Configuration and status register (CSR) type definitions for
47 * This file is auto generated. Do not edit.
52 #ifndef __CVMX_SSO_DEFS_H__
53 #define __CVMX_SSO_DEFS_H__
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_ACTIVE_CYCLES CVMX_SSO_ACTIVE_CYCLES_FUNC()
/* Address of SSO_ACTIVE_CYCLES; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_ACTIVE_CYCLES_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_ACTIVE_CYCLES not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010E8ull);
}
#else
#define CVMX_SSO_ACTIVE_CYCLES (CVMX_ADD_IO_SEG(0x00016700000010E8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_BIST_STAT CVMX_SSO_BIST_STAT_FUNC()
/* Address of SSO_BIST_STAT; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_BIST_STAT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_BIST_STAT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001078ull);
}
#else
#define CVMX_SSO_BIST_STAT (CVMX_ADD_IO_SEG(0x0001670000001078ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_CFG CVMX_SSO_CFG_FUNC()
/* Address of SSO_CFG; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_CFG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_CFG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001088ull);
}
#else
#define CVMX_SSO_CFG (CVMX_ADD_IO_SEG(0x0001670000001088ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_DS_PC CVMX_SSO_DS_PC_FUNC()
/* Address of SSO_DS_PC; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_DS_PC_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_DS_PC not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001070ull);
}
#else
#define CVMX_SSO_DS_PC (CVMX_ADD_IO_SEG(0x0001670000001070ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_ERR CVMX_SSO_ERR_FUNC()
/* Address of SSO_ERR; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_ERR_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_ERR not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001038ull);
}
#else
#define CVMX_SSO_ERR (CVMX_ADD_IO_SEG(0x0001670000001038ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_ERR_ENB CVMX_SSO_ERR_ENB_FUNC()
/* Address of SSO_ERR_ENB; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_ERR_ENB_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_ERR_ENB not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001030ull);
}
#else
#define CVMX_SSO_ERR_ENB (CVMX_ADD_IO_SEG(0x0001670000001030ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_FIDX_ECC_CTL CVMX_SSO_FIDX_ECC_CTL_FUNC()
/* Address of SSO_FIDX_ECC_CTL; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_FIDX_ECC_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_FIDX_ECC_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010D0ull);
}
#else
#define CVMX_SSO_FIDX_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010D0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_FIDX_ECC_ST CVMX_SSO_FIDX_ECC_ST_FUNC()
/* Address of SSO_FIDX_ECC_ST; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_FIDX_ECC_ST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_FIDX_ECC_ST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010D8ull);
}
#else
#define CVMX_SSO_FIDX_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010D8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_FPAGE_CNT CVMX_SSO_FPAGE_CNT_FUNC()
/* Address of SSO_FPAGE_CNT; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_FPAGE_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_FPAGE_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001090ull);
}
#else
#define CVMX_SSO_FPAGE_CNT (CVMX_ADD_IO_SEG(0x0001670000001090ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_GWE_CFG CVMX_SSO_GWE_CFG_FUNC()
/* Address of SSO_GWE_CFG; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_GWE_CFG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_GWE_CFG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001098ull);
}
#else
#define CVMX_SSO_GWE_CFG (CVMX_ADD_IO_SEG(0x0001670000001098ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_IDX_ECC_CTL CVMX_SSO_IDX_ECC_CTL_FUNC()
/* Address of SSO_IDX_ECC_CTL; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_IDX_ECC_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_IDX_ECC_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010C0ull);
}
#else
#define CVMX_SSO_IDX_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010C0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_IDX_ECC_ST CVMX_SSO_IDX_ECC_ST_FUNC()
/* Address of SSO_IDX_ECC_ST; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_IDX_ECC_ST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_IDX_ECC_ST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010C8ull);
}
#else
#define CVMX_SSO_IDX_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010C8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_IQ_CNT(0..7); warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_IQ_CNTX(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 7)))
		cvmx_warn("CVMX_SSO_IQ_CNTX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000009000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_IQ_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000009000ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_IQ_COM_CNT CVMX_SSO_IQ_COM_CNT_FUNC()
/* Address of SSO_IQ_COM_CNT; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_IQ_COM_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_IQ_COM_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001058ull);
}
#else
#define CVMX_SSO_IQ_COM_CNT (CVMX_ADD_IO_SEG(0x0001670000001058ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_IQ_INT CVMX_SSO_IQ_INT_FUNC()
/* Address of SSO_IQ_INT; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_IQ_INT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_IQ_INT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001048ull);
}
#else
#define CVMX_SSO_IQ_INT (CVMX_ADD_IO_SEG(0x0001670000001048ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_IQ_INT_EN CVMX_SSO_IQ_INT_EN_FUNC()
/* Address of SSO_IQ_INT_EN; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_IQ_INT_EN_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_IQ_INT_EN not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001050ull);
}
#else
#define CVMX_SSO_IQ_INT_EN (CVMX_ADD_IO_SEG(0x0001670000001050ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_IQ_THR(0..7); warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_IQ_THRX(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 7)))
		cvmx_warn("CVMX_SSO_IQ_THRX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x000167000000A000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_IQ_THRX(offset) (CVMX_ADD_IO_SEG(0x000167000000A000ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_NOS_CNT CVMX_SSO_NOS_CNT_FUNC()
/* Address of SSO_NOS_CNT; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_NOS_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_NOS_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001040ull);
}
#else
#define CVMX_SSO_NOS_CNT (CVMX_ADD_IO_SEG(0x0001670000001040ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_NW_TIM CVMX_SSO_NW_TIM_FUNC()
/* Address of SSO_NW_TIM; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_NW_TIM_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_NW_TIM not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001028ull);
}
#else
#define CVMX_SSO_NW_TIM (CVMX_ADD_IO_SEG(0x0001670000001028ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_OTH_ECC_CTL CVMX_SSO_OTH_ECC_CTL_FUNC()
/* Address of SSO_OTH_ECC_CTL; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_OTH_ECC_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_OTH_ECC_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010B0ull);
}
#else
#define CVMX_SSO_OTH_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010B0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_OTH_ECC_ST CVMX_SSO_OTH_ECC_ST_FUNC()
/* Address of SSO_OTH_ECC_ST; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_OTH_ECC_ST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_OTH_ECC_ST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010B8ull);
}
#else
#define CVMX_SSO_OTH_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010B8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_PND_ECC_CTL CVMX_SSO_PND_ECC_CTL_FUNC()
/* Address of SSO_PND_ECC_CTL; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_PND_ECC_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_PND_ECC_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010A0ull);
}
#else
#define CVMX_SSO_PND_ECC_CTL (CVMX_ADD_IO_SEG(0x00016700000010A0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_PND_ECC_ST CVMX_SSO_PND_ECC_ST_FUNC()
/* Address of SSO_PND_ECC_ST; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_PND_ECC_ST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_PND_ECC_ST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010A8ull);
}
#else
#define CVMX_SSO_PND_ECC_ST (CVMX_ADD_IO_SEG(0x00016700000010A8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_PP(0..31)_GRP_MSK; warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_PPX_GRP_MSK(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 31)))
		cvmx_warn("CVMX_SSO_PPX_GRP_MSK(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8;
}
#else
#define CVMX_SSO_PPX_GRP_MSK(offset) (CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_PP(0..31)_QOS_PRI; warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_PPX_QOS_PRI(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 31)))
		cvmx_warn("CVMX_SSO_PPX_QOS_PRI(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000003000ull) + ((offset) & 31) * 8;
}
#else
#define CVMX_SSO_PPX_QOS_PRI(offset) (CVMX_ADD_IO_SEG(0x0001670000003000ull) + ((offset) & 31) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_PP_STRICT CVMX_SSO_PP_STRICT_FUNC()
/* Address of SSO_PP_STRICT; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_PP_STRICT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_PP_STRICT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010E0ull);
}
#else
#define CVMX_SSO_PP_STRICT (CVMX_ADD_IO_SEG(0x00016700000010E0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_QOS(0..7)_RND; warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_QOSX_RND(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 7)))
		cvmx_warn("CVMX_SSO_QOSX_RND(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000002000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_QOSX_RND(offset) (CVMX_ADD_IO_SEG(0x0001670000002000ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_QOS_THR(0..7); warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_QOS_THRX(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 7)))
		cvmx_warn("CVMX_SSO_QOS_THRX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x000167000000B000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_QOS_THRX(offset) (CVMX_ADD_IO_SEG(0x000167000000B000ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_QOS_WE CVMX_SSO_QOS_WE_FUNC()
/* Address of SSO_QOS_WE; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_QOS_WE_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_QOS_WE not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001080ull);
}
#else
#define CVMX_SSO_QOS_WE (CVMX_ADD_IO_SEG(0x0001670000001080ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_RESET CVMX_SSO_RESET_FUNC()
/* Address of SSO_RESET; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_RESET_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_RESET not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00016700000010F0ull);
}
#else
#define CVMX_SSO_RESET (CVMX_ADD_IO_SEG(0x00016700000010F0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_RWQ_HEAD_PTR(0..7); warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_RWQ_HEAD_PTRX(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 7)))
		cvmx_warn("CVMX_SSO_RWQ_HEAD_PTRX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x000167000000C000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_RWQ_HEAD_PTRX(offset) (CVMX_ADD_IO_SEG(0x000167000000C000ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_RWQ_POP_FPTR CVMX_SSO_RWQ_POP_FPTR_FUNC()
/* Address of SSO_RWQ_POP_FPTR; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_RWQ_POP_FPTR_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_RWQ_POP_FPTR not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x000167000000C408ull);
}
#else
#define CVMX_SSO_RWQ_POP_FPTR (CVMX_ADD_IO_SEG(0x000167000000C408ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_RWQ_PSH_FPTR CVMX_SSO_RWQ_PSH_FPTR_FUNC()
/* Address of SSO_RWQ_PSH_FPTR; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_RWQ_PSH_FPTR_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_RWQ_PSH_FPTR not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x000167000000C400ull);
}
#else
#define CVMX_SSO_RWQ_PSH_FPTR (CVMX_ADD_IO_SEG(0x000167000000C400ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_RWQ_TAIL_PTR(0..7); warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_RWQ_TAIL_PTRX(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 7)))
		cvmx_warn("CVMX_SSO_RWQ_TAIL_PTRX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x000167000000C200ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_RWQ_TAIL_PTRX(offset) (CVMX_ADD_IO_SEG(0x000167000000C200ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_TS_PC CVMX_SSO_TS_PC_FUNC()
/* Address of SSO_TS_PC; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_TS_PC_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_TS_PC not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001068ull);
}
#else
#define CVMX_SSO_TS_PC (CVMX_ADD_IO_SEG(0x0001670000001068ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_WA_COM_PC CVMX_SSO_WA_COM_PC_FUNC()
/* Address of SSO_WA_COM_PC; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_WA_COM_PC_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_WA_COM_PC not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001060ull);
}
#else
#define CVMX_SSO_WA_COM_PC (CVMX_ADD_IO_SEG(0x0001670000001060ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_WA_PC(0..7); warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_WA_PCX(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 7)))
		cvmx_warn("CVMX_SSO_WA_PCX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000005000ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_SSO_WA_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000005000ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_WQ_INT CVMX_SSO_WQ_INT_FUNC()
/* Address of SSO_WQ_INT; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_WQ_INT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_WQ_INT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001000ull);
}
#else
#define CVMX_SSO_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000001000ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_WQ_INT_CNT(0..63); warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_WQ_INT_CNTX(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 63)))
		cvmx_warn("CVMX_SSO_WQ_INT_CNTX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000008000ull) + ((offset) & 63) * 8;
}
#else
#define CVMX_SSO_WQ_INT_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001670000008000ull) + ((offset) & 63) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_WQ_INT_PC CVMX_SSO_WQ_INT_PC_FUNC()
/* Address of SSO_WQ_INT_PC; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_WQ_INT_PC_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_WQ_INT_PC not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001020ull);
}
#else
#define CVMX_SSO_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000001020ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_WQ_INT_THR(0..63); warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_WQ_INT_THRX(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 63)))
		cvmx_warn("CVMX_SSO_WQ_INT_THRX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8;
}
#else
#define CVMX_SSO_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_SSO_WQ_IQ_DIS CVMX_SSO_WQ_IQ_DIS_FUNC()
/* Address of SSO_WQ_IQ_DIS; warns if the running chip is not CN68XX. */
static inline uint64_t CVMX_SSO_WQ_IQ_DIS_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_SSO_WQ_IQ_DIS not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001670000001010ull);
}
#else
#define CVMX_SSO_WQ_IQ_DIS (CVMX_ADD_IO_SEG(0x0001670000001010ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SSO_WS_PC(0..63); warns on an invalid offset or chip. */
static inline uint64_t CVMX_SSO_WS_PCX(unsigned long offset)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN68XX) && (offset <= 63)))
		cvmx_warn("CVMX_SSO_WS_PCX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001670000004000ull) + ((offset) & 63) * 8;
}
#else
#define CVMX_SSO_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000004000ull) + ((offset) & 63) * 8)
#endif
/**
 * cvmx_sso_active_cycles
 *
 * SSO_ACTIVE_CYCLES = SSO cycles SSO active
 *
 * This register counts every sclk cycle that the SSO clocks are active.
 * **NOTE: Added in pass 2.0
 */
union cvmx_sso_active_cycles {
	uint64_t u64;
	struct cvmx_sso_active_cycles_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t act_cyc                      : 64; /**< Counts number of active cycles. */
#else
	uint64_t act_cyc                      : 64;
#endif
	} s;
	struct cvmx_sso_active_cycles_s       cn68xx;
};
typedef union cvmx_sso_active_cycles cvmx_sso_active_cycles_t;
/**
 * cvmx_sso_bist_stat
 *
 * SSO_BIST_STAT = SSO BIST Status Register
 *
 * Contains the BIST status for the SSO memories ('0' = pass, '1' = fail).
 * Note that PP BIST status is not reported here as it was in previous designs.
 *
 * There may be more for DDR interface buffers.
 * It's possible that a RAM will be used for SSO_PP_QOS_RND.
 */
union cvmx_sso_bist_stat {
	uint64_t u64;
	struct cvmx_sso_bist_stat_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_62_63               : 2;
	uint64_t odu_pref                     : 2;  /**< ODU Prefetch memory BIST status */
	uint64_t reserved_54_59               : 6;
	uint64_t fptr                         : 2;  /**< FPTR memory BIST status */
	uint64_t reserved_45_51               : 7;
	uint64_t rwo_dat                      : 1;  /**< RWO_DAT memory BIST status */
	uint64_t rwo                          : 2;  /**< RWO memory BIST status */
	uint64_t reserved_35_41               : 7;
	uint64_t rwi_dat                      : 1;  /**< RWI_DAT memory BIST status */
	uint64_t reserved_32_33               : 2;
	uint64_t soc                          : 1;  /**< SSO CAM BIST status */
	uint64_t reserved_28_30               : 3;
	uint64_t ncbo                         : 4;  /**< NCBO transmitter memory BIST status */
	uint64_t reserved_21_23               : 3;
	uint64_t index                        : 1;  /**< Index memory BIST status */
	uint64_t reserved_17_19               : 3;
	uint64_t fidx                         : 1;  /**< Forward index memory BIST status */
	uint64_t reserved_10_15               : 6;
	uint64_t pend                         : 2;  /**< Pending switch memory BIST status */
	uint64_t reserved_2_7                 : 6;
	uint64_t oth                          : 2;  /**< WQP, GRP memory BIST status */
#else
	uint64_t oth                          : 2;
	uint64_t reserved_2_7                 : 6;
	uint64_t pend                         : 2;
	uint64_t reserved_10_15               : 6;
	uint64_t fidx                         : 1;
	uint64_t reserved_17_19               : 3;
	uint64_t index                        : 1;
	uint64_t reserved_21_23               : 3;
	uint64_t ncbo                         : 4;
	uint64_t reserved_28_30               : 3;
	uint64_t soc                          : 1;
	uint64_t reserved_32_33               : 2;
	uint64_t rwi_dat                      : 1;
	uint64_t reserved_35_41               : 7;
	uint64_t rwo                          : 2;
	uint64_t rwo_dat                      : 1;
	uint64_t reserved_45_51               : 7;
	uint64_t fptr                         : 2;
	uint64_t reserved_54_59               : 6;
	uint64_t odu_pref                     : 2;
	uint64_t reserved_62_63               : 2;
#endif
	} s;
	struct cvmx_sso_bist_stat_s           cn68xx;
	struct cvmx_sso_bist_stat_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_54_63               : 10;
	uint64_t fptr                         : 2;  /**< FPTR memory BIST status */
	uint64_t reserved_45_51               : 7;
	uint64_t rwo_dat                      : 1;  /**< RWO_DAT memory BIST status */
	uint64_t rwo                          : 2;  /**< RWO memory BIST status */
	uint64_t reserved_35_41               : 7;
	uint64_t rwi_dat                      : 1;  /**< RWI_DAT memory BIST status */
	uint64_t reserved_32_33               : 2;
	uint64_t soc                          : 1;  /**< SSO CAM BIST status */
	uint64_t reserved_28_30               : 3;
	uint64_t ncbo                         : 4;  /**< NCBO transmitter memory BIST status */
	uint64_t reserved_21_23               : 3;
	uint64_t index                        : 1;  /**< Index memory BIST status */
	uint64_t reserved_17_19               : 3;
	uint64_t fidx                         : 1;  /**< Forward index memory BIST status */
	uint64_t reserved_10_15               : 6;
	uint64_t pend                         : 2;  /**< Pending switch memory BIST status */
	uint64_t reserved_2_7                 : 6;
	uint64_t oth                          : 2;  /**< WQP, GRP memory BIST status */
#else
	uint64_t oth                          : 2;
	uint64_t reserved_2_7                 : 6;
	uint64_t pend                         : 2;
	uint64_t reserved_10_15               : 6;
	uint64_t fidx                         : 1;
	uint64_t reserved_17_19               : 3;
	uint64_t index                        : 1;
	uint64_t reserved_21_23               : 3;
	uint64_t ncbo                         : 4;
	uint64_t reserved_28_30               : 3;
	uint64_t soc                          : 1;
	uint64_t reserved_32_33               : 2;
	uint64_t rwi_dat                      : 1;
	uint64_t reserved_35_41               : 7;
	uint64_t rwo                          : 2;
	uint64_t rwo_dat                      : 1;
	uint64_t reserved_45_51               : 7;
	uint64_t fptr                         : 2;
	uint64_t reserved_54_63               : 10;
#endif
	} cn68xxp1;
};
typedef union cvmx_sso_bist_stat cvmx_sso_bist_stat_t;
/**
 * cvmx_sso_cfg
 *
 * SSO_CFG = SSO Config
 *
 * This register is an assortment of various SSO configuration bits.
 */
union cvmx_sso_cfg {
	uint64_t u64;
	struct cvmx_sso_cfg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63               : 48;
	uint64_t qck_gw_rsp_adj               : 3;  /**< Fast GET_WORK response fine adjustment
                                                         Allowed values are 0, 1, and 2 (0 is quickest) */
	uint64_t qck_gw_rsp_dis               : 1;  /**< Disable faster response to GET_WORK */
	uint64_t qck_sw_dis                   : 1;  /**< Disable faster switch to UNSCHEDULED on GET_WORK */
	uint64_t rwq_alloc_dis                : 1;  /**< Disable FPA Alloc Requests when SSO_FPAGE_CNT < 16 */
	uint64_t soc_ccam_dis                 : 1;  /**< Disable power saving SOC conditional CAM
                                                         (**NOTE: Added in pass 2.0) */
	uint64_t sso_cclk_dis                 : 1;  /**< Disable power saving SSO conditional clocking
                                                         (**NOTE: Added in pass 2.0) */
	uint64_t rwo_flush                    : 1;  /**< Flush RWO engine
                                                         Allows outbound NCB entries to go immediately rather
                                                         than waiting for a complete fill packet. This register
                                                         is one-shot and clears itself each time it is set. */
	uint64_t wfe_thr                      : 1;  /**< Use 1 Work-fetch engine (instead of 4) */
	uint64_t rwio_byp_dis                 : 1;  /**< Disable Bypass path in RWI/RWO Engines */
	uint64_t rwq_byp_dis                  : 1;  /**< Disable Bypass path in RWQ Engine */
	uint64_t stt                          : 1;  /**< STT Setting for RW Stores */
	uint64_t ldt                          : 1;  /**< LDT Setting for RW Loads */
	uint64_t dwb                          : 1;  /**< DWB Setting for Return Page Requests
                                                         1 = 2 128B cache pages to issue DWB for
                                                         0 = 0 128B cache pages ro issue DWB for */
	uint64_t rwen                         : 1;  /**< Enable RWI/RWO operations
                                                         This bit should be set after SSO_RWQ_HEAD_PTRX and
                                                         SSO_RWQ_TAIL_PTRX have been programmed. */
#else
	uint64_t rwen                         : 1;
	uint64_t dwb                          : 1;
	uint64_t ldt                          : 1;
	uint64_t stt                          : 1;
	uint64_t rwq_byp_dis                  : 1;
	uint64_t rwio_byp_dis                 : 1;
	uint64_t wfe_thr                      : 1;
	uint64_t rwo_flush                    : 1;
	uint64_t sso_cclk_dis                 : 1;
	uint64_t soc_ccam_dis                 : 1;
	uint64_t rwq_alloc_dis                : 1;
	uint64_t qck_sw_dis                   : 1;
	uint64_t qck_gw_rsp_dis               : 1;
	uint64_t qck_gw_rsp_adj               : 3;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_sso_cfg_s                 cn68xx;
	struct cvmx_sso_cfg_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63                : 56;
	uint64_t rwo_flush                    : 1;  /**< Flush RWO engine
                                                         Allows outbound NCB entries to go immediately rather
                                                         than waiting for a complete fill packet. This register
                                                         is one-shot and clears itself each time it is set. */
	uint64_t wfe_thr                      : 1;  /**< Use 1 Work-fetch engine (instead of 4) */
	uint64_t rwio_byp_dis                 : 1;  /**< Disable Bypass path in RWI/RWO Engines */
	uint64_t rwq_byp_dis                  : 1;  /**< Disable Bypass path in RWQ Engine */
	uint64_t stt                          : 1;  /**< STT Setting for RW Stores */
	uint64_t ldt                          : 1;  /**< LDT Setting for RW Loads */
	uint64_t dwb                          : 1;  /**< DWB Setting for Return Page Requests
                                                         1 = 2 128B cache pages to issue DWB for
                                                         0 = 0 128B cache pages ro issue DWB for */
	uint64_t rwen                         : 1;  /**< Enable RWI/RWO operations
                                                         This bit should be set after SSO_RWQ_HEAD_PTRX and
                                                         SSO_RWQ_TAIL_PTRX have been programmed. */
#else
	uint64_t rwen                         : 1;
	uint64_t dwb                          : 1;
	uint64_t ldt                          : 1;
	uint64_t stt                          : 1;
	uint64_t rwq_byp_dis                  : 1;
	uint64_t rwio_byp_dis                 : 1;
	uint64_t wfe_thr                      : 1;
	uint64_t rwo_flush                    : 1;
	uint64_t reserved_8_63                : 56;
#endif
	} cn68xxp1;
};
typedef union cvmx_sso_cfg cvmx_sso_cfg_t;
/**
 * cvmx_sso_ds_pc
 *
 * SSO_DS_PC = SSO De-Schedule Performance Counter
 *
 * Counts the number of de-schedule requests.
 * Counter rolls over through zero when max value exceeded.
 */
union cvmx_sso_ds_pc {
	uint64_t u64;
	struct cvmx_sso_ds_pc_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t ds_pc                        : 64; /**< De-schedule performance counter */
#else
	uint64_t ds_pc                        : 64;
#endif
	} s;
	struct cvmx_sso_ds_pc_s               cn68xx;
	struct cvmx_sso_ds_pc_s               cn68xxp1;
};
typedef union cvmx_sso_ds_pc cvmx_sso_ds_pc_t;
/**
 * cvmx_sso_err
 *
 * SSO_ERR = SSO Error Register
 *
 * Contains ECC and other misc error bits.
 *
 * <45> The free page error bit will assert when SSO_FPAGE_CNT <= 16 and
 *      SSO_CFG[RWEN] is 1.  Software will want to disable the interrupt
 *      associated with this error when recovering SSO pointers from the
 *      FPA and SSO.
 *
 * This register also contains the illegal operation error bits:
 *
 * <42> Received ADDWQ with tag specified as EMPTY
 * <41> Received illegal opcode
 * <40> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE
 *      from WS with CLR_NSCHED pending
 * <39> Received CLR_NSCHED
 *      from WS with SWTAG_DESCH/DESCH/CLR_NSCHED pending
 * <38> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE
 *      from WS with ALLOC_WE pending
 * <37> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/ALLOC_WE/CLR_NSCHED
 *      from WS with GET_WORK pending
 * <36> Received SWTAG_FULL/SWTAG_DESCH
 *      with tag specified as UNSCHEDULED
 * <35> Received SWTAG/SWTAG_FULL/SWTAG_DESCH
 *      with tag specified as EMPTY
 * <34> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/GET_WORK
 *      from WS with pending tag switch to ORDERED or ATOMIC
 * <33> Received SWTAG/SWTAG_DESCH/DESCH/UPD_WQP
 *      from WS in UNSCHEDULED state
 * <32> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP
 *      from WS in EMPTY state
 */
union cvmx_sso_err {
	uint64_t u64;
	struct cvmx_sso_err_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63               : 16;
	uint64_t bfp                          : 1;  /**< Bad Fill Packet error
                                                         Last byte of the fill packet did not match 8'h1a */
	uint64_t awe                          : 1;  /**< Out-of-memory error (ADDWQ Request is dropped) */
	uint64_t fpe                          : 1;  /**< Free page error */
	uint64_t reserved_43_44               : 2;
	uint64_t iop                          : 11; /**< Illegal operation errors */
	uint64_t reserved_12_31               : 20;
	uint64_t pnd_dbe0                     : 1;  /**< Double bit error for even PND RAM */
	uint64_t pnd_sbe0                     : 1;  /**< Single bit error for even PND RAM */
	uint64_t pnd_dbe1                     : 1;  /**< Double bit error for odd PND RAM */
	uint64_t pnd_sbe1                     : 1;  /**< Single bit error for odd PND RAM */
	uint64_t oth_dbe0                     : 1;  /**< Double bit error for even OTH RAM */
	uint64_t oth_sbe0                     : 1;  /**< Single bit error for even OTH RAM */
	uint64_t oth_dbe1                     : 1;  /**< Double bit error for odd OTH RAM */
	uint64_t oth_sbe1                     : 1;  /**< Single bit error for odd OTH RAM */
	uint64_t idx_dbe                      : 1;  /**< Double bit error for IDX RAM */
	uint64_t idx_sbe                      : 1;  /**< Single bit error for IDX RAM */
	uint64_t fidx_dbe                     : 1;  /**< Double bit error for FIDX RAM */
	uint64_t fidx_sbe                     : 1;  /**< Single bit error for FIDX RAM */
#else
	uint64_t fidx_sbe                     : 1;
	uint64_t fidx_dbe                     : 1;
	uint64_t idx_sbe                      : 1;
	uint64_t idx_dbe                      : 1;
	uint64_t oth_sbe1                     : 1;
	uint64_t oth_dbe1                     : 1;
	uint64_t oth_sbe0                     : 1;
	uint64_t oth_dbe0                     : 1;
	uint64_t pnd_sbe1                     : 1;
	uint64_t pnd_dbe1                     : 1;
	uint64_t pnd_sbe0                     : 1;
	uint64_t pnd_dbe0                     : 1;
	uint64_t reserved_12_31               : 20;
	uint64_t iop                          : 11;
	uint64_t reserved_43_44               : 2;
	uint64_t fpe                          : 1;
	uint64_t awe                          : 1;
	uint64_t bfp                          : 1;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	struct cvmx_sso_err_s                 cn68xx;
	struct cvmx_sso_err_s                 cn68xxp1;
};
typedef union cvmx_sso_err cvmx_sso_err_t;
/**
 * cvmx_sso_err_enb
 *
 * SSO_ERR_ENB = SSO Error Enable Register
 *
 * Contains the interrupt enables corresponding to SSO_ERR.
 */
union cvmx_sso_err_enb {
	uint64_t u64;
	struct cvmx_sso_err_enb_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63               : 16;
	uint64_t bfp_ie                       : 1;  /**< Bad Fill Packet error interrupt enable */
	uint64_t awe_ie                       : 1;  /**< Add-work error interrupt enable */
	uint64_t fpe_ie                       : 1;  /**< Free Page error interrupt enable */
	uint64_t reserved_43_44               : 2;
	uint64_t iop_ie                       : 11; /**< Illegal operation interrupt enables */
	uint64_t reserved_12_31               : 20;
	uint64_t pnd_dbe0_ie                  : 1;  /**< Double bit error interrupt enable for even PND RAM */
	uint64_t pnd_sbe0_ie                  : 1;  /**< Single bit error interrupt enable for even PND RAM */
	uint64_t pnd_dbe1_ie                  : 1;  /**< Double bit error interrupt enable for odd PND RAM */
	uint64_t pnd_sbe1_ie                  : 1;  /**< Single bit error interrupt enable for odd PND RAM */
	uint64_t oth_dbe0_ie                  : 1;  /**< Double bit error interrupt enable for even OTH RAM */
	uint64_t oth_sbe0_ie                  : 1;  /**< Single bit error interrupt enable for even OTH RAM */
	uint64_t oth_dbe1_ie                  : 1;  /**< Double bit error interrupt enable for odd OTH RAM */
	uint64_t oth_sbe1_ie                  : 1;  /**< Single bit error interrupt enable for odd OTH RAM */
	uint64_t idx_dbe_ie                   : 1;  /**< Double bit error interrupt enable for IDX RAM */
	uint64_t idx_sbe_ie                   : 1;  /**< Single bit error interrupt enable for IDX RAM */
	uint64_t fidx_dbe_ie                  : 1;  /**< Double bit error interrupt enable for FIDX RAM */
	uint64_t fidx_sbe_ie                  : 1;  /**< Single bit error interrupt enable for FIDX RAM */
#else
	uint64_t fidx_sbe_ie                  : 1;
	uint64_t fidx_dbe_ie                  : 1;
	uint64_t idx_sbe_ie                   : 1;
	uint64_t idx_dbe_ie                   : 1;
	uint64_t oth_sbe1_ie                  : 1;
	uint64_t oth_dbe1_ie                  : 1;
	uint64_t oth_sbe0_ie                  : 1;
	uint64_t oth_dbe0_ie                  : 1;
	uint64_t pnd_sbe1_ie                  : 1;
	uint64_t pnd_dbe1_ie                  : 1;
	uint64_t pnd_sbe0_ie                  : 1;
	uint64_t pnd_dbe0_ie                  : 1;
	uint64_t reserved_12_31               : 20;
	uint64_t iop_ie                       : 11;
	uint64_t reserved_43_44               : 2;
	uint64_t fpe_ie                       : 1;
	uint64_t awe_ie                       : 1;
	uint64_t bfp_ie                       : 1;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	struct cvmx_sso_err_enb_s             cn68xx;
	struct cvmx_sso_err_enb_s             cn68xxp1;
};
typedef union cvmx_sso_err_enb cvmx_sso_err_enb_t;
/**
 * cvmx_sso_fidx_ecc_ctl
 *
 * SSO_FIDX_ECC_CTL = SSO FIDX ECC Control
 *
 */
union cvmx_sso_fidx_ecc_ctl {
	uint64_t u64;
	struct cvmx_sso_fidx_ecc_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_3_63                : 61;
	uint64_t flip_synd                    : 2;  /**< Testing feature. Flip Syndrom to generate single or
                                                         double bit error for the FIDX RAM. */
	uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 5 bit ECC
                                                         correct logic for the FIDX RAM. */
#else
	uint64_t ecc_ena                      : 1;
	uint64_t flip_synd                    : 2;
	uint64_t reserved_3_63                : 61;
#endif
	} s;
	struct cvmx_sso_fidx_ecc_ctl_s        cn68xx;
	struct cvmx_sso_fidx_ecc_ctl_s        cn68xxp1;
};
typedef union cvmx_sso_fidx_ecc_ctl cvmx_sso_fidx_ecc_ctl_t;
/**
 * cvmx_sso_fidx_ecc_st
 *
 * SSO_FIDX_ECC_ST = SSO FIDX ECC Status
 *
 */
union cvmx_sso_fidx_ecc_st {
	uint64_t u64;
	struct cvmx_sso_fidx_ecc_st_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_27_63 : 37;
		uint64_t addr : 11; /**< Latches the address of the latest SBE/DBE that
					 occurred in the FIDX RAM */
		uint64_t reserved_9_15 : 7;
		uint64_t syndrom : 5; /**< Reports the latest error syndrome for the
					   FIDX RAM */
		uint64_t reserved_0_3 : 4;
#else
		uint64_t reserved_0_3 : 4;
		uint64_t syndrom : 5;
		uint64_t reserved_9_15 : 7;
		uint64_t addr : 11;
		uint64_t reserved_27_63 : 37;
#endif
	} s;
	struct cvmx_sso_fidx_ecc_st_s cn68xx;
	struct cvmx_sso_fidx_ecc_st_s cn68xxp1;
};
typedef union cvmx_sso_fidx_ecc_st cvmx_sso_fidx_ecc_st_t;
/**
 * cvmx_sso_fpage_cnt
 *
 * SSO_FPAGE_CNT = SSO Free Page Cnt
 *
 * This register keeps track of the number of free page pointers available for use in external memory.
 */
union cvmx_sso_fpage_cnt {
	uint64_t u64;
	struct cvmx_sso_fpage_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63 : 32;
		uint64_t fpage_cnt : 32; /**< Free page count.
					      HW updates this register. Writes to this register
					      are only for diagnostic purposes. */
#else
		uint64_t fpage_cnt : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_sso_fpage_cnt_s cn68xx;
	struct cvmx_sso_fpage_cnt_s cn68xxp1;
};
typedef union cvmx_sso_fpage_cnt cvmx_sso_fpage_cnt_t;
/**
 * cvmx_sso_gwe_cfg
 *
 * SSO_GWE_CFG = SSO Get-Work Examiner Configuration
 *
 * This register controls the operation of the Get-Work Examiner (GWE).
 */
union cvmx_sso_gwe_cfg {
	uint64_t u64;
	struct cvmx_sso_gwe_cfg_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_12_63 : 52;
		uint64_t odu_ffpgw_dis : 1; /**< Disable flushing ODU on periodic restart of GWE */
		uint64_t gwe_rfpgw_dis : 1; /**< Disable periodic restart of GWE for pending get_work */
		uint64_t odu_prf_dis : 1; /**< Disable ODU-initiated prefetches of WQEs into L2C.
					       For diagnostic use only. */
		uint64_t odu_bmp_dis : 1; /**< Disable ODU bumps. If SSO_PP_STRICT is true, could
					       prevent forward progress under some circumstances.
					       For diagnostic use only. */
		uint64_t reserved_5_7 : 3;
		uint64_t gwe_hvy_dis : 1; /**< Disable GWE automatic, proportional weight-increase
					       mechanism and use SSO_QOSX_RND values as-is.
					       For diagnostic use only. */
		uint64_t gwe_poe : 1; /**< Pause GWE on extracts. For diagnostic use only. */
		uint64_t gwe_fpor : 1; /**< Flush GWE pipeline when restarting GWE.
					    For diagnostic use only. */
		uint64_t gwe_rah : 1; /**< Begin at head of input queues when restarting GWE.
					   For diagnostic use only. */
		uint64_t gwe_dis : 1; /**< Disable Get-Work Examiner */
#else
		uint64_t gwe_dis : 1;
		uint64_t gwe_rah : 1;
		uint64_t gwe_fpor : 1;
		uint64_t gwe_poe : 1;
		uint64_t gwe_hvy_dis : 1;
		uint64_t reserved_5_7 : 3;
		uint64_t odu_bmp_dis : 1;
		uint64_t odu_prf_dis : 1;
		uint64_t gwe_rfpgw_dis : 1;
		uint64_t odu_ffpgw_dis : 1;
		uint64_t reserved_12_63 : 52;
#endif
	} s;
	struct cvmx_sso_gwe_cfg_s cn68xx;
	/* CN68XX pass 1 lacks the ODU/heavy-weight control bits. */
	struct cvmx_sso_gwe_cfg_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_4_63 : 60;
		uint64_t gwe_poe : 1; /**< Pause GWE on extracts. For diagnostic use only. */
		uint64_t gwe_fpor : 1; /**< Flush GWE pipeline when restarting GWE.
					    For diagnostic use only. */
		uint64_t gwe_rah : 1; /**< Begin at head of input queues when restarting GWE.
					   For diagnostic use only. */
		uint64_t gwe_dis : 1; /**< Disable Get-Work Examiner */
#else
		uint64_t gwe_dis : 1;
		uint64_t gwe_rah : 1;
		uint64_t gwe_fpor : 1;
		uint64_t gwe_poe : 1;
		uint64_t reserved_4_63 : 60;
#endif
	} cn68xxp1;
};
typedef union cvmx_sso_gwe_cfg cvmx_sso_gwe_cfg_t;
/**
 * cvmx_sso_idx_ecc_ctl
 *
 * SSO_IDX_ECC_CTL = SSO IDX ECC Control
 *
 */
union cvmx_sso_idx_ecc_ctl {
	uint64_t u64;
	struct cvmx_sso_idx_ecc_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_3_63 : 61;
		uint64_t flip_synd : 2; /**< Testing feature. Flip syndrome to generate single or
					     double bit error for the IDX RAM. */
		uint64_t ecc_ena : 1; /**< ECC enable: when set, enables the 5-bit ECC
					   correct logic for the IDX RAM. */
#else
		uint64_t ecc_ena : 1;
		uint64_t flip_synd : 2;
		uint64_t reserved_3_63 : 61;
#endif
	} s;
	struct cvmx_sso_idx_ecc_ctl_s cn68xx;
	struct cvmx_sso_idx_ecc_ctl_s cn68xxp1;
};
typedef union cvmx_sso_idx_ecc_ctl cvmx_sso_idx_ecc_ctl_t;
/**
 * cvmx_sso_idx_ecc_st
 *
 * SSO_IDX_ECC_ST = SSO IDX ECC Status
 *
 */
union cvmx_sso_idx_ecc_st {
	uint64_t u64;
	struct cvmx_sso_idx_ecc_st_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_27_63 : 37;
		uint64_t addr : 11; /**< Latches the address of the latest SBE/DBE that
					 occurred in the IDX RAM */
		uint64_t reserved_9_15 : 7;
		uint64_t syndrom : 5; /**< Reports the latest error syndrome for the
					   IDX RAM */
		uint64_t reserved_0_3 : 4;
#else
		uint64_t reserved_0_3 : 4;
		uint64_t syndrom : 5;
		uint64_t reserved_9_15 : 7;
		uint64_t addr : 11;
		uint64_t reserved_27_63 : 37;
#endif
	} s;
	struct cvmx_sso_idx_ecc_st_s cn68xx;
	struct cvmx_sso_idx_ecc_st_s cn68xxp1;
};
typedef union cvmx_sso_idx_ecc_st cvmx_sso_idx_ecc_st_t;
/**
 * cvmx_sso_iq_cnt#
 *
 * CSR reserved addresses: (64): 0x8200..0x83f8
 * CSR align addresses: ===========================================================================================================
 * SSO_IQ_CNTX = SSO Input Queue Count Register
 * (one per QOS level)
 *
 * Contains a read-only count of the number of work queue entries for each QOS
 * level. Counts both in-unit and in-memory entries.
 */
union cvmx_sso_iq_cntx {
	uint64_t u64;
	struct cvmx_sso_iq_cntx_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63 : 32;
		uint64_t iq_cnt : 32; /**< Input queue count for QOS level X */
#else
		uint64_t iq_cnt : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_sso_iq_cntx_s cn68xx;
	struct cvmx_sso_iq_cntx_s cn68xxp1;
};
typedef union cvmx_sso_iq_cntx cvmx_sso_iq_cntx_t;
/**
 * cvmx_sso_iq_com_cnt
 *
 * SSO_IQ_COM_CNT = SSO Input Queue Combined Count Register
 *
 * Contains a read-only count of the total number of work queue entries in all
 * QOS levels. Counts both in-unit and in-memory entries.
 */
union cvmx_sso_iq_com_cnt {
	uint64_t u64;
	struct cvmx_sso_iq_com_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63 : 32;
		uint64_t iq_cnt : 32; /**< Input queue combined count */
#else
		uint64_t iq_cnt : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_sso_iq_com_cnt_s cn68xx;
	struct cvmx_sso_iq_com_cnt_s cn68xxp1;
};
typedef union cvmx_sso_iq_com_cnt cvmx_sso_iq_com_cnt_t;
/**
 * cvmx_sso_iq_int
 *
 * SSO_IQ_INT = SSO Input Queue Interrupt Register
 *
 * Contains the bits (one per QOS level) that can trigger the input queue
 * interrupt. An IQ_INT bit will be set if SSO_IQ_CNT#QOS# changes and the
 * resulting value is equal to SSO_IQ_THR#QOS#.
 */
union cvmx_sso_iq_int {
	uint64_t u64;
	struct cvmx_sso_iq_int_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_8_63 : 56;
		uint64_t iq_int : 8; /**< Input queue interrupt bits */
#else
		uint64_t iq_int : 8;
		uint64_t reserved_8_63 : 56;
#endif
	} s;
	struct cvmx_sso_iq_int_s cn68xx;
	struct cvmx_sso_iq_int_s cn68xxp1;
};
typedef union cvmx_sso_iq_int cvmx_sso_iq_int_t;
/**
 * cvmx_sso_iq_int_en
 *
 * SSO_IQ_INT_EN = SSO Input Queue Interrupt Enable Register
 *
 * Contains the bits (one per QOS level) that enable the input queue interrupt.
 */
union cvmx_sso_iq_int_en {
	uint64_t u64;
	struct cvmx_sso_iq_int_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_8_63 : 56;
		uint64_t int_en : 8; /**< Input queue interrupt enable bits */
#else
		uint64_t int_en : 8;
		uint64_t reserved_8_63 : 56;
#endif
	} s;
	struct cvmx_sso_iq_int_en_s cn68xx;
	struct cvmx_sso_iq_int_en_s cn68xxp1;
};
typedef union cvmx_sso_iq_int_en cvmx_sso_iq_int_en_t;
/**
 * cvmx_sso_iq_thr#
 *
 * CSR reserved addresses: (24): 0x9040..0x90f8
 * CSR align addresses: ===========================================================================================================
 * SSO_IQ_THRX = SSO Input Queue Threshold Register
 * (one per QOS level)
 *
 * Threshold value for triggering input queue interrupts.
 */
union cvmx_sso_iq_thrx {
	uint64_t u64;
	struct cvmx_sso_iq_thrx_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63 : 32;
		uint64_t iq_thr : 32; /**< Input queue threshold for QOS level X */
#else
		uint64_t iq_thr : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_sso_iq_thrx_s cn68xx;
	struct cvmx_sso_iq_thrx_s cn68xxp1;
};
typedef union cvmx_sso_iq_thrx cvmx_sso_iq_thrx_t;
/**
 * cvmx_sso_nos_cnt
 *
 * SSO_NOS_CNT = SSO No-schedule Count Register
 *
 * Contains the number of work queue entries on the no-schedule list.
 */
union cvmx_sso_nos_cnt {
	uint64_t u64;
	struct cvmx_sso_nos_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_12_63 : 52;
		uint64_t nos_cnt : 12; /**< Number of work queue entries on the no-schedule list */
#else
		uint64_t nos_cnt : 12;
		uint64_t reserved_12_63 : 52;
#endif
	} s;
	struct cvmx_sso_nos_cnt_s cn68xx;
	struct cvmx_sso_nos_cnt_s cn68xxp1;
};
typedef union cvmx_sso_nos_cnt cvmx_sso_nos_cnt_t;
/**
 * cvmx_sso_nw_tim
 *
 * SSO_NW_TIM = SSO New Work Timer Period Register
 *
 * Sets the minimum period for a new work request timeout. Period is specified
 * in n-1 notation where the increment value is 1024 clock cycles. Thus, a
 * value of 0x0 in this register translates to 1024 cycles, 0x1 translates to
 * 2048 cycles, 0x2 translates to 3072 cycles, etc... Note: the maximum period
 * for a new work request timeout is 2 times the minimum period. Note: the new
 * work request timeout counter is reset when this register is written.
 *
 * There are two new work request timeout cases:
 *
 * - WAIT bit clear. The new work request can timeout if the timer expires
 *   before the pre-fetch engine has reached the end of all work queues. This
 *   can occur if the executable work queue entry is deep in the queue and the
 *   pre-fetch engine is subject to many resets (i.e. high switch, de-schedule,
 *   or new work load from other PP's). Thus, it is possible for a PP to
 *   receive a work response with the NO_WORK bit set even though there was at
 *   least one executable entry in the work queues. The other (and typical)
 *   scenario for receiving a NO_WORK response with the WAIT bit clear is that
 *   the pre-fetch engine has reached the end of all work queues without
 *   finding executable work.
 *
 * - WAIT bit set. The new work request can timeout if the timer expires
 *   before the pre-fetch engine has found executable work. In this case, the
 *   only scenario where the PP will receive a work response with the NO_WORK
 *   bit set is if the timer expires. Note: it is still possible for a PP to
 *   receive a NO_WORK response even though there was at least one executable
 *   entry in the work queues.
 *
 * In either case, it's important to note that switches and de-schedules are
 * higher priority operations that can cause the pre-fetch engine to reset.
 * Thus in a system with many switches or de-schedules occurring, it's possible
 * for the new work timer to expire (resulting in NO_WORK responses) before the
 * pre-fetch engine is able to get very deep into the work queues.
 */
union cvmx_sso_nw_tim {
	uint64_t u64;
	struct cvmx_sso_nw_tim_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_10_63 : 54;
		uint64_t nw_tim : 10; /**< New work timer period */
#else
		uint64_t nw_tim : 10;
		uint64_t reserved_10_63 : 54;
#endif
	} s;
	struct cvmx_sso_nw_tim_s cn68xx;
	struct cvmx_sso_nw_tim_s cn68xxp1;
};
typedef union cvmx_sso_nw_tim cvmx_sso_nw_tim_t;
/**
 * cvmx_sso_oth_ecc_ctl
 *
 * SSO_OTH_ECC_CTL = SSO OTH ECC Control
 *
 */
union cvmx_sso_oth_ecc_ctl {
	uint64_t u64;
	struct cvmx_sso_oth_ecc_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_6_63 : 58;
		uint64_t flip_synd1 : 2; /**< Testing feature. Flip syndrome to generate single or
					      double bit error for the odd OTH RAM. */
		uint64_t ecc_ena1 : 1; /**< ECC enable: when set, enables the 7-bit ECC
					    correct logic for the odd OTH RAM. */
		uint64_t flip_synd0 : 2; /**< Testing feature. Flip syndrome to generate single or
					      double bit error for the even OTH RAM. */
		uint64_t ecc_ena0 : 1; /**< ECC enable: when set, enables the 7-bit ECC
					    correct logic for the even OTH RAM. */
#else
		uint64_t ecc_ena0 : 1;
		uint64_t flip_synd0 : 2;
		uint64_t ecc_ena1 : 1;
		uint64_t flip_synd1 : 2;
		uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_sso_oth_ecc_ctl_s cn68xx;
	struct cvmx_sso_oth_ecc_ctl_s cn68xxp1;
};
typedef union cvmx_sso_oth_ecc_ctl cvmx_sso_oth_ecc_ctl_t;
/**
 * cvmx_sso_oth_ecc_st
 *
 * SSO_OTH_ECC_ST = SSO OTH ECC Status
 *
 */
union cvmx_sso_oth_ecc_st {
	uint64_t u64;
	struct cvmx_sso_oth_ecc_st_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_59_63 : 5;
		uint64_t addr1 : 11; /**< Latches the address of the latest SBE/DBE that
					  occurred for the odd OTH RAM */
		uint64_t reserved_43_47 : 5;
		uint64_t syndrom1 : 7; /**< Reports the latest error syndrome for the odd
					    OTH RAM */
		uint64_t reserved_27_35 : 9;
		uint64_t addr0 : 11; /**< Latches the address of the latest SBE/DBE that
					  occurred for the even OTH RAM */
		uint64_t reserved_11_15 : 5;
		uint64_t syndrom0 : 7; /**< Reports the latest error syndrome for the even
					    OTH RAM */
		uint64_t reserved_0_3 : 4;
#else
		uint64_t reserved_0_3 : 4;
		uint64_t syndrom0 : 7;
		uint64_t reserved_11_15 : 5;
		uint64_t addr0 : 11;
		uint64_t reserved_27_35 : 9;
		uint64_t syndrom1 : 7;
		uint64_t reserved_43_47 : 5;
		uint64_t addr1 : 11;
		uint64_t reserved_59_63 : 5;
#endif
	} s;
	struct cvmx_sso_oth_ecc_st_s cn68xx;
	struct cvmx_sso_oth_ecc_st_s cn68xxp1;
};
typedef union cvmx_sso_oth_ecc_st cvmx_sso_oth_ecc_st_t;
/**
 * cvmx_sso_pnd_ecc_ctl
 *
 * SSO_PND_ECC_CTL = SSO PND ECC Control
 *
 */
union cvmx_sso_pnd_ecc_ctl {
	uint64_t u64;
	struct cvmx_sso_pnd_ecc_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_6_63 : 58;
		uint64_t flip_synd1 : 2; /**< Testing feature. Flip syndrome to generate single or
					      double bit error for the odd PND RAM. */
		uint64_t ecc_ena1 : 1; /**< ECC enable: when set, enables the 7-bit ECC
					    correct logic for the odd PND RAM. */
		uint64_t flip_synd0 : 2; /**< Testing feature. Flip syndrome to generate single or
					      double bit error for the even PND RAM. */
		uint64_t ecc_ena0 : 1; /**< ECC enable: when set, enables the 7-bit ECC
					    correct logic for the even PND RAM. */
#else
		uint64_t ecc_ena0 : 1;
		uint64_t flip_synd0 : 2;
		uint64_t ecc_ena1 : 1;
		uint64_t flip_synd1 : 2;
		uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_sso_pnd_ecc_ctl_s cn68xx;
	struct cvmx_sso_pnd_ecc_ctl_s cn68xxp1;
};
typedef union cvmx_sso_pnd_ecc_ctl cvmx_sso_pnd_ecc_ctl_t;
/**
 * cvmx_sso_pnd_ecc_st
 *
 * SSO_PND_ECC_ST = SSO PND ECC Status
 *
 */
union cvmx_sso_pnd_ecc_st {
	uint64_t u64;
	struct cvmx_sso_pnd_ecc_st_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_59_63 : 5;
		uint64_t addr1 : 11; /**< Latches the address of the latest SBE/DBE that
					  occurred for the odd PND RAM */
		uint64_t reserved_43_47 : 5;
		uint64_t syndrom1 : 7; /**< Reports the latest error syndrome for the odd
					    PND RAM */
		uint64_t reserved_27_35 : 9;
		uint64_t addr0 : 11; /**< Latches the address of the latest SBE/DBE that
					  occurred for the even PND RAM */
		uint64_t reserved_11_15 : 5;
		uint64_t syndrom0 : 7; /**< Reports the latest error syndrome for the even
					    PND RAM */
		uint64_t reserved_0_3 : 4;
#else
		uint64_t reserved_0_3 : 4;
		uint64_t syndrom0 : 7;
		uint64_t reserved_11_15 : 5;
		uint64_t addr0 : 11;
		uint64_t reserved_27_35 : 9;
		uint64_t syndrom1 : 7;
		uint64_t reserved_43_47 : 5;
		uint64_t addr1 : 11;
		uint64_t reserved_59_63 : 5;
#endif
	} s;
	struct cvmx_sso_pnd_ecc_st_s cn68xx;
	struct cvmx_sso_pnd_ecc_st_s cn68xxp1;
};
typedef union cvmx_sso_pnd_ecc_st cvmx_sso_pnd_ecc_st_t;
/**
 * cvmx_sso_pp#_grp_msk
 *
 * CSR reserved addresses: (24): 0x5040..0x50f8
 * CSR align addresses: ===========================================================================================================
 * SSO_PPX_GRP_MSK = SSO PP Group Mask Register
 * (one bit per group per PP)
 *
 * Selects which group(s) a PP belongs to. A '1' in any bit position sets the
 * PP's membership in the corresponding group. A value of 0x0 will prevent the
 * PP from receiving new work.
 *
 * Note that these do not contain QOS level priorities for each PP. This is a
 * change from previous POW designs.
 */
union cvmx_sso_ppx_grp_msk {
	uint64_t u64;
	struct cvmx_sso_ppx_grp_msk_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t grp_msk : 64; /**< PPX group mask */
#else
		uint64_t grp_msk : 64;
#endif
	} s;
	struct cvmx_sso_ppx_grp_msk_s cn68xx;
	struct cvmx_sso_ppx_grp_msk_s cn68xxp1;
};
typedef union cvmx_sso_ppx_grp_msk cvmx_sso_ppx_grp_msk_t;
/**
 * cvmx_sso_pp#_qos_pri
 *
 * CSR reserved addresses: (56): 0x2040..0x21f8
 * CSR align addresses: ===========================================================================================================
 * SSO_PP(0..31)_QOS_PRI = SSO PP QOS Priority Register
 * (one field per IQ per PP)
 *
 * Contains the QOS level priorities for each PP.
 * 0x0 is the highest priority
 * 0x7 is the lowest priority
 * 0xf prevents the PP from receiving work from that QOS level
 *
 * For a given PP, priorities should begin at 0x0, and remain contiguous
 * throughout the range. Failure to do so may result in severe
 * performance degradation.
 *
 * Priorities for IQs 0..7
 */
union cvmx_sso_ppx_qos_pri {
	uint64_t u64;
	struct cvmx_sso_ppx_qos_pri_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_60_63 : 4;
		uint64_t qos7_pri : 4; /**< QOS7 priority for PPX */
		uint64_t reserved_52_55 : 4;
		uint64_t qos6_pri : 4; /**< QOS6 priority for PPX */
		uint64_t reserved_44_47 : 4;
		uint64_t qos5_pri : 4; /**< QOS5 priority for PPX */
		uint64_t reserved_36_39 : 4;
		uint64_t qos4_pri : 4; /**< QOS4 priority for PPX */
		uint64_t reserved_28_31 : 4;
		uint64_t qos3_pri : 4; /**< QOS3 priority for PPX */
		uint64_t reserved_20_23 : 4;
		uint64_t qos2_pri : 4; /**< QOS2 priority for PPX */
		uint64_t reserved_12_15 : 4;
		uint64_t qos1_pri : 4; /**< QOS1 priority for PPX */
		uint64_t reserved_4_7 : 4;
		uint64_t qos0_pri : 4; /**< QOS0 priority for PPX */
#else
		uint64_t qos0_pri : 4;
		uint64_t reserved_4_7 : 4;
		uint64_t qos1_pri : 4;
		uint64_t reserved_12_15 : 4;
		uint64_t qos2_pri : 4;
		uint64_t reserved_20_23 : 4;
		uint64_t qos3_pri : 4;
		uint64_t reserved_28_31 : 4;
		uint64_t qos4_pri : 4;
		uint64_t reserved_36_39 : 4;
		uint64_t qos5_pri : 4;
		uint64_t reserved_44_47 : 4;
		uint64_t qos6_pri : 4;
		uint64_t reserved_52_55 : 4;
		uint64_t qos7_pri : 4;
		uint64_t reserved_60_63 : 4;
#endif
	} s;
	struct cvmx_sso_ppx_qos_pri_s cn68xx;
	struct cvmx_sso_ppx_qos_pri_s cn68xxp1;
};
typedef union cvmx_sso_ppx_qos_pri cvmx_sso_ppx_qos_pri_t;
/**
 * cvmx_sso_pp_strict
 *
 * SSO_PP_STRICT = SSO Strict Priority
 *
 * This register controls getting work from the input queues. If the bit
 * corresponding to a PP is set, that PP will not take work off the input
 * queues until it is known that there is no higher-priority work available.
 *
 * Setting SSO_PP_STRICT may incur a performance penalty if highest-priority
 * work is not found early.
 *
 * It is possible to starve a PP of work with SSO_PP_STRICT. If the
 * SSO_PPX_GRP_MSK for a PP masks-out much of the work added to the input
 * queues that are higher-priority for that PP, and if there is a constant
 * stream of work through one or more of those higher-priority input queues,
 * then that PP may not accept work from lower-priority input queues. This can
 * be alleviated by ensuring that most or all the work added to the
 * higher-priority input queues for a PP with SSO_PP_STRICT set are in a group
 * acceptable to that PP.
 *
 * It is also possible to neglect work in an input queue if SSO_PP_STRICT is
 * used. If an input queue is a lower-priority queue for all PPs, and if all
 * the PPs have their corresponding bit in SSO_PP_STRICT set, then work may
 * never be taken (or be seldom taken) from that queue. This can be alleviated
 * by ensuring that work in all input queues can be serviced by one or more PPs
 * that do not have SSO_PP_STRICT set, or that the input queue is the
 * highest-priority input queue for one or more PPs that do have SSO_PP_STRICT
 * set.
 */
union cvmx_sso_pp_strict {
	uint64_t u64;
	struct cvmx_sso_pp_strict_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63 : 32;
		uint64_t pp_strict : 32; /**< Corresponding PP operates in strict mode. */
#else
		uint64_t pp_strict : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_sso_pp_strict_s cn68xx;
	struct cvmx_sso_pp_strict_s cn68xxp1;
};
typedef union cvmx_sso_pp_strict cvmx_sso_pp_strict_t;
/**
 * cvmx_sso_qos#_rnd
 *
 * CSR align addresses: ===========================================================================================================
 * SSO_QOS(0..7)_RND = SSO QOS Issue Round Register
 *
 * The number of arbitration rounds each QOS level participates in.
 */
union cvmx_sso_qosx_rnd {
	uint64_t u64;
	struct cvmx_sso_qosx_rnd_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_8_63 : 56;
		uint64_t rnds_qos : 8; /**< Number of rounds to participate in for IQ(X). */
#else
		uint64_t rnds_qos : 8;
		uint64_t reserved_8_63 : 56;
#endif
	} s;
	struct cvmx_sso_qosx_rnd_s cn68xx;
	struct cvmx_sso_qosx_rnd_s cn68xxp1;
};
typedef union cvmx_sso_qosx_rnd cvmx_sso_qosx_rnd_t;
/**
 * cvmx_sso_qos_thr#
 *
 * CSR reserved addresses: (24): 0xa040..0xa0f8
 * CSR align addresses: ===========================================================================================================
 * SSO_QOS_THRX = SSO QOS Threshold Register
 * (one per QOS level)
 *
 * Contains the thresholds for allocating SSO internal storage buffers. If the
 * number of remaining free buffers drops below the minimum threshold (MIN_THR)
 * or the number of allocated buffers for this QOS level rises above the
 * maximum threshold (MAX_THR), future incoming work queue entries will be
 * buffered externally rather than internally. This register also contains the
 * number of internal buffers currently allocated to this QOS level (BUF_CNT).
 */
union cvmx_sso_qos_thrx {
	uint64_t u64;
	struct cvmx_sso_qos_thrx_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_40_63 : 24;
		uint64_t buf_cnt : 12; /**< # of internal buffers allocated to QOS level X */
		uint64_t reserved_26_27 : 2;
		uint64_t max_thr : 12; /**< Max threshold for QOS level X.
					    For performance reasons, MAX_THR can have a slop of 4
					    WQEs for QOS level X. */
		uint64_t reserved_12_13 : 2;
		uint64_t min_thr : 12; /**< Min threshold for QOS level X.
					    For performance reasons, MIN_THR can have a slop of 4
					    WQEs for QOS level X. */
#else
		uint64_t min_thr : 12;
		uint64_t reserved_12_13 : 2;
		uint64_t max_thr : 12;
		uint64_t reserved_26_27 : 2;
		uint64_t buf_cnt : 12;
		uint64_t reserved_40_63 : 24;
#endif
	} s;
	struct cvmx_sso_qos_thrx_s cn68xx;
	struct cvmx_sso_qos_thrx_s cn68xxp1;
};
typedef union cvmx_sso_qos_thrx cvmx_sso_qos_thrx_t;
/**
 * cvmx_sso_qos_we
 *
 * SSO_QOS_WE = SSO WE Buffers
 *
 * This register contains a read-only count of the current number of free
 * buffers (FREE_CNT) and the total number of tag chain heads on the de-schedule list
 * (DES_CNT) (which is not the same as the total number of entries on all of the descheduled
 * tag chains).
 */
union cvmx_sso_qos_we {
	uint64_t u64;
	struct cvmx_sso_qos_we_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_26_63 : 38;
		uint64_t des_cnt : 12; /**< Number of buffers on de-schedule list */
		uint64_t reserved_12_13 : 2;
		uint64_t free_cnt : 12; /**< Number of total free buffers */
#else
		uint64_t free_cnt : 12;
		uint64_t reserved_12_13 : 2;
		uint64_t des_cnt : 12;
		uint64_t reserved_26_63 : 38;
#endif
	} s;
	struct cvmx_sso_qos_we_s cn68xx;
	struct cvmx_sso_qos_we_s cn68xxp1;
};
typedef union cvmx_sso_qos_we cvmx_sso_qos_we_t;
/**
 * cvmx_sso_reset
 *
 * SSO_RESET = SSO Soft Reset
 *
 * Writing a one to SSO_RESET[RESET] will reset the SSO. After receiving a
 * store to this CSR, the SSO must not be sent any other operations for 2500
 * sclk cycles.
 *
 * Note that the contents of this register are reset along with the rest of the
 * SSO.
 *
 * IMPLEMENTATION NOTES--NOT FOR SPEC:
 *      The SSO must return the bus credit associated with the CSR store used
 *      to write this register before reseting itself. And the RSL tree
 *      that passes through the SSO must continue to work for RSL operations
 *      that do not target the SSO itself.
 */
union cvmx_sso_reset {
	uint64_t u64;
	struct cvmx_sso_reset_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_1_63 : 63;
		uint64_t reset : 1; /**< Reset the SSO */
#else
		uint64_t reset : 1;
		uint64_t reserved_1_63 : 63;
#endif
	} s;
	struct cvmx_sso_reset_s cn68xx;
};
typedef union cvmx_sso_reset cvmx_sso_reset_t;
/**
 * cvmx_sso_rwq_head_ptr#
 *
 * CSR reserved addresses: (24): 0xb040..0xb0f8
 * CSR align addresses: ===========================================================================================================
 * SSO_RWQ_HEAD_PTRX = SSO Remote Queue Head Register
 * (one per QOS level)
 * Contains the ptr to the first entry of the remote linked list(s) for a particular
 * QoS level. SW should initialize the remote linked list(s) by programming
 * SSO_RWQ_HEAD_PTRX and SSO_RWQ_TAIL_PTRX to identical values.
 */
union cvmx_sso_rwq_head_ptrx {
	uint64_t u64;
	struct cvmx_sso_rwq_head_ptrx_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_38_63 : 26;
		uint64_t ptr : 31; /**< Head pointer */
		uint64_t reserved_5_6 : 2;
		uint64_t rctr : 5; /**< Index of next WQE entry in fill packet to be
					processed (inbound queues) */
#else
		uint64_t rctr : 5;
		uint64_t reserved_5_6 : 2;
		uint64_t ptr : 31;
		uint64_t reserved_38_63 : 26;
#endif
	} s;
	struct cvmx_sso_rwq_head_ptrx_s cn68xx;
	struct cvmx_sso_rwq_head_ptrx_s cn68xxp1;
};
typedef union cvmx_sso_rwq_head_ptrx cvmx_sso_rwq_head_ptrx_t;
/**
 * cvmx_sso_rwq_pop_fptr
 *
 * SSO_RWQ_POP_FPTR = SSO Pop Free Pointer
 *
 * This register is used by SW to remove pointers for buffer-reallocation and diagnostics, and
 * should only be used when SSO is idle.
 *
 * To remove ALL pointers, software must insure that there are modulus 16
 * pointers in the FPA. To do this, SSO_CFG.RWQ_BYP_DIS must be set, the FPA
 * pointer count read, and enough fake buffers pushed via SSO_RWQ_PSH_FPTR to
 * bring the FPA pointer count up to mod 16.
 */
union cvmx_sso_rwq_pop_fptr {
	uint64_t u64;
	struct cvmx_sso_rwq_pop_fptr_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t val : 1; /**< Free pointer valid */
		uint64_t cnt : 6; /**< fptr_in count */
		uint64_t reserved_38_56 : 19;
		uint64_t fptr : 31; /**< Free pointer */
		uint64_t reserved_0_6 : 7;
#else
		uint64_t reserved_0_6 : 7;
		uint64_t fptr : 31;
		uint64_t reserved_38_56 : 19;
		uint64_t cnt : 6;
		uint64_t val : 1;
#endif
	} s;
	struct cvmx_sso_rwq_pop_fptr_s cn68xx;
	/* CN68XX pass 1 lacks the CNT field. */
	struct cvmx_sso_rwq_pop_fptr_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t val : 1; /**< Free pointer valid */
		uint64_t reserved_38_62 : 25;
		uint64_t fptr : 31; /**< Free pointer */
		uint64_t reserved_0_6 : 7;
#else
		uint64_t reserved_0_6 : 7;
		uint64_t fptr : 31;
		uint64_t reserved_38_62 : 25;
		uint64_t val : 1;
#endif
	} cn68xxp1;
};
typedef union cvmx_sso_rwq_pop_fptr cvmx_sso_rwq_pop_fptr_t;
/**
 * cvmx_sso_rwq_psh_fptr
 *
 * CSR reserved addresses: (56): 0xc240..0xc3f8
 * SSO_RWQ_PSH_FPTR = SSO Free Pointer FIFO
 *
 * This register is used by SW to initialize the SSO with a pool of free
 * pointers by writing the FPTR field whenever FULL = 0. Free pointers are
 * fetched/released from/to the pool when accessing WQE entries stored remotely
 * (in remote linked lists). Free pointers should be 128 byte aligned, each of
 * 256 bytes. This register should only be used when SSO is idle.
 *
 * Software needs to set aside buffering for
 *      8 + 48 + ROUNDUP(N/26)
 * where as many as N DRAM work queue entries may be used. The first 8 buffers
 * are used to setup the SSO_RWQ_HEAD_PTR and SSO_RWQ_TAIL_PTRs, and the
 * remainder are pushed via this register.
 *
 * IMPLEMENTATION NOTES--NOT FOR SPEC:
 *      48 avoids false out of buffer error due to (16) FPA and in-sso FPA buffering (32)
 *      26 is number of WAE's per 256B buffer
 */
union cvmx_sso_rwq_psh_fptr {
	uint64_t u64;
	struct cvmx_sso_rwq_psh_fptr_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t full : 1; /**< FIFO full. When set, the FPA is busy writing entries
					and software must wait before adding new entries. */
		uint64_t cnt : 4; /**< fptr_out count */
		uint64_t reserved_38_58 : 21;
		uint64_t fptr : 31; /**< Free pointer */
		uint64_t reserved_0_6 : 7;
#else
		uint64_t reserved_0_6 : 7;
		uint64_t fptr : 31;
		uint64_t reserved_38_58 : 21;
		uint64_t cnt : 4;
		uint64_t full : 1;
#endif
	} s;
	struct cvmx_sso_rwq_psh_fptr_s cn68xx;
	/* CN68XX pass 1 lacks the CNT field. */
	struct cvmx_sso_rwq_psh_fptr_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t full : 1; /**< FIFO full. When set, the FPA is busy writing entries
					and software must wait before adding new entries. */
		uint64_t reserved_38_62 : 25;
		uint64_t fptr : 31; /**< Free pointer */
		uint64_t reserved_0_6 : 7;
#else
		uint64_t reserved_0_6 : 7;
		uint64_t fptr : 31;
		uint64_t reserved_38_62 : 25;
		uint64_t full : 1;
#endif
	} cn68xxp1;
};
typedef union cvmx_sso_rwq_psh_fptr cvmx_sso_rwq_psh_fptr_t;
/**
 * cvmx_sso_rwq_tail_ptr#
 *
 * CSR reserved addresses: (56): 0xc040..0xc1f8
 * SSO_RWQ_TAIL_PTRX = SSO Remote Queue Tail Register
 * (one per QOS level)
 * Contains the ptr to the last entry of the remote linked list(s) for a particular
 * QoS level. SW must initialize the remote linked list(s) by programming
 * SSO_RWQ_HEAD_PTRX and SSO_RWQ_TAIL_PTRX to identical values.
 */
union cvmx_sso_rwq_tail_ptrx {
	uint64_t u64;
	struct cvmx_sso_rwq_tail_ptrx_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_38_63 : 26;
		uint64_t ptr : 31; /**< Tail pointer */
		uint64_t reserved_5_6 : 2;
		uint64_t rctr : 5; /**< Number of entries waiting to be sent out to external
					RAM (outbound queues) */
#else
		uint64_t rctr : 5;
		uint64_t reserved_5_6 : 2;
		uint64_t ptr : 31;
		uint64_t reserved_38_63 : 26;
#endif
	} s;
	struct cvmx_sso_rwq_tail_ptrx_s cn68xx;
	struct cvmx_sso_rwq_tail_ptrx_s cn68xxp1;
};
typedef union cvmx_sso_rwq_tail_ptrx cvmx_sso_rwq_tail_ptrx_t;
/**
 * cvmx_sso_ts_pc
 *
 * SSO_TS_PC = SSO Tag Switch Performance Counter
 *
 * Counts the number of tag switch requests.
 * Counter rolls over through zero when max value exceeded.
 */
union cvmx_sso_ts_pc {
	uint64_t u64;
	struct cvmx_sso_ts_pc_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t ts_pc : 64; /**< Tag switch performance counter */
#else
		uint64_t ts_pc : 64;
#endif
	} s;
	struct cvmx_sso_ts_pc_s cn68xx;
	struct cvmx_sso_ts_pc_s cn68xxp1;
};
typedef union cvmx_sso_ts_pc cvmx_sso_ts_pc_t;
/**
 * cvmx_sso_wa_com_pc
 *
 * SSO_WA_COM_PC = SSO Work Add Combined Performance Counter
 *
 * Counts the number of add new work requests for all QOS levels.
 * Counter rolls over through zero when max value exceeded.
 */
union cvmx_sso_wa_com_pc {
	uint64_t u64;
	struct cvmx_sso_wa_com_pc_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t wa_pc : 64; /**< Work add combined performance counter */
#else
		uint64_t wa_pc : 64;
#endif
	} s;
	struct cvmx_sso_wa_com_pc_s cn68xx;
	struct cvmx_sso_wa_com_pc_s cn68xxp1;
};
typedef union cvmx_sso_wa_com_pc cvmx_sso_wa_com_pc_t;
/**
 * cvmx_sso_wa_pc#
 *
 * CSR reserved addresses: (64): 0x4200..0x43f8
 * CSR align addresses: ===========================================================================================================
 * SSO_WA_PCX = SSO Work Add Performance Counter
 * (one per QOS level)
 *
 * Counts the number of add new work requests for each QOS level.
 * Counter rolls over through zero when max value exceeded.
 */
union cvmx_sso_wa_pcx {
	uint64_t u64;
	struct cvmx_sso_wa_pcx_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t wa_pc : 64; /**< Work add performance counter for QOS level X */
#else
		uint64_t wa_pc : 64;
#endif
	} s;
	struct cvmx_sso_wa_pcx_s cn68xx;
	struct cvmx_sso_wa_pcx_s cn68xxp1;
};
typedef union cvmx_sso_wa_pcx cvmx_sso_wa_pcx_t;
/**
 * cvmx_sso_wq_int
 *
 * Note, the old POW offsets ran from 0x0 to 0x3f8, leaving the next available slot at 0x400.
 * To ensure no overlap, start on 4k boundary: 0x1000.
 * SSO_WQ_INT = SSO Work Queue Interrupt Register
 *
 * Contains the bits (one per group) that set work queue interrupts and are
 * used to clear these interrupts. For more information regarding this
 * register, see the interrupt section of the SSO spec.
 */
union cvmx_sso_wq_int {
	uint64_t u64;
	struct cvmx_sso_wq_int_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t wq_int : 64; /**< Work queue interrupt bits
					 Corresponding WQ_INT bit is set by HW whenever:
					 - SSO_WQ_INT_CNTX[IQ_CNT] >=
					   SSO_WQ_INT_THRX[IQ_THR] and the threshold
					   interrupt is not disabled.
					   SSO_WQ_IQ_DISX[IQ_DIS<X>]==1 disables the interrupt
					   SSO_WQ_INT_THRX[IQ_THR]==0 disables the int.
					 - SSO_WQ_INT_CNTX[DS_CNT] >=
					   SSO_WQ_INT_THRX[DS_THR] and the threshold
					   interrupt is not disabled
					   SSO_WQ_INT_THRX[DS_THR]==0 disables the int.
					 - SSO_WQ_INT_CNTX[TC_CNT]==1 when periodic
					   counter SSO_WQ_INT_PC[PC]==0 and
					   SSO_WQ_INT_THRX[TC_EN]==1 and at least one of:
					   - SSO_WQ_INT_CNTX[IQ_CNT] > 0
					   - SSO_WQ_INT_CNTX[DS_CNT] > 0 */
#else
		uint64_t wq_int : 64;
#endif
	} s;
	struct cvmx_sso_wq_int_s cn68xx;
	struct cvmx_sso_wq_int_s cn68xxp1;
};
typedef union cvmx_sso_wq_int cvmx_sso_wq_int_t;
/**
 * cvmx_sso_wq_int_cnt#
 *
 * CSR reserved addresses: (64): 0x7200..0x73f8
 * CSR align addresses: ===========================================================================================================
 * SSO_WQ_INT_CNTX = SSO Work Queue Interrupt Count Register
 *                   (one per group)
 *
 * Contains a read-only copy of the counts used to trigger work queue
 * interrupts. For more information regarding this register, see the interrupt
 * section.
 */
union cvmx_sso_wq_int_cntx {
	uint64_t u64;
	struct cvmx_sso_wq_int_cntx_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63 : 32;
		uint64_t tc_cnt : 4; /**< Time counter current value for group X
					HW sets TC_CNT to SSO_WQ_INT_THRX[TC_THR] whenever:
					- corresponding SSO_WQ_INT_CNTX[IQ_CNT]==0 and
					  corresponding SSO_WQ_INT_CNTX[DS_CNT]==0
					- corresponding SSO_WQ_INT[WQ_INT<X>] is written
					  with a 1 by SW
					- corresponding SSO_WQ_IQ_DIS[IQ_DIS<X>] is written
					  with a 1 by SW
					- corresponding SSO_WQ_INT_THRX is written by SW
					- TC_CNT==1 and periodic counter
					  SSO_WQ_INT_PC[PC]==0
					Otherwise, HW decrements TC_CNT whenever the
					periodic counter SSO_WQ_INT_PC[PC]==0.
					TC_CNT is 0 whenever SSO_WQ_INT_THRX[TC_THR]==0. */
		uint64_t reserved_26_27 : 2;
		uint64_t ds_cnt : 12; /**< De-schedule executable count for group X */
		uint64_t reserved_12_13 : 2;
		uint64_t iq_cnt : 12; /**< Input queue executable count for group X */
#else
		uint64_t iq_cnt : 12;
		uint64_t reserved_12_13 : 2;
		uint64_t ds_cnt : 12;
		uint64_t reserved_26_27 : 2;
		uint64_t tc_cnt : 4;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_sso_wq_int_cntx_s cn68xx;
	struct cvmx_sso_wq_int_cntx_s cn68xxp1;
};
typedef union cvmx_sso_wq_int_cntx cvmx_sso_wq_int_cntx_t;
/**
 * cvmx_sso_wq_int_pc
 *
 * CSR reserved addresses: (1): 0x1018..0x1018
 * SSO_WQ_INT_PC = SSO Work Queue Interrupt Periodic Counter Register
 *
 * Contains the threshold value for the work queue interrupt periodic counter
 * and also a read-only copy of the periodic counter. For more information
 * regarding this register, see the interrupt section.
 */
union cvmx_sso_wq_int_pc {
	uint64_t u64;
	struct cvmx_sso_wq_int_pc_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_60_63 : 4;
		uint64_t pc : 28; /**< Work queue interrupt periodic counter */
		uint64_t reserved_28_31 : 4;
		uint64_t pc_thr : 20; /**< Work queue interrupt periodic counter threshold */
		uint64_t reserved_0_7 : 8;
#else
		uint64_t reserved_0_7 : 8;
		uint64_t pc_thr : 20;
		uint64_t reserved_28_31 : 4;
		uint64_t pc : 28;
		uint64_t reserved_60_63 : 4;
#endif
	} s;
	struct cvmx_sso_wq_int_pc_s cn68xx;
	struct cvmx_sso_wq_int_pc_s cn68xxp1;
};
typedef union cvmx_sso_wq_int_pc cvmx_sso_wq_int_pc_t;
/**
 * cvmx_sso_wq_int_thr#
 *
 * CSR reserved addresses: (96): 0x6100..0x63f8
 * CSR align addresses: ===========================================================================================================
 * SSO_WQ_INT_THR(0..63) = SSO Work Queue Interrupt Threshold Registers
 *                         (one per group)
 *
 * Contains the thresholds for enabling and setting work queue interrupts. For
 * more information, see the interrupt section.
 *
 * Note: Up to 16 of the SSO's internal storage buffers can be allocated
 * for hardware use and are therefore not available for incoming work queue
 * entries. Additionally, any WS that is not in the EMPTY state consumes a
 * buffer. Thus in a 32 PP system, it is not advisable to set either IQ_THR or
 * DS_THR to greater than 2048 - 16 - 32*2 = 1968. Doing so may prevent the
 * interrupt from ever triggering.
 *
 * Priorities for QOS levels 0..7
 */
union cvmx_sso_wq_int_thrx {
	uint64_t u64;
	struct cvmx_sso_wq_int_thrx_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_33_63 : 31;
		uint64_t tc_en : 1; /**< Time counter interrupt enable for group X
				       TC_EN must be zero when TC_THR==0 */
		uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X
					When TC_THR==0, SSO_WQ_INT_CNTX[TC_CNT] is zero */
		uint64_t reserved_26_27 : 2;
		uint64_t ds_thr : 12; /**< De-schedule count threshold for group X
					 DS_THR==0 disables the threshold interrupt */
		uint64_t reserved_12_13 : 2;
		uint64_t iq_thr : 12; /**< Input queue count threshold for group X
					 IQ_THR==0 disables the threshold interrupt */
#else
		uint64_t iq_thr : 12;
		uint64_t reserved_12_13 : 2;
		uint64_t ds_thr : 12;
		uint64_t reserved_26_27 : 2;
		uint64_t tc_thr : 4;
		uint64_t tc_en : 1;
		uint64_t reserved_33_63 : 31;
#endif
	} s;
	struct cvmx_sso_wq_int_thrx_s cn68xx;
	struct cvmx_sso_wq_int_thrx_s cn68xxp1;
};
typedef union cvmx_sso_wq_int_thrx cvmx_sso_wq_int_thrx_t;
/**
 * cvmx_sso_wq_iq_dis
 *
 * CSR reserved addresses: (1): 0x1008..0x1008
 * SSO_WQ_IQ_DIS = SSO Input Queue Interrupt Temporary Disable Mask
 *
 * Contains the input queue interrupt temporary disable bits (one per group).
 * For more information regarding this register, see the interrupt section.
 */
union cvmx_sso_wq_iq_dis {
	uint64_t u64;
	struct cvmx_sso_wq_iq_dis_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t iq_dis : 64; /**< Input queue interrupt temporary disable mask
					 Corresponding SSO_WQ_INTX[WQ_INT<X>] bit cannot be
					 set due to IQ_CNT/IQ_THR check when this bit is set.
					 Corresponding IQ_DIS bit is cleared by HW whenever:
					 - SSO_WQ_INT_CNTX[IQ_CNT] is zero, or
					 - SSO_WQ_INT_CNTX[TC_CNT]==1 when periodic
					   counter SSO_WQ_INT_PC[PC]==0 */
#else
		uint64_t iq_dis : 64;
#endif
	} s;
	struct cvmx_sso_wq_iq_dis_s cn68xx;
	struct cvmx_sso_wq_iq_dis_s cn68xxp1;
};
typedef union cvmx_sso_wq_iq_dis cvmx_sso_wq_iq_dis_t;
/**
 * cvmx_sso_ws_pc#
 *
 * CSR reserved addresses: (225): 0x3100..0x3800
 * CSR align addresses: ===========================================================================================================
 * SSO_WS_PCX = SSO Work Schedule Performance Counter
 *              (one per group)
 *
 * Counts the number of work schedules for each group.
 * Counter rolls over through zero when max value exceeded.
 */
union cvmx_sso_ws_pcx {
	uint64_t u64;
	struct cvmx_sso_ws_pcx_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t ws_pc : 64; /**< Work schedule performance counter for group X */
#else
		uint64_t ws_pc : 64;
#endif
	} s;
	struct cvmx_sso_ws_pcx_s cn68xx;
	struct cvmx_sso_ws_pcx_s cn68xxp1;
};
typedef union cvmx_sso_ws_pcx cvmx_sso_ws_pcx_t;