1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
/**
 * @file
 *
 * Configuration and status register (CSR) type definitions for
 * Octeon FPA (Free Pool Allocator).
 *
 * This file is auto generated. Do not edit.
 */
52 #ifndef __CVMX_FPA_TYPEDEFS_H__
53 #define __CVMX_FPA_TYPEDEFS_H__
55 #define CVMX_FPA_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011800280000E8ull))
56 #define CVMX_FPA_CTL_STATUS (CVMX_ADD_IO_SEG(0x0001180028000050ull))
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: warns when the CSR does not exist on the running chip. */
#define CVMX_FPA_FPF0_MARKS CVMX_FPA_FPF0_MARKS_FUNC()
static inline uint64_t CVMX_FPA_FPF0_MARKS_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_FPA_FPF0_MARKS not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180028000000ull);
}
#else
#define CVMX_FPA_FPF0_MARKS (CVMX_ADD_IO_SEG(0x0001180028000000ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: warns when the CSR does not exist on the running chip. */
#define CVMX_FPA_FPF0_SIZE CVMX_FPA_FPF0_SIZE_FUNC()
static inline uint64_t CVMX_FPA_FPF0_SIZE_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_FPA_FPF0_SIZE not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180028000058ull);
}
#else
#define CVMX_FPA_FPF0_SIZE (CVMX_ADD_IO_SEG(0x0001180028000058ull))
#endif
/* Per-queue aliases for the free page FIFO mark CSRs (queues 1-7). */
#define CVMX_FPA_FPF1_MARKS CVMX_FPA_FPFX_MARKS(1)
#define CVMX_FPA_FPF2_MARKS CVMX_FPA_FPFX_MARKS(2)
#define CVMX_FPA_FPF3_MARKS CVMX_FPA_FPFX_MARKS(3)
#define CVMX_FPA_FPF4_MARKS CVMX_FPA_FPFX_MARKS(4)
#define CVMX_FPA_FPF5_MARKS CVMX_FPA_FPFX_MARKS(5)
#define CVMX_FPA_FPF6_MARKS CVMX_FPA_FPFX_MARKS(6)
#define CVMX_FPA_FPF7_MARKS CVMX_FPA_FPFX_MARKS(7)
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: valid only for queues 1-7 on chips with this CSR. */
static inline uint64_t CVMX_FPA_FPFX_MARKS(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset >= 1) && (offset <= 7)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 1) && (offset <= 7)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset >= 1) && (offset <= 7)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 1) && (offset <= 7))))))
		cvmx_warn("CVMX_FPA_FPFX_MARKS(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180028000008ull) + ((offset) & 7) * 8 - 8*1;
}
#else
#define CVMX_FPA_FPFX_MARKS(offset) (CVMX_ADD_IO_SEG(0x0001180028000008ull) + ((offset) & 7) * 8 - 8*1)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: valid only for queues 1-7 on chips with this CSR. */
static inline uint64_t CVMX_FPA_FPFX_SIZE(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset >= 1) && (offset <= 7)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset >= 1) && (offset <= 7)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset >= 1) && (offset <= 7)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset >= 1) && (offset <= 7))))))
		cvmx_warn("CVMX_FPA_FPFX_SIZE(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180028000060ull) + ((offset) & 7) * 8 - 8*1;
}
#else
#define CVMX_FPA_FPFX_SIZE(offset) (CVMX_ADD_IO_SEG(0x0001180028000060ull) + ((offset) & 7) * 8 - 8*1)
#endif
/* Interrupt enable and interrupt summary CSRs. */
#define CVMX_FPA_INT_ENB (CVMX_ADD_IO_SEG(0x0001180028000048ull))
#define CVMX_FPA_INT_SUM (CVMX_ADD_IO_SEG(0x0001180028000040ull))
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: CSR exists only on CN63XX. */
#define CVMX_FPA_PACKET_THRESHOLD CVMX_FPA_PACKET_THRESHOLD_FUNC()
static inline uint64_t CVMX_FPA_PACKET_THRESHOLD_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_FPA_PACKET_THRESHOLD not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180028000460ull);
}
#else
#define CVMX_FPA_PACKET_THRESHOLD (CVMX_ADD_IO_SEG(0x0001180028000460ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: pools 0-7 on CN63XX only. */
static inline uint64_t CVMX_FPA_POOLX_THRESHOLD(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_FPA_POOLX_THRESHOLD(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180028000140ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_FPA_POOLX_THRESHOLD(offset) (CVMX_ADD_IO_SEG(0x0001180028000140ull) + ((offset) & 7) * 8)
#endif
/* Per-queue aliases for the page index CSRs (queues 0-7). */
#define CVMX_FPA_QUE0_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(0)
#define CVMX_FPA_QUE1_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(1)
#define CVMX_FPA_QUE2_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(2)
#define CVMX_FPA_QUE3_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(3)
#define CVMX_FPA_QUE4_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(4)
#define CVMX_FPA_QUE5_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(5)
#define CVMX_FPA_QUE6_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(6)
#define CVMX_FPA_QUE7_PAGE_INDEX CVMX_FPA_QUEX_PAGE_INDEX(7)
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: queues 0-7 on all supported models. */
static inline uint64_t CVMX_FPA_QUEX_AVAILABLE(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_FPA_QUEX_AVAILABLE(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180028000098ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_FPA_QUEX_AVAILABLE(offset) (CVMX_ADD_IO_SEG(0x0001180028000098ull) + ((offset) & 7) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: queues 0-7 on all supported models. */
static inline uint64_t CVMX_FPA_QUEX_PAGE_INDEX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 7))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 7)))))
		cvmx_warn("CVMX_FPA_QUEX_PAGE_INDEX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800280000F0ull) + ((offset) & 7) * 8;
}
#else
#define CVMX_FPA_QUEX_PAGE_INDEX(offset) (CVMX_ADD_IO_SEG(0x00011800280000F0ull) + ((offset) & 7) * 8)
#endif
/* Queue actual/expected page count CSRs. */
#define CVMX_FPA_QUE_ACT (CVMX_ADD_IO_SEG(0x0001180028000138ull))
#define CVMX_FPA_QUE_EXP (CVMX_ADD_IO_SEG(0x0001180028000130ull))
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: CSR exists only on CN3XXX/CN5XXX families. */
#define CVMX_FPA_WART_CTL CVMX_FPA_WART_CTL_FUNC()
static inline uint64_t CVMX_FPA_WART_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
		cvmx_warn("CVMX_FPA_WART_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800280000D8ull);
}
#else
#define CVMX_FPA_WART_CTL (CVMX_ADD_IO_SEG(0x00011800280000D8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: CSR exists only on CN3XXX/CN5XXX families. */
#define CVMX_FPA_WART_STATUS CVMX_FPA_WART_STATUS_FUNC()
static inline uint64_t CVMX_FPA_WART_STATUS_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX)))
		cvmx_warn("CVMX_FPA_WART_STATUS not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800280000E0ull);
}
#else
#define CVMX_FPA_WART_STATUS (CVMX_ADD_IO_SEG(0x00011800280000E0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: CSR exists only on CN63XX. */
#define CVMX_FPA_WQE_THRESHOLD CVMX_FPA_WQE_THRESHOLD_FUNC()
static inline uint64_t CVMX_FPA_WQE_THRESHOLD_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_FPA_WQE_THRESHOLD not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x0001180028000468ull);
}
#else
#define CVMX_FPA_WQE_THRESHOLD (CVMX_ADD_IO_SEG(0x0001180028000468ull))
#endif
/**
 * cvmx_fpa_bist_status
 *
 * FPA_BIST_STATUS = BIST Status of FPA Memories
 *
 * The result of the BIST run on the FPA memories.
 */
union cvmx_fpa_bist_status
{
	uint64_t u64;
	struct cvmx_fpa_bist_status_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63 : 59;
	uint64_t frd  : 1; /**< fpa_frd memory bist status. */
	uint64_t fpf0 : 1; /**< fpa_fpf0 memory bist status. */
	uint64_t fpf1 : 1; /**< fpa_fpf1 memory bist status. */
	uint64_t ffr  : 1; /**< fpa_ffr memory bist status. */
	uint64_t fdr  : 1; /**< fpa_fdr memory bist status. */
#else
	uint64_t fdr  : 1;
	uint64_t ffr  : 1;
	uint64_t fpf1 : 1;
	uint64_t fpf0 : 1;
	uint64_t frd  : 1;
	uint64_t reserved_5_63 : 59;
#endif
	} s;
	struct cvmx_fpa_bist_status_s cn30xx;
	struct cvmx_fpa_bist_status_s cn31xx;
	struct cvmx_fpa_bist_status_s cn38xx;
	struct cvmx_fpa_bist_status_s cn38xxp2;
	struct cvmx_fpa_bist_status_s cn50xx;
	struct cvmx_fpa_bist_status_s cn52xx;
	struct cvmx_fpa_bist_status_s cn52xxp1;
	struct cvmx_fpa_bist_status_s cn56xx;
	struct cvmx_fpa_bist_status_s cn56xxp1;
	struct cvmx_fpa_bist_status_s cn58xx;
	struct cvmx_fpa_bist_status_s cn58xxp1;
	struct cvmx_fpa_bist_status_s cn63xx;
	struct cvmx_fpa_bist_status_s cn63xxp1;
};
typedef union cvmx_fpa_bist_status cvmx_fpa_bist_status_t;
/**
 * cvmx_fpa_ctl_status
 *
 * FPA_CTL_STATUS = FPA's Control/Status Register
 *
 * The FPA's interrupt enable register.
 */
union cvmx_fpa_ctl_status
{
	uint64_t u64;
	struct cvmx_fpa_ctl_status_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_21_63 : 43;
	uint64_t free_en  : 1; /**< Enables the setting of the INT_SUM_[FREE*] bits. */
	uint64_t ret_off  : 1; /**< When set NCB devices returning pointers will be stalled. */
	uint64_t req_off  : 1; /**< When set NCB devices requesting pointers will be stalled. */
	uint64_t reset    : 1; /**< When set causes a reset of the FPA with the
	                            exception of the RSL. This is a PASS-2 field. */
	uint64_t use_ldt  : 1; /**< When clear '0' the FPA will use LDT to load
	                            pointers from the L2C. This is a PASS-2 field. */
	uint64_t use_stt  : 1; /**< When clear '0' the FPA will use STT to store
	                            pointers to the L2C. This is a PASS-2 field. */
	uint64_t enb      : 1; /**< Must be set to 1 AFTER writing all config registers
	                            and 10 cycles have past. If any of the config
	                            register are written after writing this bit the
	                            FPA may begin to operate incorrectly. */
	uint64_t mem1_err : 7; /**< Causes a flip of the ECC bit associated 38:32
	                            respective to bit 6:0 of this field, for FPF FIFO 1. */
	uint64_t mem0_err : 7; /**< Causes a flip of the ECC bit associated 38:32
	                            respective to bit 6:0 of this field, for FPF FIFO 0. */
#else
	uint64_t mem0_err : 7;
	uint64_t mem1_err : 7;
	uint64_t enb      : 1;
	uint64_t use_stt  : 1;
	uint64_t use_ldt  : 1;
	uint64_t reset    : 1;
	uint64_t req_off  : 1;
	uint64_t ret_off  : 1;
	uint64_t free_en  : 1;
	uint64_t reserved_21_63 : 43;
#endif
	} s;
	/* CN30XX-family layout: no free_en/ret_off/req_off bits. */
	struct cvmx_fpa_ctl_status_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_18_63 : 46;
	uint64_t reset    : 1; /**< When set causes a reset of the FPA with the
	                            exception of the RSL. */
	uint64_t use_ldt  : 1; /**< When clear '0' the FPA will use LDT to load
	                            pointers from the L2C. */
	uint64_t use_stt  : 1; /**< When clear '0' the FPA will use STT to store
	                            pointers to the L2C. */
	uint64_t enb      : 1; /**< Must be set to 1 AFTER writing all config registers
	                            and 10 cycles have past. If any of the config
	                            register are written after writing this bit the
	                            FPA may begin to operate incorrectly. */
	uint64_t mem1_err : 7; /**< Causes a flip of the ECC bit associated 38:32
	                            respective to bit 6:0 of this field, for FPF FIFO 1. */
	uint64_t mem0_err : 7; /**< Causes a flip of the ECC bit associated 38:32
	                            respective to bit 6:0 of this field, for FPF FIFO 0. */
#else
	uint64_t mem0_err : 7;
	uint64_t mem1_err : 7;
	uint64_t enb      : 1;
	uint64_t use_stt  : 1;
	uint64_t use_ldt  : 1;
	uint64_t reset    : 1;
	uint64_t reserved_18_63 : 46;
#endif
	} cn30xx;
	struct cvmx_fpa_ctl_status_cn30xx cn31xx;
	struct cvmx_fpa_ctl_status_cn30xx cn38xx;
	struct cvmx_fpa_ctl_status_cn30xx cn38xxp2;
	struct cvmx_fpa_ctl_status_cn30xx cn50xx;
	struct cvmx_fpa_ctl_status_cn30xx cn52xx;
	struct cvmx_fpa_ctl_status_cn30xx cn52xxp1;
	struct cvmx_fpa_ctl_status_cn30xx cn56xx;
	struct cvmx_fpa_ctl_status_cn30xx cn56xxp1;
	struct cvmx_fpa_ctl_status_cn30xx cn58xx;
	struct cvmx_fpa_ctl_status_cn30xx cn58xxp1;
	struct cvmx_fpa_ctl_status_s cn63xx;
	struct cvmx_fpa_ctl_status_cn30xx cn63xxp1;
};
typedef union cvmx_fpa_ctl_status cvmx_fpa_ctl_status_t;
/**
 * cvmx_fpa_fpf#_marks
 *
 * FPA_FPF1_MARKS = FPA's Queue 1 Free Page FIFO Read Write Marks
 *
 * The high and low watermark register that determines when we write and read free pages from L2C
 * for Queue 1. The value of FPF_RD and FPF_WR should have at least a 33 difference. Recommend value
 * is FPF_RD == (FPA_FPF#_SIZE[FPF_SIZ] * .25) and FPF_WR == (FPA_FPF#_SIZE[FPF_SIZ] * .75)
 */
union cvmx_fpa_fpfx_marks
{
	uint64_t u64;
	struct cvmx_fpa_fpfx_marks_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_22_63 : 42;
	uint64_t fpf_wr : 11; /**< When the number of free-page-pointers in a
	                           queue exceeds this value the FPA will write
	                           32-page-pointers of that queue to DRAM.
	                           The MAX value for this field should be
	                           FPA_FPF1_SIZE[FPF_SIZ]-2. */
	uint64_t fpf_rd : 11; /**< When the number of free-page-pointers in a
	                           queue drops below this value and there are
	                           free-page-pointers in DRAM, the FPA will
	                           read one page (32 pointers) from DRAM.
	                           This maximum value for this field should be
	                           FPA_FPF1_SIZE[FPF_SIZ]-34. The min number
	                           for this would be 16. */
#else
	uint64_t fpf_rd : 11;
	uint64_t fpf_wr : 11;
	uint64_t reserved_22_63 : 42;
#endif
	} s;
	struct cvmx_fpa_fpfx_marks_s cn38xx;
	struct cvmx_fpa_fpfx_marks_s cn38xxp2;
	struct cvmx_fpa_fpfx_marks_s cn56xx;
	struct cvmx_fpa_fpfx_marks_s cn56xxp1;
	struct cvmx_fpa_fpfx_marks_s cn58xx;
	struct cvmx_fpa_fpfx_marks_s cn58xxp1;
	struct cvmx_fpa_fpfx_marks_s cn63xx;
	struct cvmx_fpa_fpfx_marks_s cn63xxp1;
};
typedef union cvmx_fpa_fpfx_marks cvmx_fpa_fpfx_marks_t;
/**
 * cvmx_fpa_fpf#_size
 *
 * FPA_FPFX_SIZE = FPA's Queue 1-7 Free Page FIFO Size
 *
 * The number of page pointers that will be kept local to the FPA for this Queue. FPA Queues are
 * assigned in order from Queue 0 to Queue 7, though only Queue 0 through Queue x can be used.
 * The sum of the 8 (0-7) FPA_FPF#_SIZE registers must be limited to 2048.
 */
union cvmx_fpa_fpfx_size
{
	uint64_t u64;
	struct cvmx_fpa_fpfx_size_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_11_63 : 53;
	uint64_t fpf_siz : 11; /**< The number of entries assigned in the FPA FIFO
	                            (used to hold page-pointers) for this Queue.
	                            The value of this register must divisable by 2,
	                            and the FPA will ignore bit [0] of this register.
	                            The total of the FPF_SIZ field of the 8 (0-7)
	                            FPA_FPF#_SIZE registers must not exceed 2048.
	                            After writing this field the FPA will need 10
	                            core clock cycles to be ready for operation. The
	                            assignment of location in the FPA FIFO must
	                            start with Queue 0, then 1, 2, etc.
	                            The number of useable entries will be FPF_SIZ-2. */
#else
	uint64_t fpf_siz : 11;
	uint64_t reserved_11_63 : 53;
#endif
	} s;
	struct cvmx_fpa_fpfx_size_s cn38xx;
	struct cvmx_fpa_fpfx_size_s cn38xxp2;
	struct cvmx_fpa_fpfx_size_s cn56xx;
	struct cvmx_fpa_fpfx_size_s cn56xxp1;
	struct cvmx_fpa_fpfx_size_s cn58xx;
	struct cvmx_fpa_fpfx_size_s cn58xxp1;
	struct cvmx_fpa_fpfx_size_s cn63xx;
	struct cvmx_fpa_fpfx_size_s cn63xxp1;
};
typedef union cvmx_fpa_fpfx_size cvmx_fpa_fpfx_size_t;
/**
 * cvmx_fpa_fpf0_marks
 *
 * FPA_FPF0_MARKS = FPA's Queue 0 Free Page FIFO Read Write Marks
 *
 * The high and low watermark register that determines when we write and read free pages from L2C
 * for Queue 0. The value of FPF_RD and FPF_WR should have at least a 33 difference. Recommend value
 * is FPF_RD == (FPA_FPF#_SIZE[FPF_SIZ] * .25) and FPF_WR == (FPA_FPF#_SIZE[FPF_SIZ] * .75)
 */
union cvmx_fpa_fpf0_marks
{
	uint64_t u64;
	struct cvmx_fpa_fpf0_marks_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_24_63 : 40;
	uint64_t fpf_wr : 12; /**< When the number of free-page-pointers in a
	                           queue exceeds this value the FPA will write
	                           32-page-pointers of that queue to DRAM.
	                           The MAX value for this field should be
	                           FPA_FPF0_SIZE[FPF_SIZ]-2. */
	uint64_t fpf_rd : 12; /**< When the number of free-page-pointers in a
	                           queue drops below this value and there are
	                           free-page-pointers in DRAM, the FPA will
	                           read one page (32 pointers) from DRAM.
	                           This maximum value for this field should be
	                           FPA_FPF0_SIZE[FPF_SIZ]-34. The min number
	                           for this would be 16. */
#else
	uint64_t fpf_rd : 12;
	uint64_t fpf_wr : 12;
	uint64_t reserved_24_63 : 40;
#endif
	} s;
	struct cvmx_fpa_fpf0_marks_s cn38xx;
	struct cvmx_fpa_fpf0_marks_s cn38xxp2;
	struct cvmx_fpa_fpf0_marks_s cn56xx;
	struct cvmx_fpa_fpf0_marks_s cn56xxp1;
	struct cvmx_fpa_fpf0_marks_s cn58xx;
	struct cvmx_fpa_fpf0_marks_s cn58xxp1;
	struct cvmx_fpa_fpf0_marks_s cn63xx;
	struct cvmx_fpa_fpf0_marks_s cn63xxp1;
};
typedef union cvmx_fpa_fpf0_marks cvmx_fpa_fpf0_marks_t;
/**
 * cvmx_fpa_fpf0_size
 *
 * FPA_FPF0_SIZE = FPA's Queue 0 Free Page FIFO Size
 *
 * The number of page pointers that will be kept local to the FPA for this Queue. FPA Queues are
 * assigned in order from Queue 0 to Queue 7, though only Queue 0 through Queue x can be used.
 * The sum of the 8 (0-7) FPA_FPF#_SIZE registers must be limited to 2048.
 */
union cvmx_fpa_fpf0_size
{
	uint64_t u64;
	struct cvmx_fpa_fpf0_size_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_12_63 : 52;
	uint64_t fpf_siz : 12; /**< The number of entries assigned in the FPA FIFO
	                            (used to hold page-pointers) for this Queue.
	                            The value of this register must divisable by 2,
	                            and the FPA will ignore bit [0] of this register.
	                            The total of the FPF_SIZ field of the 8 (0-7)
	                            FPA_FPF#_SIZE registers must not exceed 2048.
	                            After writing this field the FPA will need 10
	                            core clock cycles to be ready for operation. The
	                            assignment of location in the FPA FIFO must
	                            start with Queue 0, then 1, 2, etc.
	                            The number of useable entries will be FPF_SIZ-2. */
#else
	uint64_t fpf_siz : 12;
	uint64_t reserved_12_63 : 52;
#endif
	} s;
	struct cvmx_fpa_fpf0_size_s cn38xx;
	struct cvmx_fpa_fpf0_size_s cn38xxp2;
	struct cvmx_fpa_fpf0_size_s cn56xx;
	struct cvmx_fpa_fpf0_size_s cn56xxp1;
	struct cvmx_fpa_fpf0_size_s cn58xx;
	struct cvmx_fpa_fpf0_size_s cn58xxp1;
	struct cvmx_fpa_fpf0_size_s cn63xx;
	struct cvmx_fpa_fpf0_size_s cn63xxp1;
};
typedef union cvmx_fpa_fpf0_size cvmx_fpa_fpf0_size_t;
534 * FPA_INT_ENB = FPA's Interrupt Enable
536 * The FPA's interrupt enable register.
538 union cvmx_fpa_int_enb
541 struct cvmx_fpa_int_enb_s
543 #if __BYTE_ORDER == __BIG_ENDIAN
544 uint64_t reserved_44_63 : 20;
545 uint64_t free7 : 1; /**< When set (1) and bit 43 of the FPA_INT_SUM
546 register is asserted the FPA will assert an
548 uint64_t free6 : 1; /**< When set (1) and bit 42 of the FPA_INT_SUM
549 register is asserted the FPA will assert an
551 uint64_t free5 : 1; /**< When set (1) and bit 41 of the FPA_INT_SUM
552 register is asserted the FPA will assert an
554 uint64_t free4 : 1; /**< When set (1) and bit 40 of the FPA_INT_SUM
555 register is asserted the FPA will assert an
557 uint64_t free3 : 1; /**< When set (1) and bit 39 of the FPA_INT_SUM
558 register is asserted the FPA will assert an
560 uint64_t free2 : 1; /**< When set (1) and bit 38 of the FPA_INT_SUM
561 register is asserted the FPA will assert an
563 uint64_t free1 : 1; /**< When set (1) and bit 37 of the FPA_INT_SUM
564 register is asserted the FPA will assert an
566 uint64_t free0 : 1; /**< When set (1) and bit 36 of the FPA_INT_SUM
567 register is asserted the FPA will assert an
569 uint64_t pool7th : 1; /**< When set (1) and bit 35 of the FPA_INT_SUM
570 register is asserted the FPA will assert an
572 uint64_t pool6th : 1; /**< When set (1) and bit 34 of the FPA_INT_SUM
573 register is asserted the FPA will assert an
575 uint64_t pool5th : 1; /**< When set (1) and bit 33 of the FPA_INT_SUM
576 register is asserted the FPA will assert an
578 uint64_t pool4th : 1; /**< When set (1) and bit 32 of the FPA_INT_SUM
579 register is asserted the FPA will assert an
581 uint64_t pool3th : 1; /**< When set (1) and bit 31 of the FPA_INT_SUM
582 register is asserted the FPA will assert an
584 uint64_t pool2th : 1; /**< When set (1) and bit 30 of the FPA_INT_SUM
585 register is asserted the FPA will assert an
587 uint64_t pool1th : 1; /**< When set (1) and bit 29 of the FPA_INT_SUM
588 register is asserted the FPA will assert an
590 uint64_t pool0th : 1; /**< When set (1) and bit 28 of the FPA_INT_SUM
591 register is asserted the FPA will assert an
593 uint64_t q7_perr : 1; /**< When set (1) and bit 27 of the FPA_INT_SUM
594 register is asserted the FPA will assert an
596 uint64_t q7_coff : 1; /**< When set (1) and bit 26 of the FPA_INT_SUM
597 register is asserted the FPA will assert an
599 uint64_t q7_und : 1; /**< When set (1) and bit 25 of the FPA_INT_SUM
600 register is asserted the FPA will assert an
602 uint64_t q6_perr : 1; /**< When set (1) and bit 24 of the FPA_INT_SUM
603 register is asserted the FPA will assert an
605 uint64_t q6_coff : 1; /**< When set (1) and bit 23 of the FPA_INT_SUM
606 register is asserted the FPA will assert an
608 uint64_t q6_und : 1; /**< When set (1) and bit 22 of the FPA_INT_SUM
609 register is asserted the FPA will assert an
611 uint64_t q5_perr : 1; /**< When set (1) and bit 21 of the FPA_INT_SUM
612 register is asserted the FPA will assert an
614 uint64_t q5_coff : 1; /**< When set (1) and bit 20 of the FPA_INT_SUM
615 register is asserted the FPA will assert an
617 uint64_t q5_und : 1; /**< When set (1) and bit 19 of the FPA_INT_SUM
618 register is asserted the FPA will assert an
620 uint64_t q4_perr : 1; /**< When set (1) and bit 18 of the FPA_INT_SUM
621 register is asserted the FPA will assert an
623 uint64_t q4_coff : 1; /**< When set (1) and bit 17 of the FPA_INT_SUM
624 register is asserted the FPA will assert an
626 uint64_t q4_und : 1; /**< When set (1) and bit 16 of the FPA_INT_SUM
627 register is asserted the FPA will assert an
629 uint64_t q3_perr : 1; /**< When set (1) and bit 15 of the FPA_INT_SUM
630 register is asserted the FPA will assert an
632 uint64_t q3_coff : 1; /**< When set (1) and bit 14 of the FPA_INT_SUM
633 register is asserted the FPA will assert an
635 uint64_t q3_und : 1; /**< When set (1) and bit 13 of the FPA_INT_SUM
636 register is asserted the FPA will assert an
638 uint64_t q2_perr : 1; /**< When set (1) and bit 12 of the FPA_INT_SUM
639 register is asserted the FPA will assert an
641 uint64_t q2_coff : 1; /**< When set (1) and bit 11 of the FPA_INT_SUM
642 register is asserted the FPA will assert an
644 uint64_t q2_und : 1; /**< When set (1) and bit 10 of the FPA_INT_SUM
645 register is asserted the FPA will assert an
647 uint64_t q1_perr : 1; /**< When set (1) and bit 9 of the FPA_INT_SUM
648 register is asserted the FPA will assert an
650 uint64_t q1_coff : 1; /**< When set (1) and bit 8 of the FPA_INT_SUM
651 register is asserted the FPA will assert an
653 uint64_t q1_und : 1; /**< When set (1) and bit 7 of the FPA_INT_SUM
654 register is asserted the FPA will assert an
656 uint64_t q0_perr : 1; /**< When set (1) and bit 6 of the FPA_INT_SUM
657 register is asserted the FPA will assert an
659 uint64_t q0_coff : 1; /**< When set (1) and bit 5 of the FPA_INT_SUM
660 register is asserted the FPA will assert an
662 uint64_t q0_und : 1; /**< When set (1) and bit 4 of the FPA_INT_SUM
663 register is asserted the FPA will assert an
665 uint64_t fed1_dbe : 1; /**< When set (1) and bit 3 of the FPA_INT_SUM
666 register is asserted the FPA will assert an
668 uint64_t fed1_sbe : 1; /**< When set (1) and bit 2 of the FPA_INT_SUM
669 register is asserted the FPA will assert an
671 uint64_t fed0_dbe : 1; /**< When set (1) and bit 1 of the FPA_INT_SUM
672 register is asserted the FPA will assert an
674 uint64_t fed0_sbe : 1; /**< When set (1) and bit 0 of the FPA_INT_SUM
675 register is asserted the FPA will assert an
678 uint64_t fed0_sbe : 1;
679 uint64_t fed0_dbe : 1;
680 uint64_t fed1_sbe : 1;
681 uint64_t fed1_dbe : 1;
683 uint64_t q0_coff : 1;
684 uint64_t q0_perr : 1;
686 uint64_t q1_coff : 1;
687 uint64_t q1_perr : 1;
689 uint64_t q2_coff : 1;
690 uint64_t q2_perr : 1;
692 uint64_t q3_coff : 1;
693 uint64_t q3_perr : 1;
695 uint64_t q4_coff : 1;
696 uint64_t q4_perr : 1;
698 uint64_t q5_coff : 1;
699 uint64_t q5_perr : 1;
701 uint64_t q6_coff : 1;
702 uint64_t q6_perr : 1;
704 uint64_t q7_coff : 1;
705 uint64_t q7_perr : 1;
706 uint64_t pool0th : 1;
707 uint64_t pool1th : 1;
708 uint64_t pool2th : 1;
709 uint64_t pool3th : 1;
710 uint64_t pool4th : 1;
711 uint64_t pool5th : 1;
712 uint64_t pool6th : 1;
713 uint64_t pool7th : 1;
722 uint64_t reserved_44_63 : 20;
725 struct cvmx_fpa_int_enb_cn30xx
727 #if __BYTE_ORDER == __BIG_ENDIAN
728 uint64_t reserved_28_63 : 36;
729 uint64_t q7_perr : 1; /**< When set (1) and bit 27 of the FPA_INT_SUM
730 register is asserted the FPA will assert an
732 uint64_t q7_coff : 1; /**< When set (1) and bit 26 of the FPA_INT_SUM
733 register is asserted the FPA will assert an
735 uint64_t q7_und : 1; /**< When set (1) and bit 25 of the FPA_INT_SUM
736 register is asserted the FPA will assert an
738 uint64_t q6_perr : 1; /**< When set (1) and bit 24 of the FPA_INT_SUM
739 register is asserted the FPA will assert an
741 uint64_t q6_coff : 1; /**< When set (1) and bit 23 of the FPA_INT_SUM
742 register is asserted the FPA will assert an
744 uint64_t q6_und : 1; /**< When set (1) and bit 22 of the FPA_INT_SUM
745 register is asserted the FPA will assert an
747 uint64_t q5_perr : 1; /**< When set (1) and bit 21 of the FPA_INT_SUM
748 register is asserted the FPA will assert an
750 uint64_t q5_coff : 1; /**< When set (1) and bit 20 of the FPA_INT_SUM
751 register is asserted the FPA will assert an
753 uint64_t q5_und : 1; /**< When set (1) and bit 19 of the FPA_INT_SUM
754 register is asserted the FPA will assert an
756 uint64_t q4_perr : 1; /**< When set (1) and bit 18 of the FPA_INT_SUM
757 register is asserted the FPA will assert an
759 uint64_t q4_coff : 1; /**< When set (1) and bit 17 of the FPA_INT_SUM
760 register is asserted the FPA will assert an
762 uint64_t q4_und : 1; /**< When set (1) and bit 16 of the FPA_INT_SUM
763 register is asserted the FPA will assert an
765 uint64_t q3_perr : 1; /**< When set (1) and bit 15 of the FPA_INT_SUM
766 register is asserted the FPA will assert an
768 uint64_t q3_coff : 1; /**< When set (1) and bit 14 of the FPA_INT_SUM
769 register is asserted the FPA will assert an
771 uint64_t q3_und : 1; /**< When set (1) and bit 13 of the FPA_INT_SUM
772 register is asserted the FPA will assert an
774 uint64_t q2_perr : 1; /**< When set (1) and bit 12 of the FPA_INT_SUM
775 register is asserted the FPA will assert an
777 uint64_t q2_coff : 1; /**< When set (1) and bit 11 of the FPA_INT_SUM
778 register is asserted the FPA will assert an
780 uint64_t q2_und : 1; /**< When set (1) and bit 10 of the FPA_INT_SUM
781 register is asserted the FPA will assert an
783 uint64_t q1_perr : 1; /**< When set (1) and bit 9 of the FPA_INT_SUM
784 register is asserted the FPA will assert an
786 uint64_t q1_coff : 1; /**< When set (1) and bit 8 of the FPA_INT_SUM
787 register is asserted the FPA will assert an
789 uint64_t q1_und : 1; /**< When set (1) and bit 7 of the FPA_INT_SUM
790 register is asserted the FPA will assert an
792 uint64_t q0_perr : 1; /**< When set (1) and bit 6 of the FPA_INT_SUM
793 register is asserted the FPA will assert an
795 uint64_t q0_coff : 1; /**< When set (1) and bit 5 of the FPA_INT_SUM
796 register is asserted the FPA will assert an
798 uint64_t q0_und : 1; /**< When set (1) and bit 4 of the FPA_INT_SUM
799 register is asserted the FPA will assert an
801 uint64_t fed1_dbe : 1; /**< When set (1) and bit 3 of the FPA_INT_SUM
802 register is asserted the FPA will assert an
804 uint64_t fed1_sbe : 1; /**< When set (1) and bit 2 of the FPA_INT_SUM
805 register is asserted the FPA will assert an
807 uint64_t fed0_dbe : 1; /**< When set (1) and bit 1 of the FPA_INT_SUM
808 register is asserted the FPA will assert an
810 uint64_t fed0_sbe : 1; /**< When set (1) and bit 0 of the FPA_INT_SUM
811 register is asserted the FPA will assert an
814 uint64_t fed0_sbe : 1;
815 uint64_t fed0_dbe : 1;
816 uint64_t fed1_sbe : 1;
817 uint64_t fed1_dbe : 1;
819 uint64_t q0_coff : 1;
820 uint64_t q0_perr : 1;
822 uint64_t q1_coff : 1;
823 uint64_t q1_perr : 1;
825 uint64_t q2_coff : 1;
826 uint64_t q2_perr : 1;
828 uint64_t q3_coff : 1;
829 uint64_t q3_perr : 1;
831 uint64_t q4_coff : 1;
832 uint64_t q4_perr : 1;
834 uint64_t q5_coff : 1;
835 uint64_t q5_perr : 1;
837 uint64_t q6_coff : 1;
838 uint64_t q6_perr : 1;
840 uint64_t q7_coff : 1;
841 uint64_t q7_perr : 1;
842 uint64_t reserved_28_63 : 36;
845 struct cvmx_fpa_int_enb_cn30xx cn31xx;
846 struct cvmx_fpa_int_enb_cn30xx cn38xx;
847 struct cvmx_fpa_int_enb_cn30xx cn38xxp2;
848 struct cvmx_fpa_int_enb_cn30xx cn50xx;
849 struct cvmx_fpa_int_enb_cn30xx cn52xx;
850 struct cvmx_fpa_int_enb_cn30xx cn52xxp1;
851 struct cvmx_fpa_int_enb_cn30xx cn56xx;
852 struct cvmx_fpa_int_enb_cn30xx cn56xxp1;
853 struct cvmx_fpa_int_enb_cn30xx cn58xx;
854 struct cvmx_fpa_int_enb_cn30xx cn58xxp1;
855 struct cvmx_fpa_int_enb_s cn63xx;
856 struct cvmx_fpa_int_enb_cn30xx cn63xxp1;
858 typedef union cvmx_fpa_int_enb cvmx_fpa_int_enb_t;
/**
 * cvmx_fpa_int_sum
 *
 * FPA_INT_SUM = FPA's Interrupt Summary Register
 *
 * Contains the different interrupt summary bits of the FPA.
 */
867 union cvmx_fpa_int_sum
870 struct cvmx_fpa_int_sum_s
872 #if __BYTE_ORDER == __BIG_ENDIAN
873 uint64_t reserved_44_63 : 20;
874 uint64_t free7 : 1; /**< When a pointer for POOL7 is freed bit is set. */
875 uint64_t free6 : 1; /**< When a pointer for POOL6 is freed bit is set. */
876 uint64_t free5 : 1; /**< When a pointer for POOL5 is freed bit is set. */
877 uint64_t free4 : 1; /**< When a pointer for POOL4 is freed bit is set. */
878 uint64_t free3 : 1; /**< When a pointer for POOL3 is freed bit is set. */
879 uint64_t free2 : 1; /**< When a pointer for POOL2 is freed bit is set. */
880 uint64_t free1 : 1; /**< When a pointer for POOL1 is freed bit is set. */
881 uint64_t free0 : 1; /**< When a pointer for POOL0 is freed bit is set. */
882 uint64_t pool7th : 1; /**< Set when FPA_QUE7_AVAILABLE is equal to
883 FPA_POOL7_THRESHOLD[THRESH] and a pointer is
884 allocated or de-allocated. */
885 uint64_t pool6th : 1; /**< Set when FPA_QUE6_AVAILABLE is equal to
886 FPA_POOL6_THRESHOLD[THRESH] and a pointer is
887 allocated or de-allocated. */
888 uint64_t pool5th : 1; /**< Set when FPA_QUE5_AVAILABLE is equal to
889 FPA_POOL5_THRESHOLD[THRESH] and a pointer is
890 allocated or de-allocated. */
891 uint64_t pool4th : 1; /**< Set when FPA_QUE4_AVAILABLE is equal to
892 FPA_POOL4_THRESHOLD[THRESH] and a pointer is
893 allocated or de-allocated. */
894 uint64_t pool3th : 1; /**< Set when FPA_QUE3_AVAILABLE is equal to
895 FPA_POOL3_THRESHOLD[THRESH] and a pointer is
896 allocated or de-allocated. */
897 uint64_t pool2th : 1; /**< Set when FPA_QUE2_AVAILABLE is equal to
898 FPA_POOL2_THRESHOLD[THRESH] and a pointer is
899 allocated or de-allocated. */
900 uint64_t pool1th : 1; /**< Set when FPA_QUE1_AVAILABLE is equal to
901 FPA_POOL1_THRESHOLD[THRESH] and a pointer is
902 allocated or de-allocated. */
903 uint64_t pool0th : 1; /**< Set when FPA_QUE0_AVAILABLE is equal to
904 FPA_POOL`_THRESHOLD[THRESH] and a pointer is
905 allocated or de-allocated. */
906 uint64_t q7_perr : 1; /**< Set when a Queue0 pointer read from the stack in
907 the L2C does not have the FPA owner ship bit set. */
908 uint64_t q7_coff : 1; /**< Set when a Queue0 stack end tag is present and
909 the count available is greater than than pointers
910 present in the FPA. */
911 uint64_t q7_und : 1; /**< Set when a Queue0 page count available goes
913 uint64_t q6_perr : 1; /**< Set when a Queue0 pointer read from the stack in
914 the L2C does not have the FPA owner ship bit set. */
915 uint64_t q6_coff : 1; /**< Set when a Queue0 stack end tag is present and
916 the count available is greater than than pointers
917 present in the FPA. */
918 uint64_t q6_und : 1; /**< Set when a Queue0 page count available goes
920 uint64_t q5_perr : 1; /**< Set when a Queue0 pointer read from the stack in
921 the L2C does not have the FPA owner ship bit set. */
922 uint64_t q5_coff : 1; /**< Set when a Queue0 stack end tag is present and
923 the count available is greater than than pointers
924 present in the FPA. */
925 uint64_t q5_und : 1; /**< Set when a Queue0 page count available goes
927 uint64_t q4_perr : 1; /**< Set when a Queue0 pointer read from the stack in
928 the L2C does not have the FPA owner ship bit set. */
929 uint64_t q4_coff : 1; /**< Set when a Queue0 stack end tag is present and
930 the count available is greater than than pointers
931 present in the FPA. */
932 uint64_t q4_und : 1; /**< Set when a Queue0 page count available goes
934 uint64_t q3_perr : 1; /**< Set when a Queue0 pointer read from the stack in
935 the L2C does not have the FPA owner ship bit set. */
936 uint64_t q3_coff : 1; /**< Set when a Queue0 stack end tag is present and
937 the count available is greater than than pointers
938 present in the FPA. */
939 uint64_t q3_und : 1; /**< Set when a Queue0 page count available goes
941 uint64_t q2_perr : 1; /**< Set when a Queue0 pointer read from the stack in
942 the L2C does not have the FPA owner ship bit set. */
943 uint64_t q2_coff : 1; /**< Set when a Queue0 stack end tag is present and
944 the count available is greater than than pointers
945 present in the FPA. */
946 uint64_t q2_und : 1; /**< Set when a Queue0 page count available goes
948 uint64_t q1_perr : 1; /**< Set when a Queue0 pointer read from the stack in
949 the L2C does not have the FPA owner ship bit set. */
950 uint64_t q1_coff : 1; /**< Set when a Queue0 stack end tag is present and
951 the count available is greater than pointers
952 present in the FPA. */
953 uint64_t q1_und : 1; /**< Set when a Queue0 page count available goes
955 uint64_t q0_perr : 1; /**< Set when a Queue0 pointer read from the stack in
956 the L2C does not have the FPA owner ship bit set. */
957 uint64_t q0_coff : 1; /**< Set when a Queue0 stack end tag is present and
958 the count available is greater than pointers
959 present in the FPA. */
960 uint64_t q0_und : 1; /**< Set when a Queue0 page count available goes
962 uint64_t fed1_dbe : 1; /**< Set when a Double Bit Error is detected in FPF1. */
963 uint64_t fed1_sbe : 1; /**< Set when a Single Bit Error is detected in FPF1. */
964 uint64_t fed0_dbe : 1; /**< Set when a Double Bit Error is detected in FPF0. */
965 uint64_t fed0_sbe : 1; /**< Set when a Single Bit Error is detected in FPF0. */
967 uint64_t fed0_sbe : 1;
968 uint64_t fed0_dbe : 1;
969 uint64_t fed1_sbe : 1;
970 uint64_t fed1_dbe : 1;
972 uint64_t q0_coff : 1;
973 uint64_t q0_perr : 1;
975 uint64_t q1_coff : 1;
976 uint64_t q1_perr : 1;
978 uint64_t q2_coff : 1;
979 uint64_t q2_perr : 1;
981 uint64_t q3_coff : 1;
982 uint64_t q3_perr : 1;
984 uint64_t q4_coff : 1;
985 uint64_t q4_perr : 1;
987 uint64_t q5_coff : 1;
988 uint64_t q5_perr : 1;
990 uint64_t q6_coff : 1;
991 uint64_t q6_perr : 1;
993 uint64_t q7_coff : 1;
994 uint64_t q7_perr : 1;
995 uint64_t pool0th : 1;
996 uint64_t pool1th : 1;
997 uint64_t pool2th : 1;
998 uint64_t pool3th : 1;
999 uint64_t pool4th : 1;
1000 uint64_t pool5th : 1;
1001 uint64_t pool6th : 1;
1002 uint64_t pool7th : 1;
1011 uint64_t reserved_44_63 : 20;
1014 struct cvmx_fpa_int_sum_cn30xx
1016 #if __BYTE_ORDER == __BIG_ENDIAN
1017 uint64_t reserved_28_63 : 36;
1018 uint64_t q7_perr : 1; /**< Set when a Queue0 pointer read from the stack in
1019 the L2C does not have the FPA owner ship bit set. */
1020 uint64_t q7_coff : 1; /**< Set when a Queue0 stack end tag is present and
1021 the count available is greater than than pointers
1022 present in the FPA. */
1023 uint64_t q7_und : 1; /**< Set when a Queue0 page count available goes
1025 uint64_t q6_perr : 1; /**< Set when a Queue0 pointer read from the stack in
1026 the L2C does not have the FPA owner ship bit set. */
1027 uint64_t q6_coff : 1; /**< Set when a Queue0 stack end tag is present and
1028 the count available is greater than than pointers
1029 present in the FPA. */
1030 uint64_t q6_und : 1; /**< Set when a Queue0 page count available goes
1032 uint64_t q5_perr : 1; /**< Set when a Queue0 pointer read from the stack in
1033 the L2C does not have the FPA owner ship bit set. */
1034 uint64_t q5_coff : 1; /**< Set when a Queue0 stack end tag is present and
1035 the count available is greater than than pointers
1036 present in the FPA. */
1037 uint64_t q5_und : 1; /**< Set when a Queue0 page count available goes
1039 uint64_t q4_perr : 1; /**< Set when a Queue0 pointer read from the stack in
1040 the L2C does not have the FPA owner ship bit set. */
1041 uint64_t q4_coff : 1; /**< Set when a Queue0 stack end tag is present and
1042 the count available is greater than than pointers
1043 present in the FPA. */
1044 uint64_t q4_und : 1; /**< Set when a Queue0 page count available goes
1046 uint64_t q3_perr : 1; /**< Set when a Queue0 pointer read from the stack in
1047 the L2C does not have the FPA owner ship bit set. */
1048 uint64_t q3_coff : 1; /**< Set when a Queue0 stack end tag is present and
1049 the count available is greater than than pointers
1050 present in the FPA. */
1051 uint64_t q3_und : 1; /**< Set when a Queue0 page count available goes
1053 uint64_t q2_perr : 1; /**< Set when a Queue0 pointer read from the stack in
1054 the L2C does not have the FPA owner ship bit set. */
1055 uint64_t q2_coff : 1; /**< Set when a Queue0 stack end tag is present and
1056 the count available is greater than than pointers
1057 present in the FPA. */
1058 uint64_t q2_und : 1; /**< Set when a Queue0 page count available goes
1060 uint64_t q1_perr : 1; /**< Set when a Queue0 pointer read from the stack in
1061 the L2C does not have the FPA owner ship bit set. */
1062 uint64_t q1_coff : 1; /**< Set when a Queue0 stack end tag is present and
1063 the count available is greater than pointers
1064 present in the FPA. */
1065 uint64_t q1_und : 1; /**< Set when a Queue0 page count available goes
1067 uint64_t q0_perr : 1; /**< Set when a Queue0 pointer read from the stack in
1068 the L2C does not have the FPA owner ship bit set. */
1069 uint64_t q0_coff : 1; /**< Set when a Queue0 stack end tag is present and
1070 the count available is greater than pointers
1071 present in the FPA. */
1072 uint64_t q0_und : 1; /**< Set when a Queue0 page count available goes
1074 uint64_t fed1_dbe : 1; /**< Set when a Double Bit Error is detected in FPF1. */
1075 uint64_t fed1_sbe : 1; /**< Set when a Single Bit Error is detected in FPF1. */
1076 uint64_t fed0_dbe : 1; /**< Set when a Double Bit Error is detected in FPF0. */
1077 uint64_t fed0_sbe : 1; /**< Set when a Single Bit Error is detected in FPF0. */
1079 uint64_t fed0_sbe : 1;
1080 uint64_t fed0_dbe : 1;
1081 uint64_t fed1_sbe : 1;
1082 uint64_t fed1_dbe : 1;
1083 uint64_t q0_und : 1;
1084 uint64_t q0_coff : 1;
1085 uint64_t q0_perr : 1;
1086 uint64_t q1_und : 1;
1087 uint64_t q1_coff : 1;
1088 uint64_t q1_perr : 1;
1089 uint64_t q2_und : 1;
1090 uint64_t q2_coff : 1;
1091 uint64_t q2_perr : 1;
1092 uint64_t q3_und : 1;
1093 uint64_t q3_coff : 1;
1094 uint64_t q3_perr : 1;
1095 uint64_t q4_und : 1;
1096 uint64_t q4_coff : 1;
1097 uint64_t q4_perr : 1;
1098 uint64_t q5_und : 1;
1099 uint64_t q5_coff : 1;
1100 uint64_t q5_perr : 1;
1101 uint64_t q6_und : 1;
1102 uint64_t q6_coff : 1;
1103 uint64_t q6_perr : 1;
1104 uint64_t q7_und : 1;
1105 uint64_t q7_coff : 1;
1106 uint64_t q7_perr : 1;
1107 uint64_t reserved_28_63 : 36;
1110 struct cvmx_fpa_int_sum_cn30xx cn31xx;
1111 struct cvmx_fpa_int_sum_cn30xx cn38xx;
1112 struct cvmx_fpa_int_sum_cn30xx cn38xxp2;
1113 struct cvmx_fpa_int_sum_cn30xx cn50xx;
1114 struct cvmx_fpa_int_sum_cn30xx cn52xx;
1115 struct cvmx_fpa_int_sum_cn30xx cn52xxp1;
1116 struct cvmx_fpa_int_sum_cn30xx cn56xx;
1117 struct cvmx_fpa_int_sum_cn30xx cn56xxp1;
1118 struct cvmx_fpa_int_sum_cn30xx cn58xx;
1119 struct cvmx_fpa_int_sum_cn30xx cn58xxp1;
1120 struct cvmx_fpa_int_sum_s cn63xx;
1121 struct cvmx_fpa_int_sum_cn30xx cn63xxp1;
1123 typedef union cvmx_fpa_int_sum cvmx_fpa_int_sum_t;
/**
 * cvmx_fpa_packet_threshold
 *
 * FPA_PACKET_THRESHOLD = FPA's Packet Threshold
 *
 * When the value of FPA_QUE0_AVAILABLE[QUE_SIZ] is less than the value of this register a low pool count signal is sent to the
 * PCIe packet instruction engine (to make it stop reading instructions) and to the Packet-Arbiter informing it to not give grants
 * to packets MAC with the exception of the PCIe MAC.
 */
union cvmx_fpa_packet_threshold
{
    uint64_t u64;
    struct cvmx_fpa_packet_threshold_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_32_63 : 32;
    uint64_t thresh : 32; /**< Packet Threshold. */
#else
    uint64_t thresh : 32;
    uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_fpa_packet_threshold_s cn63xx;
};
typedef union cvmx_fpa_packet_threshold cvmx_fpa_packet_threshold_t;
/**
 * cvmx_fpa_pool#_threshold
 *
 * FPA_POOLX_THRESHOLD = FPA's Pool 0-7 Threshold
 *
 * When the value of FPA_QUEX_AVAILABLE is equal to FPA_POOLX_THRESHOLD[THRESH] when a pointer is allocated
 * or deallocated, set interrupt FPA_INT_SUM[POOLXTH].
 */
union cvmx_fpa_poolx_threshold
{
    uint64_t u64;
    struct cvmx_fpa_poolx_threshold_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_29_63 : 35;
    uint64_t thresh : 29; /**< The Threshold. */
#else
    uint64_t thresh : 29;
    uint64_t reserved_29_63 : 35;
#endif
    } s;
    struct cvmx_fpa_poolx_threshold_s cn63xx;
};
typedef union cvmx_fpa_poolx_threshold cvmx_fpa_poolx_threshold_t;
/**
 * cvmx_fpa_que#_available
 *
 * FPA_QUEX_PAGES_AVAILABLE = FPA's Queue 0-7 Free Page Available Register
 *
 * The number of page pointers that are available in the FPA and local DRAM.
 */
union cvmx_fpa_quex_available
{
    uint64_t u64;
    struct cvmx_fpa_quex_available_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_29_63 : 35;
    uint64_t que_siz : 29; /**< The number of free pages available in this Queue.
                                In PASS-1 this field was [25:0]. */
#else
    uint64_t que_siz : 29;
    uint64_t reserved_29_63 : 35;
#endif
    } s;
    struct cvmx_fpa_quex_available_s cn30xx;
    struct cvmx_fpa_quex_available_s cn31xx;
    struct cvmx_fpa_quex_available_s cn38xx;
    struct cvmx_fpa_quex_available_s cn38xxp2;
    struct cvmx_fpa_quex_available_s cn50xx;
    struct cvmx_fpa_quex_available_s cn52xx;
    struct cvmx_fpa_quex_available_s cn52xxp1;
    struct cvmx_fpa_quex_available_s cn56xx;
    struct cvmx_fpa_quex_available_s cn56xxp1;
    struct cvmx_fpa_quex_available_s cn58xx;
    struct cvmx_fpa_quex_available_s cn58xxp1;
    struct cvmx_fpa_quex_available_s cn63xx;
    struct cvmx_fpa_quex_available_s cn63xxp1;
};
typedef union cvmx_fpa_quex_available cvmx_fpa_quex_available_t;
/**
 * cvmx_fpa_que#_page_index
 *
 * FPA_QUE0_PAGE_INDEX = FPA's Queue0 Page Index
 *
 * The present index page for queue 0 of the FPA, this is a PASS-2 register.
 * This number reflects the number of pages of pointers that have been written to memory.
 */
union cvmx_fpa_quex_page_index
{
    uint64_t u64;
    struct cvmx_fpa_quex_page_index_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_25_63 : 39;
    uint64_t pg_num : 25; /**< Page number. */
#else
    uint64_t pg_num : 25;
    uint64_t reserved_25_63 : 39;
#endif
    } s;
    struct cvmx_fpa_quex_page_index_s cn30xx;
    struct cvmx_fpa_quex_page_index_s cn31xx;
    struct cvmx_fpa_quex_page_index_s cn38xx;
    struct cvmx_fpa_quex_page_index_s cn38xxp2;
    struct cvmx_fpa_quex_page_index_s cn50xx;
    struct cvmx_fpa_quex_page_index_s cn52xx;
    struct cvmx_fpa_quex_page_index_s cn52xxp1;
    struct cvmx_fpa_quex_page_index_s cn56xx;
    struct cvmx_fpa_quex_page_index_s cn56xxp1;
    struct cvmx_fpa_quex_page_index_s cn58xx;
    struct cvmx_fpa_quex_page_index_s cn58xxp1;
    struct cvmx_fpa_quex_page_index_s cn63xx;
    struct cvmx_fpa_quex_page_index_s cn63xxp1;
};
typedef union cvmx_fpa_quex_page_index cvmx_fpa_quex_page_index_t;
/**
 * cvmx_fpa_que_act
 *
 * FPA_QUE_ACT = FPA's Queue# Actual Page Index
 *
 * When a INT_SUM[PERR#] occurs this will be latched with the value read from L2C. PASS-2 register.
 * This is latched on the first error and will not latch again until all errors are cleared.
 */
union cvmx_fpa_que_act
{
    uint64_t u64;
    struct cvmx_fpa_que_act_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_29_63 : 35;
    uint64_t act_que : 3; /**< FPA-queue-number read from memory. */
    uint64_t act_indx : 26; /**< Page number read from memory. */
#else
    uint64_t act_indx : 26;
    uint64_t act_que : 3;
    uint64_t reserved_29_63 : 35;
#endif
    } s;
    struct cvmx_fpa_que_act_s cn30xx;
    struct cvmx_fpa_que_act_s cn31xx;
    struct cvmx_fpa_que_act_s cn38xx;
    struct cvmx_fpa_que_act_s cn38xxp2;
    struct cvmx_fpa_que_act_s cn50xx;
    struct cvmx_fpa_que_act_s cn52xx;
    struct cvmx_fpa_que_act_s cn52xxp1;
    struct cvmx_fpa_que_act_s cn56xx;
    struct cvmx_fpa_que_act_s cn56xxp1;
    struct cvmx_fpa_que_act_s cn58xx;
    struct cvmx_fpa_que_act_s cn58xxp1;
    struct cvmx_fpa_que_act_s cn63xx;
    struct cvmx_fpa_que_act_s cn63xxp1;
};
typedef union cvmx_fpa_que_act cvmx_fpa_que_act_t;
/**
 * cvmx_fpa_que_exp
 *
 * FPA_QUE_EXP = FPA's Queue# Expected Page Index
 *
 * When a INT_SUM[PERR#] occurs this will be latched with the expected value. PASS-2 register.
 * This is latched on the first error and will not latch again until all errors are cleared.
 */
union cvmx_fpa_que_exp
{
    uint64_t u64;
    struct cvmx_fpa_que_exp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_29_63 : 35;
    uint64_t exp_que : 3; /**< Expected fpa-queue-number read from memory. */
    uint64_t exp_indx : 26; /**< Expected page number read from memory. */
#else
    uint64_t exp_indx : 26;
    uint64_t exp_que : 3;
    uint64_t reserved_29_63 : 35;
#endif
    } s;
    struct cvmx_fpa_que_exp_s cn30xx;
    struct cvmx_fpa_que_exp_s cn31xx;
    struct cvmx_fpa_que_exp_s cn38xx;
    struct cvmx_fpa_que_exp_s cn38xxp2;
    struct cvmx_fpa_que_exp_s cn50xx;
    struct cvmx_fpa_que_exp_s cn52xx;
    struct cvmx_fpa_que_exp_s cn52xxp1;
    struct cvmx_fpa_que_exp_s cn56xx;
    struct cvmx_fpa_que_exp_s cn56xxp1;
    struct cvmx_fpa_que_exp_s cn58xx;
    struct cvmx_fpa_que_exp_s cn58xxp1;
    struct cvmx_fpa_que_exp_s cn63xx;
    struct cvmx_fpa_que_exp_s cn63xxp1;
};
typedef union cvmx_fpa_que_exp cvmx_fpa_que_exp_t;
/**
 * cvmx_fpa_wart_ctl
 *
 * FPA_WART_CTL = FPA's WART Control
 *
 * Control and status for the WART block.
 */
union cvmx_fpa_wart_ctl
{
    uint64_t u64;
    struct cvmx_fpa_wart_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_16_63 : 48;
    uint64_t ctl : 16; /**< Control information. */
#else
    uint64_t ctl : 16;
    uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_fpa_wart_ctl_s cn30xx;
    struct cvmx_fpa_wart_ctl_s cn31xx;
    struct cvmx_fpa_wart_ctl_s cn38xx;
    struct cvmx_fpa_wart_ctl_s cn38xxp2;
    struct cvmx_fpa_wart_ctl_s cn50xx;
    struct cvmx_fpa_wart_ctl_s cn52xx;
    struct cvmx_fpa_wart_ctl_s cn52xxp1;
    struct cvmx_fpa_wart_ctl_s cn56xx;
    struct cvmx_fpa_wart_ctl_s cn56xxp1;
    struct cvmx_fpa_wart_ctl_s cn58xx;
    struct cvmx_fpa_wart_ctl_s cn58xxp1;
};
typedef union cvmx_fpa_wart_ctl cvmx_fpa_wart_ctl_t;
/**
 * cvmx_fpa_wart_status
 *
 * FPA_WART_STATUS = FPA's WART Status
 *
 * Control and status for the WART block.
 */
union cvmx_fpa_wart_status
{
    uint64_t u64;
    struct cvmx_fpa_wart_status_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_32_63 : 32;
    uint64_t status : 32; /**< Status information. */
#else
    uint64_t status : 32;
    uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_fpa_wart_status_s cn30xx;
    struct cvmx_fpa_wart_status_s cn31xx;
    struct cvmx_fpa_wart_status_s cn38xx;
    struct cvmx_fpa_wart_status_s cn38xxp2;
    struct cvmx_fpa_wart_status_s cn50xx;
    struct cvmx_fpa_wart_status_s cn52xx;
    struct cvmx_fpa_wart_status_s cn52xxp1;
    struct cvmx_fpa_wart_status_s cn56xx;
    struct cvmx_fpa_wart_status_s cn56xxp1;
    struct cvmx_fpa_wart_status_s cn58xx;
    struct cvmx_fpa_wart_status_s cn58xxp1;
};
typedef union cvmx_fpa_wart_status cvmx_fpa_wart_status_t;
/**
 * cvmx_fpa_wqe_threshold
 *
 * FPA_WQE_THRESHOLD = FPA's WQE Threshold
 *
 * When the value of FPA_QUE#_AVAILABLE[QUE_SIZ] (# is determined by the value of IPD_WQE_FPA_QUEUE) is less than the value of this
 * register a low pool count signal is sent to the PCIe packet instruction engine (to make it stop reading instructions) and to the
 * Packet-Arbiter informing it to not give grants to packets MAC with the exception of the PCIe MAC.
 */
union cvmx_fpa_wqe_threshold
{
    uint64_t u64;
    struct cvmx_fpa_wqe_threshold_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_32_63 : 32;
    uint64_t thresh : 32; /**< WQE Threshold. */
#else
    uint64_t thresh : 32;
    uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_fpa_wqe_threshold_s cn63xx;
};
typedef union cvmx_fpa_wqe_threshold cvmx_fpa_wqe_threshold_t;