/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/
/**
 * @file
 *
 * Configuration and status register (CSR) type definitions for
 * Octeon tim.
 *
 * This file is auto generated. Do not edit.
 */
52 #ifndef __CVMX_TIM_TYPEDEFS_H__
53 #define __CVMX_TIM_TYPEDEFS_H__
55 #define CVMX_TIM_MEM_DEBUG0 (CVMX_ADD_IO_SEG(0x0001180058001100ull))
56 #define CVMX_TIM_MEM_DEBUG1 (CVMX_ADD_IO_SEG(0x0001180058001108ull))
57 #define CVMX_TIM_MEM_DEBUG2 (CVMX_ADD_IO_SEG(0x0001180058001110ull))
58 #define CVMX_TIM_MEM_RING0 (CVMX_ADD_IO_SEG(0x0001180058001000ull))
59 #define CVMX_TIM_MEM_RING1 (CVMX_ADD_IO_SEG(0x0001180058001008ull))
60 #define CVMX_TIM_REG_BIST_RESULT (CVMX_ADD_IO_SEG(0x0001180058000080ull))
61 #define CVMX_TIM_REG_ERROR (CVMX_ADD_IO_SEG(0x0001180058000088ull))
62 #define CVMX_TIM_REG_FLAGS (CVMX_ADD_IO_SEG(0x0001180058000000ull))
63 #define CVMX_TIM_REG_INT_MASK (CVMX_ADD_IO_SEG(0x0001180058000090ull))
64 #define CVMX_TIM_REG_READ_IDX (CVMX_ADD_IO_SEG(0x0001180058000008ull))
/**
 * cvmx_tim_mem_debug0
 *
 * Notes:
 * Internal per-ring state intended for debug use only - tim.ctl[47:0]
 * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any
 * CSR read operations to this address can be performed.
 */
union cvmx_tim_mem_debug0 {
	uint64_t u64;
	struct cvmx_tim_mem_debug0_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63               : 16;
	uint64_t ena                          : 1;  /**< Ring timer enable */
	uint64_t reserved_46_46               : 1;
	uint64_t count                        : 22; /**< Time offset for the ring
                                                         Set to INTERVAL and counts down by 1 every 1024
                                                         cycles when ENA==1. The HW forces a bucket
                                                         traversal (and resets COUNT to INTERVAL) whenever
                                                         the decrement would cause COUNT to go negative.
                                                         COUNT is unpredictable whenever ENA==0.
                                                         COUNT is reset to INTERVAL whenever TIM_MEM_RING1
                                                         is written for the ring. */
	uint64_t reserved_22_23               : 2;
	uint64_t interval                     : 22; /**< Timer interval - 1 */
#else
	uint64_t interval                     : 22;
	uint64_t reserved_22_23               : 2;
	uint64_t count                        : 22;
	uint64_t reserved_46_46               : 1;
	uint64_t ena                          : 1;
	uint64_t reserved_48_63               : 16;
#endif
	} s;
	struct cvmx_tim_mem_debug0_s cn30xx;
	struct cvmx_tim_mem_debug0_s cn31xx;
	struct cvmx_tim_mem_debug0_s cn38xx;
	struct cvmx_tim_mem_debug0_s cn38xxp2;
	struct cvmx_tim_mem_debug0_s cn50xx;
	struct cvmx_tim_mem_debug0_s cn52xx;
	struct cvmx_tim_mem_debug0_s cn52xxp1;
	struct cvmx_tim_mem_debug0_s cn56xx;
	struct cvmx_tim_mem_debug0_s cn56xxp1;
	struct cvmx_tim_mem_debug0_s cn58xx;
	struct cvmx_tim_mem_debug0_s cn58xxp1;
	struct cvmx_tim_mem_debug0_s cn63xx;
	struct cvmx_tim_mem_debug0_s cn63xxp1;
};
typedef union cvmx_tim_mem_debug0 cvmx_tim_mem_debug0_t;
/**
 * cvmx_tim_mem_debug1
 *
 * Notes:
 * Internal per-ring state intended for debug use only - tim.sta[63:0]
 * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any
 * CSR read operations to this address can be performed.
 */
union cvmx_tim_mem_debug1 {
	uint64_t u64;
	struct cvmx_tim_mem_debug1_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t bucket                       : 13; /**< Current bucket[12:0]
                                                         Reset to 0 whenever TIM_MEM_RING0 is written for
                                                         the ring. Incremented (modulo BSIZE) once per
                                                         bucket traversal.
                                                         See TIM_MEM_DEBUG2[BUCKET]. */
	uint64_t base                         : 31; /**< Pointer[35:5] to bucket[0] */
	uint64_t bsize                        : 20; /**< Number of buckets - 1 */
#else
	uint64_t bsize                        : 20;
	uint64_t base                         : 31;
	uint64_t bucket                       : 13;
#endif
	} s;
	struct cvmx_tim_mem_debug1_s cn30xx;
	struct cvmx_tim_mem_debug1_s cn31xx;
	struct cvmx_tim_mem_debug1_s cn38xx;
	struct cvmx_tim_mem_debug1_s cn38xxp2;
	struct cvmx_tim_mem_debug1_s cn50xx;
	struct cvmx_tim_mem_debug1_s cn52xx;
	struct cvmx_tim_mem_debug1_s cn52xxp1;
	struct cvmx_tim_mem_debug1_s cn56xx;
	struct cvmx_tim_mem_debug1_s cn56xxp1;
	struct cvmx_tim_mem_debug1_s cn58xx;
	struct cvmx_tim_mem_debug1_s cn58xxp1;
	struct cvmx_tim_mem_debug1_s cn63xx;
	struct cvmx_tim_mem_debug1_s cn63xxp1;
};
typedef union cvmx_tim_mem_debug1 cvmx_tim_mem_debug1_t;
/**
 * cvmx_tim_mem_debug2
 *
 * Notes:
 * Internal per-ring state intended for debug use only - tim.sta[95:64]
 * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any
 * CSR read operations to this address can be performed.
 */
union cvmx_tim_mem_debug2 {
	uint64_t u64;
	struct cvmx_tim_mem_debug2_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_24_63               : 40;
	uint64_t cpool                        : 3;  /**< Free list used to free chunks */
	uint64_t csize                        : 13; /**< Number of words per chunk */
	uint64_t reserved_7_7                 : 1;
	uint64_t bucket                       : 7;  /**< Current bucket[19:13]
                                                         See TIM_MEM_DEBUG1[BUCKET]. */
#else
	uint64_t bucket                       : 7;
	uint64_t reserved_7_7                 : 1;
	uint64_t csize                        : 13;
	uint64_t cpool                        : 3;
	uint64_t reserved_24_63               : 40;
#endif
	} s;
	struct cvmx_tim_mem_debug2_s cn30xx;
	struct cvmx_tim_mem_debug2_s cn31xx;
	struct cvmx_tim_mem_debug2_s cn38xx;
	struct cvmx_tim_mem_debug2_s cn38xxp2;
	struct cvmx_tim_mem_debug2_s cn50xx;
	struct cvmx_tim_mem_debug2_s cn52xx;
	struct cvmx_tim_mem_debug2_s cn52xxp1;
	struct cvmx_tim_mem_debug2_s cn56xx;
	struct cvmx_tim_mem_debug2_s cn56xxp1;
	struct cvmx_tim_mem_debug2_s cn58xx;
	struct cvmx_tim_mem_debug2_s cn58xxp1;
	struct cvmx_tim_mem_debug2_s cn63xx;
	struct cvmx_tim_mem_debug2_s cn63xxp1;
};
typedef union cvmx_tim_mem_debug2 cvmx_tim_mem_debug2_t;
/**
 * cvmx_tim_mem_ring0
 *
 * Notes:
 * TIM_MEM_RING0 must not be written for a ring when TIM_MEM_RING1[ENA] is set for the ring.
 * Every write to TIM_MEM_RING0 clears the current bucket for the ring. (The current bucket is
 * readable via TIM_MEM_DEBUG2[BUCKET],TIM_MEM_DEBUG1[BUCKET].)
 * BASE is a 32-byte aligned pointer[35:0]. Only pointer[35:5] are stored because pointer[4:0] = 0.
 * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any
 * CSR read operations to this address can be performed.
 */
union cvmx_tim_mem_ring0 {
	uint64_t u64;
	struct cvmx_tim_mem_ring0_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_55_63               : 9;
	uint64_t first_bucket                 : 31; /**< Pointer[35:5] to bucket[0] */
	uint64_t num_buckets                  : 20; /**< Number of buckets - 1 */
	uint64_t ring                         : 4;  /**< Ring ID */
#else
	uint64_t ring                         : 4;
	uint64_t num_buckets                  : 20;
	uint64_t first_bucket                 : 31;
	uint64_t reserved_55_63               : 9;
#endif
	} s;
	struct cvmx_tim_mem_ring0_s cn30xx;
	struct cvmx_tim_mem_ring0_s cn31xx;
	struct cvmx_tim_mem_ring0_s cn38xx;
	struct cvmx_tim_mem_ring0_s cn38xxp2;
	struct cvmx_tim_mem_ring0_s cn50xx;
	struct cvmx_tim_mem_ring0_s cn52xx;
	struct cvmx_tim_mem_ring0_s cn52xxp1;
	struct cvmx_tim_mem_ring0_s cn56xx;
	struct cvmx_tim_mem_ring0_s cn56xxp1;
	struct cvmx_tim_mem_ring0_s cn58xx;
	struct cvmx_tim_mem_ring0_s cn58xxp1;
	struct cvmx_tim_mem_ring0_s cn63xx;
	struct cvmx_tim_mem_ring0_s cn63xxp1;
};
typedef union cvmx_tim_mem_ring0 cvmx_tim_mem_ring0_t;
/**
 * cvmx_tim_mem_ring1
 *
 * Notes:
 * After a 1->0 transition on ENA, the HW will still complete a bucket traversal for the ring
 * if it was pending or active prior to the transition. (SW must delay to ensure the completion
 * of the traversal before reprogramming the ring.)
 * Every write to TIM_MEM_RING1 resets the current time offset for the ring to the INTERVAL value.
 * (The current time offset for the ring is readable via TIM_MEM_DEBUG0[COUNT].)
 * CSIZE must be at least 16. It is illegal to program CSIZE to a value that is less than 16.
 * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any
 * CSR read operations to this address can be performed.
 */
union cvmx_tim_mem_ring1 {
	uint64_t u64;
	struct cvmx_tim_mem_ring1_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_43_63               : 21;
	uint64_t enable                       : 1;  /**< Ring timer enable
                                                         When clear, the ring is disabled and TIM
                                                         will not traverse any new buckets for the ring. */
	uint64_t pool                         : 3;  /**< Free list used to free chunks */
	uint64_t words_per_chunk              : 13; /**< Number of words per chunk */
	uint64_t interval                     : 22; /**< Timer interval - 1, measured in 1024 cycle ticks */
	uint64_t ring                         : 4;  /**< Ring ID */
#else
	uint64_t ring                         : 4;
	uint64_t interval                     : 22;
	uint64_t words_per_chunk              : 13;
	uint64_t pool                         : 3;
	uint64_t enable                       : 1;
	uint64_t reserved_43_63               : 21;
#endif
	} s;
	struct cvmx_tim_mem_ring1_s cn30xx;
	struct cvmx_tim_mem_ring1_s cn31xx;
	struct cvmx_tim_mem_ring1_s cn38xx;
	struct cvmx_tim_mem_ring1_s cn38xxp2;
	struct cvmx_tim_mem_ring1_s cn50xx;
	struct cvmx_tim_mem_ring1_s cn52xx;
	struct cvmx_tim_mem_ring1_s cn52xxp1;
	struct cvmx_tim_mem_ring1_s cn56xx;
	struct cvmx_tim_mem_ring1_s cn56xxp1;
	struct cvmx_tim_mem_ring1_s cn58xx;
	struct cvmx_tim_mem_ring1_s cn58xxp1;
	struct cvmx_tim_mem_ring1_s cn63xx;
	struct cvmx_tim_mem_ring1_s cn63xxp1;
};
typedef union cvmx_tim_mem_ring1 cvmx_tim_mem_ring1_t;
/**
 * cvmx_tim_reg_bist_result
 *
 * Notes:
 * Access to the internal BiST results
 * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail).
 */
union cvmx_tim_reg_bist_result {
	uint64_t u64;
	struct cvmx_tim_reg_bist_result_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63                : 60;
	uint64_t sta                          : 2;  /**< BiST result of the STA memories (0=pass, !0=fail) */
	uint64_t ncb                          : 1;  /**< BiST result of the NCB memories (0=pass, !0=fail) */
	uint64_t ctl                          : 1;  /**< BiST result of the CTL memories (0=pass, !0=fail) */
#else
	uint64_t ctl                          : 1;
	uint64_t ncb                          : 1;
	uint64_t sta                          : 2;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	struct cvmx_tim_reg_bist_result_s cn30xx;
	struct cvmx_tim_reg_bist_result_s cn31xx;
	struct cvmx_tim_reg_bist_result_s cn38xx;
	struct cvmx_tim_reg_bist_result_s cn38xxp2;
	struct cvmx_tim_reg_bist_result_s cn50xx;
	struct cvmx_tim_reg_bist_result_s cn52xx;
	struct cvmx_tim_reg_bist_result_s cn52xxp1;
	struct cvmx_tim_reg_bist_result_s cn56xx;
	struct cvmx_tim_reg_bist_result_s cn56xxp1;
	struct cvmx_tim_reg_bist_result_s cn58xx;
	struct cvmx_tim_reg_bist_result_s cn58xxp1;
	struct cvmx_tim_reg_bist_result_s cn63xx;
	struct cvmx_tim_reg_bist_result_s cn63xxp1;
};
typedef union cvmx_tim_reg_bist_result cvmx_tim_reg_bist_result_t;
/**
 * cvmx_tim_reg_error
 *
 * Notes:
 * A ring is in error if its interval has elapsed more than once without having been serviced.
 * During a CSR write to this register, the write data is used as a mask to clear the selected mask
 * bits (mask'[15:0] = mask[15:0] & ~write_data[15:0]).
 */
union cvmx_tim_reg_error {
	uint64_t u64;
	struct cvmx_tim_reg_error_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t mask                         : 16; /**< Bit mask indicating the rings in error */
#else
	uint64_t mask                         : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_tim_reg_error_s cn30xx;
	struct cvmx_tim_reg_error_s cn31xx;
	struct cvmx_tim_reg_error_s cn38xx;
	struct cvmx_tim_reg_error_s cn38xxp2;
	struct cvmx_tim_reg_error_s cn50xx;
	struct cvmx_tim_reg_error_s cn52xx;
	struct cvmx_tim_reg_error_s cn52xxp1;
	struct cvmx_tim_reg_error_s cn56xx;
	struct cvmx_tim_reg_error_s cn56xxp1;
	struct cvmx_tim_reg_error_s cn58xx;
	struct cvmx_tim_reg_error_s cn58xxp1;
	struct cvmx_tim_reg_error_s cn63xx;
	struct cvmx_tim_reg_error_s cn63xxp1;
};
typedef union cvmx_tim_reg_error cvmx_tim_reg_error_t;
/**
 * cvmx_tim_reg_flags
 *
 * Notes:
 * TIM has a counter that causes a periodic tick every 1024 cycles. This counter is shared by all
 * rings. (Each tick causes the HW to decrement the time offset (i.e. COUNT) for all enabled rings.)
 * When ENA_TIM==0, the HW stops this shared periodic counter, so there are no more ticks, and there
 * are no more new bucket traversals (for any ring).
 *
 * If ENA_TIM transitions 1->0, TIM will no longer create new bucket traversals, but there may
 * have been previous ones. If there are ring bucket traversals that were already pending but
 * not currently active (i.e. bucket traversals that need to be done by the HW, but haven't been yet)
 * during this ENA_TIM 1->0 transition, then these bucket traversals will remain pending until
 * ENA_TIM is later set to one. Bucket traversals that were already in progress will complete
 * after the 1->0 ENA_TIM transition, though.
 */
union cvmx_tim_reg_flags {
	uint64_t u64;
	struct cvmx_tim_reg_flags_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_3_63                : 61;
	uint64_t reset                        : 1;  /**< Reset oneshot pulse for free-running structures */
	uint64_t enable_dwb                   : 1;  /**< Enables non-zero DonwWriteBacks when set
                                                         When set, enables the use of
                                                         DontWriteBacks during the buffer freeing
                                                         operations. */
	uint64_t enable_timers                : 1;  /**< Enables the TIM section when set
                                                         When set, TIM is in normal operation.
                                                         When clear, time is effectively stopped for all
                                                         rings. */
#else
	uint64_t enable_timers                : 1;
	uint64_t enable_dwb                   : 1;
	uint64_t reset                        : 1;
	uint64_t reserved_3_63                : 61;
#endif
	} s;
	struct cvmx_tim_reg_flags_s cn30xx;
	struct cvmx_tim_reg_flags_s cn31xx;
	struct cvmx_tim_reg_flags_s cn38xx;
	struct cvmx_tim_reg_flags_s cn38xxp2;
	struct cvmx_tim_reg_flags_s cn50xx;
	struct cvmx_tim_reg_flags_s cn52xx;
	struct cvmx_tim_reg_flags_s cn52xxp1;
	struct cvmx_tim_reg_flags_s cn56xx;
	struct cvmx_tim_reg_flags_s cn56xxp1;
	struct cvmx_tim_reg_flags_s cn58xx;
	struct cvmx_tim_reg_flags_s cn58xxp1;
	struct cvmx_tim_reg_flags_s cn63xx;
	struct cvmx_tim_reg_flags_s cn63xxp1;
};
typedef union cvmx_tim_reg_flags cvmx_tim_reg_flags_t;
/**
 * cvmx_tim_reg_int_mask
 *
 * Notes:
 * Note that this CSR is present only in chip revisions beginning with pass2.
 * When mask bit is set, the interrupt is enabled.
 */
union cvmx_tim_reg_int_mask {
	uint64_t u64;
	struct cvmx_tim_reg_int_mask_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t mask                         : 16; /**< Bit mask corresponding to TIM_REG_ERROR.MASK above */
#else
	uint64_t mask                         : 16;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_tim_reg_int_mask_s cn30xx;
	struct cvmx_tim_reg_int_mask_s cn31xx;
	struct cvmx_tim_reg_int_mask_s cn38xx;
	struct cvmx_tim_reg_int_mask_s cn38xxp2;
	struct cvmx_tim_reg_int_mask_s cn50xx;
	struct cvmx_tim_reg_int_mask_s cn52xx;
	struct cvmx_tim_reg_int_mask_s cn52xxp1;
	struct cvmx_tim_reg_int_mask_s cn56xx;
	struct cvmx_tim_reg_int_mask_s cn56xxp1;
	struct cvmx_tim_reg_int_mask_s cn58xx;
	struct cvmx_tim_reg_int_mask_s cn58xxp1;
	struct cvmx_tim_reg_int_mask_s cn63xx;
	struct cvmx_tim_reg_int_mask_s cn63xxp1;
};
typedef union cvmx_tim_reg_int_mask cvmx_tim_reg_int_mask_t;
/**
 * cvmx_tim_reg_read_idx
 *
 * Notes:
 * Provides the read index during a CSR read operation to any of the CSRs that are physically stored
 * as memories. The names of these CSRs begin with the prefix "TIM_MEM_".
 * IDX[7:0] is the read index. INC[7:0] is an increment that is added to IDX[7:0] after any CSR read.
 * The intended use is to initially write this CSR such that IDX=0 and INC=1. Then, the entire
 * contents of a CSR memory can be read with consecutive CSR read commands.
 */
union cvmx_tim_reg_read_idx {
	uint64_t u64;
	struct cvmx_tim_reg_read_idx_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63               : 48;
	uint64_t inc                          : 8;  /**< Increment to add to current index for next index */
	uint64_t index                        : 8;  /**< Index to use for next memory CSR read */
#else
	uint64_t index                        : 8;
	uint64_t inc                          : 8;
	uint64_t reserved_16_63               : 48;
#endif
	} s;
	struct cvmx_tim_reg_read_idx_s cn30xx;
	struct cvmx_tim_reg_read_idx_s cn31xx;
	struct cvmx_tim_reg_read_idx_s cn38xx;
	struct cvmx_tim_reg_read_idx_s cn38xxp2;
	struct cvmx_tim_reg_read_idx_s cn50xx;
	struct cvmx_tim_reg_read_idx_s cn52xx;
	struct cvmx_tim_reg_read_idx_s cn52xxp1;
	struct cvmx_tim_reg_read_idx_s cn56xx;
	struct cvmx_tim_reg_read_idx_s cn56xxp1;
	struct cvmx_tim_reg_read_idx_s cn58xx;
	struct cvmx_tim_reg_read_idx_s cn58xxp1;
	struct cvmx_tim_reg_read_idx_s cn63xx;
	struct cvmx_tim_reg_read_idx_s cn63xxp1;
};
typedef union cvmx_tim_reg_read_idx cvmx_tim_reg_read_idx_t;