1 /***********************license start***************
2 * Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
44 * Configuration and status register (CSR) type definitions for
47 * This file is auto generated. Do not edit.
52 #ifndef __CVMX_SPXX_DEFS_H__
53 #define __CVMX_SPXX_DEFS_H__
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_BCKPRS_CNT; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_BCKPRS_CNT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_BCKPRS_CNT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000340ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_BCKPRS_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000340ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_BIST_STAT; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_BIST_STAT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_BIST_STAT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800900007F8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_BIST_STAT(block_id) (CVMX_ADD_IO_SEG(0x00011800900007F8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_CLK_CTL; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_CLK_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_CLK_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000348ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_CLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000348ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_CLK_STAT; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_CLK_STAT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_CLK_STAT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000350ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_CLK_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000350ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_DBG_DESKEW_CTL; warns when block_id is out
 * of range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_DBG_DESKEW_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_DBG_DESKEW_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000368ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_DBG_DESKEW_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000368ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_DBG_DESKEW_STATE; warns when block_id is
 * out of range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_DBG_DESKEW_STATE(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_DBG_DESKEW_STATE(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000370ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_DBG_DESKEW_STATE(block_id) (CVMX_ADD_IO_SEG(0x0001180090000370ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_DRV_CTL; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_DRV_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_DRV_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000358ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_DRV_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000358ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_ERR_CTL; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_ERR_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_ERR_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000320ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_ERR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000320ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_INT_DAT; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_INT_DAT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_INT_DAT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000318ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_INT_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000318ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_INT_MSK; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_INT_MSK(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_INT_MSK(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000308ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_INT_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180090000308ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_INT_REG; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_INT_REG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_INT_REG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000300ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180090000300ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_INT_SYNC; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_INT_SYNC(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_INT_SYNC(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000310ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_INT_SYNC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000310ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_TPA_ACC; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_TPA_ACC(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_TPA_ACC(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000338ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_TPA_ACC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000338ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_TPA_MAX; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_TPA_MAX(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_TPA_MAX(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000330ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_TPA_MAX(block_id) (CVMX_ADD_IO_SEG(0x0001180090000330ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_TPA_SEL; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_TPA_SEL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_TPA_SEL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_TPA_SEL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of SPX(block_id)_TRN4_CTL; warns when block_id is out of
 * range (0..1 on CN38XX/CN58XX) for the running chip model. */
static inline uint64_t CVMX_SPXX_TRN4_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_SPXX_TRN4_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_SPXX_TRN4_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull)
#endif
249 * cvmx_spx#_bckprs_cnt
union cvmx_spxx_bckprs_cnt {
	uint64_t u64;
	struct cvmx_spxx_bckprs_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63 : 32;
		uint64_t cnt : 32;	/**< Counts the number of core clock cycles in which
						 the SPI-4.2 receiver receives data once the TPA
						 for a particular port has been deasserted. The
						 desired port to watch can be selected with the
						 SPX_TPA_SEL[PRTSEL] field. CNT can be cleared by
						 writing all 1s to it. */
#else
		uint64_t cnt : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_spxx_bckprs_cnt_s cn38xx;
	struct cvmx_spxx_bckprs_cnt_s cn38xxp2;
	struct cvmx_spxx_bckprs_cnt_s cn58xx;
	struct cvmx_spxx_bckprs_cnt_s cn58xxp1;
};
typedef union cvmx_spxx_bckprs_cnt cvmx_spxx_bckprs_cnt_t;
275 * cvmx_spx#_bist_stat
278 * Bist results encoding
279 * - 0: good (or bist in progress/never run)
union cvmx_spxx_bist_stat {
	uint64_t u64;
	struct cvmx_spxx_bist_stat_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_3_63 : 61;
		uint64_t stat2 : 1;	/**< Bist Results/No Repair (Tx calendar table)
						 (spx.stx.cal.calendar) */
		uint64_t stat1 : 1;	/**< Bist Results/No Repair (Rx calendar table)
						 (spx.srx.spi4.cal.calendar) */
		uint64_t stat0 : 1;	/**< Bist Results/No Repair (Spi4 receive datapath FIFO)
						 (spx.srx.spi4.dat.dpr) */
#else
		uint64_t stat0 : 1;
		uint64_t stat1 : 1;
		uint64_t stat2 : 1;
		uint64_t reserved_3_63 : 61;
#endif
	} s;
	struct cvmx_spxx_bist_stat_s cn38xx;
	struct cvmx_spxx_bist_stat_s cn38xxp2;
	struct cvmx_spxx_bist_stat_s cn58xx;
	struct cvmx_spxx_bist_stat_s cn58xxp1;
};
typedef union cvmx_spxx_bist_stat cvmx_spxx_bist_stat_t;
312 * When asserted, this bit locks the Spi4 receive DLLs. This bit also
313 * acts as the Spi4 receiver reset and must be asserted before the
314 * training sequences are used to initialize the interface. This bit
315 * only applies to the receiver interface.
318 * Once the SRXDLCK bit is asserted and the DLLs have locked and the
319 * system has been programmed, software should assert this bit in order
320 * to start looking for valid training sequence and synchronize the
321 * interface. This bit only applies to the receiver interface.
324 * The Spi4 receiver can either convert training packets into NOPs or
325 * drop them entirely. Dropping ticks allows the interface to deskew
326 * periodically if the dclk and eclk ratios are close. This bit only
327 * applies to the receiver interface.
330 * When software sets this bit, it indicates that the Spi4 transmit
331 * interface has been setup and has seen the calendar status. Once the
332 * transmitter begins sending training data, the receiving device is free
333 * to start traversing the calendar table to synch the link.
336 * This bit determines which status clock edge to sample the status
337 * channel in Spi4 mode. Since the status channel is in the opposite
338 * direction to the datapath, the STATRCV actually effects the
339 * transmitter/TX block.
342 * This bit determines which status clock edge to drive the status
343 * channel in Spi4 mode. Since the status channel is in the opposite
344 * direction to the datapath, the STATDRV actually effects the
348 * RUNBIST will begin BIST/BISR in all the SPX compiled memories.
349 * These memories are...
351 * * spx.srx.spi4.dat.dpr // FIFO Spi4 to IMX
352 * * spx.stx.cal.calendar // Spi4 TX calendar table
353 * * spx.srx.spi4.cal.calendar // Spi4 RX calendar table
355 * RUNBIST must never be asserted when the interface is enabled.
356 * Furthermore, setting RUNBIST at any other time is destructive and can
357 * cause data and configuration corruption. The entire interface must be
358 * reconfigured when this bit is set.
361 * CLKDLY should be kept at its reset value during normal operation. This
362 * register controls the SPI4.2 static clock positioning which normally only is
363 * set to the non-reset value in quarter clocking schemes. In this mode, the
364 * delay window is not large enough for slow clock freq, therefore clock and
365 * data must be statically positioned with CSRs. By changing the clock position
366 * relative to the data bits, we give the system a wider window.
369 * In systems in which no training data is sent to N2 or N2 cannot
370 * correctly sample the training data, software may pulse this bit by
371 * writing a '1' followed by a '0' in order to correctly set the
372 * receivers state. The receive data bus should be idle at this time
373 * (only NOPs on the bus). If N2 cannot see at least on training
374 * sequence, the data bus will not send any data to the core. The
375 * interface will hang.
union cvmx_spxx_clk_ctl {
	uint64_t u64;
	struct cvmx_spxx_clk_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_17_63 : 47;
		uint64_t seetrn : 1;	/**< Force the Spi4 receiver into seeing a training
						 sequence */
		uint64_t reserved_12_15 : 4;
		uint64_t clkdly : 5;	/**< Set the spx__clkdly lines to this value to
						 control the delay on the incoming dclk
						 (spx__clkdly) */
		uint64_t runbist : 1;	/**< Write this bit to begin BIST testing in SPX */
		uint64_t statdrv : 1;	/**< Spi4 status channel drive mode
						 - 1: Drive STAT on posedge of SCLK
						 - 0: Drive STAT on negedge of SCLK */
		uint64_t statrcv : 1;	/**< Spi4 status channel sample mode
						 - 1: Sample STAT on posedge of SCLK
						 - 0: Sample STAT on negedge of SCLK */
		uint64_t sndtrn : 1;	/**< Start sending training patterns on the Spi4
						 Tx interface */
		uint64_t drptrn : 1;	/**< Drop blocks of training packets */
		uint64_t rcvtrn : 1;	/**< Write this bit once the DLL is locked to sync
						 on the training sequence */
		uint64_t srxdlck : 1;	/**< Write this bit to lock the Spi4 receive DLL */
#else
		uint64_t srxdlck : 1;
		uint64_t rcvtrn : 1;
		uint64_t drptrn : 1;
		uint64_t sndtrn : 1;
		uint64_t statrcv : 1;
		uint64_t statdrv : 1;
		uint64_t runbist : 1;
		uint64_t clkdly : 5;
		uint64_t reserved_12_15 : 4;
		uint64_t seetrn : 1;
		uint64_t reserved_17_63 : 47;
#endif
	} s;
	struct cvmx_spxx_clk_ctl_s cn38xx;
	struct cvmx_spxx_clk_ctl_s cn38xxp2;
	struct cvmx_spxx_clk_ctl_s cn58xx;
	struct cvmx_spxx_clk_ctl_s cn58xxp1;
};
typedef union cvmx_spxx_clk_ctl cvmx_spxx_clk_ctl_t;
union cvmx_spxx_clk_stat {
	uint64_t u64;
	struct cvmx_spxx_clk_stat_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_11_63 : 53;
		uint64_t stxcal : 1;	/**< The transition from Sync to Calendar on the
						 Spi4 status channel */
		uint64_t reserved_9_9 : 1;
		uint64_t srxtrn : 1;	/**< Saw a good data training sequence */
		uint64_t s4clk1 : 1;	/**< Saw '1' on Spi4 transmit status forward clk input */
		uint64_t s4clk0 : 1;	/**< Saw '0' on Spi4 transmit status forward clk input */
		uint64_t d4clk1 : 1;	/**< Saw '1' on Spi4 receive data forward clk input */
		uint64_t d4clk0 : 1;	/**< Saw '0' on Spi4 receive data forward clk input */
		uint64_t reserved_0_3 : 4;
#else
		uint64_t reserved_0_3 : 4;
		uint64_t d4clk0 : 1;
		uint64_t d4clk1 : 1;
		uint64_t s4clk0 : 1;
		uint64_t s4clk1 : 1;
		uint64_t srxtrn : 1;
		uint64_t reserved_9_9 : 1;
		uint64_t stxcal : 1;
		uint64_t reserved_11_63 : 53;
#endif
	} s;
	struct cvmx_spxx_clk_stat_s cn38xx;
	struct cvmx_spxx_clk_stat_s cn38xxp2;
	struct cvmx_spxx_clk_stat_s cn58xx;
	struct cvmx_spxx_clk_stat_s cn58xxp1;
};
typedef union cvmx_spxx_clk_stat cvmx_spxx_clk_stat_t;
459 * cvmx_spx#_dbg_deskew_ctl
462 * These bits are meant as a backdoor to control Spi4 per-bit deskew. See
463 * that Spec for more details.
465 * The basic idea is to allow software to disable the auto-deskew widgets
466 * and make any adjustments by hand. These steps should only be taken
467 * once the RCVTRN bit is set and before any real traffic is sent on the
468 * Spi4 bus. Great care should be taken when messing with these bits as
469 * improper programmings can cause catastrophic or intermittent problems.
471 * The params we have to test are the MUX tap selects and the XCV delay
474 * For the muxes, we can set each tap to a random value and then read
475 * back the taps. To write...
477 * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set
478 * SPXX_DBG_DESKEW_CTL[OFFSET] = mux tap value (2-bits)
479 * SPXX_DBG_DESKEW_CTL[MUX] = go bit
481 * Notice this can all happen with a single CSR write. To read, first
482 * set the bit you to look at with the SPXX_DBG_DESKEW_CTL[BITSEL], then
483 * simply read SPXX_DBG_DESKEW_STATE[MUXSEL]...
485 * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set
486 * SPXX_DBG_DESKEW_STATE[MUXSEL] = 2-bit value
488 * For the xcv delay taps, the CSR controls increment and decrement the
489 * 5-bit count value in the XCV. This is a saturating counter, so it
490 * will not wrap when decrementing below zero or incrementing above 31.
494 * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set
495 * SPXX_DBG_DESKEW_CTL[OFFSET] = tap value increment or decrement amount (5-bits)
496 * SPXX_DBG_DESKEW_CTL[INC|DEC] = go bit
498 * These values are copied in SPX, so that they can be read back by
499 * software by a similar mechanism to the MUX selects...
501 * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set
502 * SPXX_DBG_DESKEW_STATE[OFFSET] = 5-bit value
504 * In addition, there is a reset bit that sets all the state back to the
505 * default/starting value of 0x10.
507 * SPXX_DBG_DESKEW_CTL[CLRDLY] = 1
509 * SINGLE STEP TRAINING MODE (WILMA)
510 * Debug feature that will enable the user to single-step the debug
511 * logic to watch initial movement and trends by putting the training
512 * machine in single step mode.
514 * * SPX*_DBG_DESKEW_CTL[SSTEP]
515 * This will put the training control logic into single step mode. We
516 * will not deskew in this scenario and will require the TX device to
517 * send continuous training sequences.
519 * It is required that SRX*_COM_CTL[INF_EN] be clear so that suspect
520 * data does not flow into the chip.
522 * Deasserting SPX*_DBG_DESKEW_CTL[SSTEP] will attempt to deskew as per
523 * the normal definition. Single step mode is for debug only. Special
524 * care must be given to correctly deskew the interface if normal
525 * operation is desired.
527 * * SPX*_DBG_DESKEW_CTL[SSTEP_GO]
528 * Each write of '1' to SSTEP_GO will go through a single training
529 * iteration and will perform...
531 * - DLL update, if SPX*_DBG_DESKEW_CTL[DLLDIS] is clear
532 * - coarse update, if SPX*_TRN4_CTL[MUX_EN] is set
533 * - single fine update, if SPX*_TRN4_CTL[MACRO_EN] is set and an edge
534 * was detected after walked +/- SPX*_TRN4_CTL[MAXDIST] taps.
536 * Writes to this register have no effect if the interface is not in
537 * SSTEP mode (SPX*_DBG_DESKEW_CTL[SSTEP]).
539 * The WILMA mode will be cleared at the final state transition, so
540 * that software can set SPX*_DBG_DESKEW_CTL[SSTEP] and
541 * SPX*_DBG_DESKEW_CTL[SSTEP_GO] before setting SPX*_CLK_CTL[RCVTRN]
542 * and the machine will go through the initial iteration and stop -
543 * waiting for another SPX*_DBG_DESKEW_CTL[SSTEP_GO] or an interface
546 * * SPX*_DBG_DESKEW_CTL[FALL8]
547 * Determines how many pattern matches are required during training
548 * operations to fallout of training and begin processing the normal data
549 * stream. The default value is 10 pattern matches. The pattern that is
550 * used is dependent on the SPX*_DBG_DESKEW_CTL[FALLNOP] CSR which
551 * determines between non-training packets (the default) and NOPs.
553 * * SPX*_DBG_DESKEW_CTL[FALLNOP]
554 * Determines the pattern that is required during training operations to
555 * fallout of training and begin processing the normal data stream. The
556 * default value is to match against non-training data. Setting this
557 * bit, changes the behavior to watch for NOPs packet instead.
559 * This bit should not be changed dynamically while the link is
union cvmx_spxx_dbg_deskew_ctl {
	uint64_t u64;
	struct cvmx_spxx_dbg_deskew_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_30_63 : 34;
		uint64_t fallnop : 1;	/**< Training fallout on NOP matches instead of
						 non-training matches.
						 (spx_csr__spi4_fallout_nop) */
		uint64_t fall8 : 1;	/**< Training fallout at 8 pattern matches instead of 10
						 (spx_csr__spi4_fallout_8_match) */
		uint64_t reserved_26_27 : 2;
		uint64_t sstep_go : 1;	/**< Single Step Training Sequence
						 (spx_csr__spi4_single_step_go) */
		uint64_t sstep : 1;	/**< Single Step Training Mode
						 (spx_csr__spi4_single_step_mode) */
		uint64_t reserved_22_23 : 2;
		uint64_t clrdly : 1;	/**< Resets the offset control in the XCV
						 (spx_csr__spi4_dll_clr_dly) */
		uint64_t dec : 1;	/**< Decrement the offset by OFFSET for the Spi4
						 bit selected by BITSEL
						 (spx_csr__spi4_dbg_trn_dec) */
		uint64_t inc : 1;	/**< Increment the offset by OFFSET for the Spi4
						 bit selected by BITSEL
						 (spx_csr__spi4_dbg_trn_inc) */
		uint64_t mux : 1;	/**< Set the mux select tap for the Spi4 bit
						 selected by BITSEL
						 (spx_csr__spi4_dbg_trn_mux) */
		uint64_t offset : 5;	/**< Adds or subtracts (Based on INC or DEC) the
						 offset to Spi4 bit BITSEL.
						 (spx_csr__spi4_dbg_trn_offset) */
		uint64_t bitsel : 5;	/**< Select the Spi4 CTL or DAT bit
						 15-0 : Spi4 DAT[15:0]
						 16   : Spi4 CTL
						 (spx_csr__spi4_dbg_trn_bitsel) */
		uint64_t offdly : 6;	/**< Set the spx__offset lines to this value when
						 not in macro sequence
						 (spx_csr__spi4_mac_offdly) */
		uint64_t dllfrc : 1;	/**< Force the Spi4 RX DLL to update
						 (spx_csr__spi4_dll_force) */
		uint64_t dlldis : 1;	/**< Disable sending the update signal to the Spi4
						 RX DLL
						 (spx_csr__spi4_dll_trn_en) */
#else
		uint64_t dlldis : 1;
		uint64_t dllfrc : 1;
		uint64_t offdly : 6;
		uint64_t bitsel : 5;
		uint64_t offset : 5;
		uint64_t mux : 1;
		uint64_t inc : 1;
		uint64_t dec : 1;
		uint64_t clrdly : 1;
		uint64_t reserved_22_23 : 2;
		uint64_t sstep : 1;
		uint64_t sstep_go : 1;
		uint64_t reserved_26_27 : 2;
		uint64_t fall8 : 1;
		uint64_t fallnop : 1;
		uint64_t reserved_30_63 : 34;
#endif
	} s;
	struct cvmx_spxx_dbg_deskew_ctl_s cn38xx;
	struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2;
	struct cvmx_spxx_dbg_deskew_ctl_s cn58xx;
	struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1;
};
typedef union cvmx_spxx_dbg_deskew_ctl cvmx_spxx_dbg_deskew_ctl_t;
632 * cvmx_spx#_dbg_deskew_state
635 * These bits are meant as a backdoor to control Spi4 per-bit deskew. See
636 * that Spec for more details.
union cvmx_spxx_dbg_deskew_state {
	uint64_t u64;
	struct cvmx_spxx_dbg_deskew_state_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_9_63 : 55;
		uint64_t testres : 1;	/**< Training Test Mode Result
						 (srx_spi4__test_mode_result) */
		uint64_t unxterm : 1;	/**< Unexpected training termination
						 (srx_spi4__top_unxexp_trn_term) */
		uint64_t muxsel : 2;	/**< The mux select value of the bit selected by
						 SPX_DBG_DESKEW_CTL[BITSEL]
						 (srx_spi4__trn_mux_sel) */
		uint64_t offset : 5;	/**< The counter value of the bit selected by
						 SPX_DBG_DESKEW_CTL[BITSEL]
						 (srx_spi4__xcv_tap_select) */
#else
		uint64_t offset : 5;
		uint64_t muxsel : 2;
		uint64_t unxterm : 1;
		uint64_t testres : 1;
		uint64_t reserved_9_63 : 55;
#endif
	} s;
	struct cvmx_spxx_dbg_deskew_state_s cn38xx;
	struct cvmx_spxx_dbg_deskew_state_s cn38xxp2;
	struct cvmx_spxx_dbg_deskew_state_s cn58xx;
	struct cvmx_spxx_dbg_deskew_state_s cn58xxp1;
};
typedef union cvmx_spxx_dbg_deskew_state cvmx_spxx_dbg_deskew_state_t;
672 * These bits all come from Duke - he will provide documentation and
673 * explanation. I'll just butcher it.
union cvmx_spxx_drv_ctl {
	uint64_t u64;
	struct cvmx_spxx_drv_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_0_63 : 64;
#else
		uint64_t reserved_0_63 : 64;
#endif
	} s;
	struct cvmx_spxx_drv_ctl_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_16_63 : 48;
		uint64_t stx4ncmp : 4;	/**< Duke (spx__spi4_tx_nctl_comp) */
		uint64_t stx4pcmp : 4;	/**< Duke (spx__spi4_tx_pctl_comp) */
		uint64_t srx4cmp : 8;	/**< Duke (spx__spi4_rx_rctl_comp) */
#else
		uint64_t srx4cmp : 8;
		uint64_t stx4pcmp : 4;
		uint64_t stx4ncmp : 4;
		uint64_t reserved_16_63 : 48;
#endif
	} cn38xx;
	struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2;
	struct cvmx_spxx_drv_ctl_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_24_63 : 40;
		uint64_t stx4ncmp : 4;	/**< Not used in CN58XX (spx__spi4_tx_nctl_comp) */
		uint64_t stx4pcmp : 4;	/**< Not used in CN58XX (spx__spi4_tx_pctl_comp) */
		uint64_t reserved_10_15 : 6;
		uint64_t srx4cmp : 10;	/**< Suresh (spx__spi4_rx_rctl_comp)
						 Can be used to bypass the RX termination resistor
						 value. We have an on-chip RX termination resistor
						 compensation control block, which adjusts the
						 resistor value to a nominal 100 ohms. This
						 register can be used to bypass this automatically
						 computed value. */
#else
		uint64_t srx4cmp : 10;
		uint64_t reserved_10_15 : 6;
		uint64_t stx4pcmp : 4;
		uint64_t stx4ncmp : 4;
		uint64_t reserved_24_63 : 40;
#endif
	} cn58xx;
	struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1;
};
typedef union cvmx_spxx_drv_ctl cvmx_spxx_drv_ctl_t;
726 * SPX_ERR_CTL - Spi error control register
730 * * DIPPAY, DIPCLS, PRTNXA
731 * These bits control whether or not the packet's ERR bit is set when any of
732 * the these error is detected. If the corresponding error's bit is clear,
733 * the packet ERR will be set. If the error bit is set, the SPX will simply
734 * pass through the ERR bit without modifying it in anyway - the error bit
735 * may or may not have been set by the transmitter device.
union cvmx_spxx_err_ctl {
	uint64_t u64;
	struct cvmx_spxx_err_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_9_63 : 55;
		uint64_t prtnxa : 1;	/**< Spi4 - set the ERR bit on packets in which the
						 port is out-of-range */
		uint64_t dipcls : 1;	/**< Spi4 DIPERR on closing control words cause the
						 ERR bit to be set */
		uint64_t dippay : 1;	/**< Spi4 DIPERR on payload control words cause the
						 ERR bit to be set */
		uint64_t reserved_4_5 : 2;
		uint64_t errcnt : 4;	/**< Number of Dip4 errors before bringing down the
						 interface */
#else
		uint64_t errcnt : 4;
		uint64_t reserved_4_5 : 2;
		uint64_t dippay : 1;
		uint64_t dipcls : 1;
		uint64_t prtnxa : 1;
		uint64_t reserved_9_63 : 55;
#endif
	} s;
	struct cvmx_spxx_err_ctl_s cn38xx;
	struct cvmx_spxx_err_ctl_s cn38xxp2;
	struct cvmx_spxx_err_ctl_s cn58xx;
	struct cvmx_spxx_err_ctl_s cn58xxp1;
};
typedef union cvmx_spxx_err_ctl cvmx_spxx_err_ctl_t;
770 * SPX_INT_DAT - Interrupt Data Register
774 * Note: The SPX_INT_DAT[MUL] bit is set when multiple errors have been
775 * detected that would set any of the data fields: PRT, RSVOP, and CALBNK.
777 * The following errors will cause MUL to assert for PRT conflicts.
782 * The following errors will cause MUL to assert for RSVOP conflicts.
785 * The following errors will cause MUL to assert for CALBNK conflicts.
788 * The following errors will cause MUL to assert if multiple interrupts are
792 * The MUL bit will be cleared once all outstanding errors have been
793 * cleared by software (not just MUL errors - all errors).
union cvmx_spxx_int_dat {
	uint64_t u64;
	struct cvmx_spxx_int_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_32_63 : 32;
		uint64_t mul : 1;	/**< Multiple errors have occurred */
		uint64_t reserved_14_30 : 17;
		uint64_t calbnk : 2;	/**< Spi4 Calendar table parity error bank */
		uint64_t rsvop : 4;	/**< Spi4 reserved control word */
		uint64_t prt : 8;	/**< Port associated with error */
#else
		uint64_t prt : 8;
		uint64_t rsvop : 4;
		uint64_t calbnk : 2;
		uint64_t reserved_14_30 : 17;
		uint64_t mul : 1;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_spxx_int_dat_s cn38xx;
	struct cvmx_spxx_int_dat_s cn38xxp2;
	struct cvmx_spxx_int_dat_s cn58xx;
	struct cvmx_spxx_int_dat_s cn58xxp1;
};
typedef union cvmx_spxx_int_dat cvmx_spxx_int_dat_t;
824 * SPX_INT_MSK - Interrupt Mask Register
union cvmx_spxx_int_msk {
	uint64_t u64;
	struct cvmx_spxx_int_msk_s {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved_12_63 : 52;
		uint64_t calerr : 1;	/**< Spi4 Calendar table parity error */
		uint64_t syncerr : 1;	/**< Consecutive Spi4 DIP4 errors have exceeded
						 SPX_ERR_CTL[ERRCNT] */
		uint64_t diperr : 1;	/**< Spi4 DIP4 error */
		uint64_t tpaovr : 1;	/**< Selected port has hit TPA overflow */
		uint64_t rsverr : 1;	/**< Spi4 reserved control word detected */
		uint64_t drwnng : 1;	/**< Spi4 receive FIFO drowning/overflow */
		uint64_t clserr : 1;	/**< Spi4 packet closed on non-16B alignment without EOP */
		uint64_t spiovr : 1;	/**< Spi async FIFO overflow (Spi3 or Spi4) */
		uint64_t reserved_2_3 : 2;
		uint64_t abnorm : 1;	/**< Abnormal packet termination (ERR bit) */
		uint64_t prtnxa : 1;	/**< Port out of range */
#else
		uint64_t prtnxa : 1;
		uint64_t abnorm : 1;
		uint64_t reserved_2_3 : 2;
		uint64_t spiovr : 1;
		uint64_t clserr : 1;
		uint64_t drwnng : 1;
		uint64_t rsverr : 1;
		uint64_t tpaovr : 1;
		uint64_t diperr : 1;
		uint64_t syncerr : 1;
		uint64_t calerr : 1;
		uint64_t reserved_12_63 : 52;
#endif
	} s;
	struct cvmx_spxx_int_msk_s cn38xx;
	struct cvmx_spxx_int_msk_s cn38xxp2;
	struct cvmx_spxx_int_msk_s cn58xx;
	struct cvmx_spxx_int_msk_s cn58xxp1;
};
typedef union cvmx_spxx_int_msk cvmx_spxx_int_msk_t;
869 * SPX_INT_REG - Interrupt Register
874 * This error indicates that the port on the Spi bus was not a valid port
875 * for the system. Spi4 accesses occur on payload control bit-times. The
876 * SRX can be configured with the exact number of ports available (by
877 * SRX_COM_CTL[PRTS] register). Any Spi access to anthing outside the range
878 * of 0 .. (SRX_COM_CTL[PRTS] - 1) is considered an error. The offending
879 * port is logged in SPX_INT_DAT[PRT] if there are no pending interrupts in
880 * SPX_INT_REG that require SPX_INT_DAT[PRT].
882 * SRX will not drop the packet with the bogus port address. Instead, the
883 * port will be mapped into the supported port range. The remapped address
886 * Address = [ interfaceId, ADR[3:0] ]
888 * If the SPX detects that a PRTNXA error has occured, the packet will
889 * have its ERR bit set (or'ed in with the ERR bit from the transmitter)
890 * if the SPX_ERR_CTL[PRTNXA] bit is clear.
892 * In Spi4 mode, SPX will generate an interrupt for every 8B data burst
893 * associated with the invalid address. The SPX_INT_DAT[MUL] bit will never
897 * This bit simply indicates that a given packet had abnormal terminiation.
898 * In Spi4 mode, this means that packet completed with an EOPS[1:0] code of
899 * 2'b01. This error can also be thought of as the application specific
900 * error (as mentioned in the Spi4 spec). The offending port is logged in
901 * SPX_INT_DAT[PRT] if there are no pending interrupts in SPX_INT_REG that
902 * require SPX_INT_DAT[PRT].
904 * The ABNORM error is only raised when the ERR bit that comes from the
905 * Spi interface is set. It will never assert if any internal condition
906 * causes the ERR bit to assert (e.g. PRTNXA or DPERR).
909 * This error indicates that the FIFOs that manage the async crossing from
910 * the Spi clocks to the core clock domains have overflowed. This is a
911 * fatal error and can cause much data/control corruption since ticks will
912 * be dropped and reordered. This is purely a function of clock ratios and
913 * correct system ratios should make this an impossible condition.
916 * This is a Spi4 error that indicates that a given data transfer burst
917 * that did not terminate with an EOP, did not end with the 16B alignment
918 * as per the Spi4 spec. The offending port cannot be logged since the
919 * block does not know the stream terminated until the port switches.
920 * At that time, that packet has already been pushed down the pipe.
922 * The CLSERR bit does not actually check the Spi4 burst - just how data
923 * is accumulated for the downstream logic. Bursts that are separated by
924 * idles or training will still be merged into accumulated transfers and
925 * will not fire the CLSERR condition. The checker is really checking
926 * non-8B aligned, non-EOP data ticks that are sent downstream. These
927 * ticks are what will really mess up the core.
929 * This is an expensive fix, so we'll probably let it ride. We never
930 * claim to check Spi4 protocol anyway.
933 * This error indicates that the Spi4 FIFO that services the GMX has
934 * overflowed. Like the SPIOVR error condition, correct system ratios
935 * should make this an impossible condition.
938 * This Spi4 error indicates that the Spi4 receiver has seen a reserve
939 * control packet. A reserve control packet is an invalid combination
940 * of bits on DAT[15:12]. Basically this is DAT[15] == 1'b0 and DAT[12]
941 * == 1'b1 (an SOP without a payload command). The RSVERR indicates an
942 * error has occurred and SPX_INT_DAT[RSVOP] holds the first reserved
943 * opcode and will be set if there are no pending interrupts in
944 * SPX_INT_REG that require SPX_INT_DAT[RSVOP].
947 * This bit indicates that the TPA Watcher has flagged an event. See the
948 * TPA Watcher for a more detailed discussion.
951 * This bit indicates that the Spi4 receiver has encountered a DIP4
952 * miscompare on the datapath. A DIPERR can occur in an IDLE or a
953 * control word that frames a data burst. If the DIPERR occurs on a
954 * framing word there are three cases.
956 * 1) DIPERR occurs at the end of a data burst. The previous packet is
957 * marked with the ERR bit to be processed later if
958 * SPX_ERR_CTL[DIPCLS] is clear.
959 * 2) DIPERR occurs on a payload word. The subsequent packet is marked
960 * with the ERR bit to be processed later if SPX_ERR_CTL[DIPPAY] is
962 * 3) DIPERR occurs on a control word that closes one packet and is a
963 * payload for another packet. In this case, both packets will have
964 * their ERR bit marked depending on the respective values of
965 * SPX_ERR_CTL[DIPCLS] and SPX_ERR_CTL[DIPPAY] as discussed above.
968 * This bit indicates that the Spi4 receiver has encountered
969 * SPX_ERR_CTL[ERRCNT] consecutive Spi4 DIP4 errors and the interface
973 * This bit indicates that the Spi4 calendar table encountered a parity
974 * error. This error bit is associated with the calendar table on the RX
975 * interface - the interface that receives the Spi databus. Parity errors
976 * can occur during normal operation when the calendar table is constantly
977 * being read for the port information, or during initialization time, when
978 * the user has access. Since the calendar table is split into two banks,
979 * SPX_INT_DAT[CALBNK] indicates which banks have taken a parity error.
980 * CALBNK[1] indicates the error occurred in the upper bank, while CALBNK[0]
981 * indicates that the error occurred in the lower bank. SPX_INT_DAT[CALBNK]
982 * will be set if there are no pending interrupts in SPX_INT_REG that
983 * require SPX_INT_DAT[CALBNK].
986 * This bit indicates that a Spi fatal error has occurred. A fatal error
987 * is defined as any error condition for which the corresponding
988 * SPX_INT_SYNC bit is set. Therefore, conservative systems can halt the
989 * interface on any error condition although this is not strictly
990 * necessary. Some errors are much more fatal in nature than others.
992 * PRTNXA, SPIOVR, CLSERR, DRWNNG, DIPERR, CALERR, and SYNCERR are examples
993 * of fatal error for different reasons - usually because multiple port
994 * streams could be effected. ABNORM, RSVERR, and TPAOVR are conditions
995 * that are contained to a single packet which allows the interface to drop
996 * a single packet and remain up and stable.
/* SPX_INT_REG: interrupt status bits, overlaid on the raw 64-bit CSR value. */
union cvmx_spxx_int_reg {
	uint64_t u64;	/* raw register value */
	struct cvmx_spxx_int_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t spf : 1; /**< Spi interface down */
	uint64_t reserved_12_30 : 19;
	uint64_t calerr : 1; /**< Spi4 Calendar table parity error */
	uint64_t syncerr : 1; /**< Consecutive Spi4 DIP4 errors have exceeded
				   SPX_ERR_CTL[ERRCNT] */
	uint64_t diperr : 1; /**< Spi4 DIP4 error */
	uint64_t tpaovr : 1; /**< Selected port has hit TPA overflow */
	uint64_t rsverr : 1; /**< Spi4 reserved control word detected */
	uint64_t drwnng : 1; /**< Spi4 receive FIFO drowning/overflow */
	uint64_t clserr : 1; /**< Spi4 packet closed on non-16B alignment without EOP */
	uint64_t spiovr : 1; /**< Spi async FIFO overflow */
	uint64_t reserved_2_3 : 2;
	uint64_t abnorm : 1; /**< Abnormal packet termination (ERR bit) */
	uint64_t prtnxa : 1; /**< Port out of range */
#else
	/* Little-endian bitfield order: same fields, reversed. */
	uint64_t prtnxa : 1;
	uint64_t abnorm : 1;
	uint64_t reserved_2_3 : 2;
	uint64_t spiovr : 1;
	uint64_t clserr : 1;
	uint64_t drwnng : 1;
	uint64_t rsverr : 1;
	uint64_t tpaovr : 1;
	uint64_t diperr : 1;
	uint64_t syncerr : 1;
	uint64_t calerr : 1;
	uint64_t reserved_12_30 : 19;
	uint64_t spf : 1;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_spxx_int_reg_s cn38xx;
	struct cvmx_spxx_int_reg_s cn38xxp2;
	struct cvmx_spxx_int_reg_s cn58xx;
	struct cvmx_spxx_int_reg_s cn58xxp1;
};
typedef union cvmx_spxx_int_reg cvmx_spxx_int_reg_t;
1042 * cvmx_spx#_int_sync
1044 * SPX_INT_SYNC - Interrupt Sync Register
1048 * This mask set indicates which exception condition should cause the
1049 * SPX_INT_REG[SPF] bit to assert
1051 * It is recommended that software set the PRTNXA, SPIOVR, CLSERR, DRWNNG,
1052 * DIPERR, CALERR, and SYNCERR errors as synchronization events. Software is
1053 * free to synchronize the bus on other conditions, but this is the minimum
/* SPX_INT_SYNC: selects which error conditions assert SPX_INT_REG[SPF]. */
union cvmx_spxx_int_sync {
	uint64_t u64;	/* raw register value */
	struct cvmx_spxx_int_sync_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_12_63 : 52;
	uint64_t calerr : 1; /**< Spi4 Calendar table parity error */
	uint64_t syncerr : 1; /**< Consecutive Spi4 DIP4 errors have exceeded
				   SPX_ERR_CTL[ERRCNT] */
	uint64_t diperr : 1; /**< Spi4 DIP4 error */
	uint64_t tpaovr : 1; /**< Selected port has hit TPA overflow */
	uint64_t rsverr : 1; /**< Spi4 reserved control word detected */
	uint64_t drwnng : 1; /**< Spi4 receive FIFO drowning/overflow */
	uint64_t clserr : 1; /**< Spi4 packet closed on non-16B alignment without EOP */
	uint64_t spiovr : 1; /**< Spi async FIFO overflow (Spi3 or Spi4) */
	uint64_t reserved_2_3 : 2;
	uint64_t abnorm : 1; /**< Abnormal packet termination (ERR bit) */
	uint64_t prtnxa : 1; /**< Port out of range */
#else
	/* Little-endian bitfield order: same fields, reversed. */
	uint64_t prtnxa : 1;
	uint64_t abnorm : 1;
	uint64_t reserved_2_3 : 2;
	uint64_t spiovr : 1;
	uint64_t clserr : 1;
	uint64_t drwnng : 1;
	uint64_t rsverr : 1;
	uint64_t tpaovr : 1;
	uint64_t diperr : 1;
	uint64_t syncerr : 1;
	uint64_t calerr : 1;
	uint64_t reserved_12_63 : 52;
#endif
	} s;
	struct cvmx_spxx_int_sync_s cn38xx;
	struct cvmx_spxx_int_sync_s cn38xxp2;
	struct cvmx_spxx_int_sync_s cn58xx;
	struct cvmx_spxx_int_sync_s cn58xxp1;
};
typedef union cvmx_spxx_int_sync cvmx_spxx_int_sync_t;
1098 * SPX_TPA_ACC - TPA watcher byte accumulator
1102 * This field allows the user to access the TPA watcher accumulator counter.
1103 * This register reflects the number of bytes sent to IMX once the port
1104 * specified by SPX_TPA_SEL[PRTSEL] has lost its TPA. The SPX_INT_REG[TPAOVR]
1105 * bit is asserted when CNT >= SPX_TPA_MAX[MAX]. The CNT will continue to
1106 * increment until the TPA for the port is asserted. At that point the CNT
1107 * value is frozen until software clears the interrupt bit.
/* SPX_TPA_ACC: TPA watcher byte accumulator (see block comment above). */
union cvmx_spxx_tpa_acc {
	uint64_t u64;	/* raw register value */
	struct cvmx_spxx_tpa_acc_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t cnt : 32; /**< TPA watcher accumulate count */
#else
	uint64_t cnt : 32;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_spxx_tpa_acc_s cn38xx;
	struct cvmx_spxx_tpa_acc_s cn38xxp2;
	struct cvmx_spxx_tpa_acc_s cn58xx;
	struct cvmx_spxx_tpa_acc_s cn58xxp1;
};
typedef union cvmx_spxx_tpa_acc cvmx_spxx_tpa_acc_t;
1130 * SPX_TPA_MAX - TPA watcher assertion threshold
1134 * The TPA watcher has the ability to notify the system with an interrupt when
1135 * too much data has been received on loss of TPA. The user sets the
1136 * SPX_TPA_MAX[MAX] register and when the watcher has accumulated that many
1137 * ticks, then the interrupt is conditionally raised (based on interrupt mask
1138 * bits). This feature will be disabled if the programmed count is zero.
/* SPX_TPA_MAX: TPA watcher assertion threshold (0 disables the watcher). */
union cvmx_spxx_tpa_max {
	uint64_t u64;	/* raw register value */
	struct cvmx_spxx_tpa_max_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t max : 32; /**< TPA watcher TPA threshold */
#else
	uint64_t max : 32;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_spxx_tpa_max_s cn38xx;
	struct cvmx_spxx_tpa_max_s cn38xxp2;
	struct cvmx_spxx_tpa_max_s cn58xx;
	struct cvmx_spxx_tpa_max_s cn58xxp1;
};
typedef union cvmx_spxx_tpa_max cvmx_spxx_tpa_max_t;
1161 * SPX_TPA_SEL - TPA watcher port selector
1165 * The TPA Watcher is primarily a debug vehicle used to help initial bringup
1166 * of a system. The TPA watcher counts bytes that roll in from the Spi
1167 * interface. The user programs the Spi port to watch using
1168 * SPX_TPA_SEL[PRTSEL]. Once the TPA is deasserted for that port, the watcher
1169 * begins to count the data ticks that have been delivered to the inbound
1170 * datapath (and eventually to the IOB). The result is that we can derive
1171 * turn-around times of the other device by watching how much data was sent
1172 * after a loss of TPA through the SPX_TPA_ACC[CNT] register. An optional
1173 * interrupt may be raised as well. See SPX_TPA_MAX for further information.
1175 * TPA's can be deasserted for a number of reasons...
1177 * 1) IPD indicates backpressure
1178 * 2) The GMX inbound FIFO is filling up and should BP
1179 * 3) User has put an override on the TPA wires
/* SPX_TPA_SEL: selects which Spi port the TPA watcher monitors. */
union cvmx_spxx_tpa_sel {
	uint64_t u64;	/* raw register value */
	struct cvmx_spxx_tpa_sel_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_4_63 : 60;
	uint64_t prtsel : 4; /**< TPA watcher port select */
#else
	uint64_t prtsel : 4;
	uint64_t reserved_4_63 : 60;
#endif
	} s;
	struct cvmx_spxx_tpa_sel_s cn38xx;
	struct cvmx_spxx_tpa_sel_s cn38xxp2;
	struct cvmx_spxx_tpa_sel_s cn58xx;
	struct cvmx_spxx_tpa_sel_s cn58xxp1;
};
typedef union cvmx_spxx_tpa_sel cvmx_spxx_tpa_sel_t;
1200 * cvmx_spx#_trn4_ctl
1203 * These bits are controls for the Spi4 RX bit deskew logic. See that Spec
1204 * for further details.
1207 * On the initial training synchronization sequence, the hardware has the
1208 * BOOT_BIT set which means that it will continuously perform macro
1209 * operations. Once the BOOT_BIT is cleared, the macro machine will finish
1210 * the macro operation it is working on and then return to the idle state.
1211 * Subsequent training sequences will only go through a single macro
1212 * operation in order to do slight deskews.
1215 * Minimum value is 1. This parameter must be set for Spi4 mode using
1216 * auto-bit deskew. Regardless of the original intent, this field must be
1217 * set non-zero for deskew to function correctly.
1219 * The thought is the JITTER range is no longer required since the macro
1220 * machine was enhanced to understand about edge direction. Originally
1221 * these bits were intended to compensate for clock jitter.
1223 * dly: this is the intrinsic delay of each delay element
1224 * tap currently, it is 70ps-110ps.
1225 * jitter: amount of jitter we expect in the system (~200ps)
1226 * j: number of taps to account for jitter
1228 * j = ((jitter / dly) + 1)
1231 * This mode is used to test systems to make sure that the bit deskew
1232 * parameters have been correctly setup. After configuration, software can
1233 * set the TRNTEST mode bit. This should be done before SRX_COM_CTL[ST_EN]
1234 * is set such that we can be sure that the TX device is simply sending
1235 * continuous training patterns.
1237 * The test mode samples every incoming bit-time and makes sure that it is
1238 * either a training control or a training data packet. If any other data
1239 * is observed, then SPX_DBG_DESKEW_STATE[TESTRES] will assert signaling a
1242 * Software must clear TRNTEST before training is terminated.
1244 * * Example Spi4 RX init flow...
1246 * 1) set the CLKDLY lines (SPXX_CLK_CTL[CLKDLY])
1247 * - these bits must be set before the DLL can successfully lock
1249 * 2) set the SRXDLCK (SPXX_CLK_CTL[SRXDLCK])
1250 * - this is the DLL lock bit which also acts as a block reset
1252 * 3) wait for the DLLs lock
1254 * 4) set any desired fields in SPXX_DBG_DESKEW_CTL
1255 * - This register has only one field that most users will care about.
1256 * When set, DLLDIS will disable sending update pulses to the Spi4 RX
1257 * DLLs. This pulse allows the DLL to adjust to clock variations over
1258 * time. In general, it is desired behavior.
1260 * 5) set fields in SPXX_TRN4_CTL
1261 * - These fields deal with the MUX training sequence
1263 * This is the enable bit for the mux select. The MUX select will
1264 * run in the training sequence between the DLL and the Macro
1265 * sequence when enabled. Once the MUX selects are selected, the
1266 * entire macro sequence must be rerun. The expectation is that
1267 * this is only run at boot time and this is bit cleared at/around
1269 * - These fields deal with the Macro training sequence
1271 * This is the enable bit for the macro sequence. Macro sequences
1272 * will run after the DLL and MUX training sequences. Each macro
1273 * sequence can move the offset by one value.
1275 * This is how far we will search for an edge. Example...
1277 * dly: this is the intrinsic delay of each delay element
1278 * tap currently, it is 70ps-110ps.
1279 * U: bit time period in time units.
1281 * MAXDIST = MIN(16, ((bit_time / 2) / dly)
1283 * Each MAXDIST iteration consists of an edge detect in the early
1284 * and late (+/-) directions in an attempt to center the data. This
1285 * requires two training transitions, the control/data and
1286 * data/control transitions which comprise a training sequence.
1287 * Therefore, the number of training sequences required for a single
1288 * macro operation is simply MAXDIST.
1290 * 6) set the RCVTRN go bit (SPXX_CLK_CTL[RCVTRN])
1291 * - this bit synchs on the first valid complete training cycle and
1292 * starts to process the training packets
1294 * 6b) This is where software could manually set the controls as opposed to
1295 * letting the hardware do it. See the SPXX_DBG_DESKEW_CTL register
1296 * description for more detail.
1298 * 7) the TX device must continue to send training packets for the initial
1300 * - this can be determined by...
1302 * DLL: one training sequence for the DLL adjustment (regardless of enable/disable)
1303 * MUX: one training sequence for the Flop MUX taps (regardless of enable/disable)
1304 * INIT_SEQUENCES: max number of taps that we must move
1306 * INIT_SEQUENCES = MIN(16, ((bit_time / 2) / dly))
1308 * INIT_TRN = DLL + MUX + ROUNDUP((INIT_SEQUENCES * (MAXDIST + 2)))
1311 * - software can either wait a fixed amount of time based on the clock
1312 * frequencies or poll the SPXX_CLK_STAT[SRXTRN] register. Each
1313 * assertion of SRXTRN means that at least one training sequence has
1314 * been received. Software can poll, clear, and repeat on this bit to
1315 * eventually count all required transitions.
1318 * while (cnt < INIT_TRN) [
1319 * if (SPXX_CLK_STAT[SRXTRN]) [
1321 * SPXX_CLK_STAT[SRXTRN] = 0;
1325 * - subsequent training sequences will normally move the taps only
1326 * one position, so the ALPHA equation becomes...
1328 * MAC = (MAXDIST == 0) ? 1 : ROUNDUP((1 * (MAXDIST + 2))) + 1
1330 * ALPHA = DLL + MUX + MAC
1332 * ergo, MAXDIST simplifies to...
1334 * ALPHA = (MAXDIST == 0) ? 3 : MAXDIST + 5
1336 * DLL and MUX and MAC will always require at least a training sequence
1337 * each - even if disabled. If the macro sequence is enabled, an
1338 * additional training sequenece at the end is necessary. The extra
1339 * sequence allows for all training state to be cleared before resuming
1342 * 8) after the receiver gets enough training sequences in order to achieve
1343 * deskew lock, set SPXX_TRN4_CTL[CLR_BOOT]
1344 * - this disables the continuous macro sequences and puts it into one
1345 * macro sequence per training operation
1346 * - optionally, the machine can choose to fall out of training if
1347 * enough NOPs follow the training operation (require at least 32 NOPs
1348 * to follow the training sequence).
1350 * There must be at least MAXDIST + 3 training sequences after the
1351 * SPXX_TRN4_CTL[CLR_BOOT] is set or sufficient NOPs from the TX device.
1353 * 9) the TX device continues to send training sequences until the RX
1354 * device sends a calendar transition. This is controlled by
1355 * SRXX_COM_CTL[ST_EN]. Other restrictions require other Spi parameters
1356 * (e.g. the calendar table) to be setup before this bit can be enabled.
1357 * Once the entire interface is properly programmed, software writes
1358 * SRXX_COM_CTL[INF_EN]. At this point, the Spi4 packets will begin to
1359 * be sent into the N2K core and processed by the chip.
/* SPX_TRN4_CTL: Spi4 RX bit-deskew training controls (see block comment above). */
union cvmx_spxx_trn4_ctl {
	uint64_t u64;	/* raw register value */
	struct cvmx_spxx_trn4_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_13_63 : 51;
	uint64_t trntest : 1; /**< Training Test Mode
				   This bit is only for initial bringup
				   (spx_csr__spi4_trn_test_mode) */
	uint64_t jitter : 3; /**< Accounts for jitter when the macro sequence is
				  locking. The value is how many consecutive
				  transitions before declaring an edge. Minimum
				  value is 1. This parameter must be set for Spi4
				  mode using auto-bit deskew.
				  (spx_csr__spi4_mac_jitter) */
	uint64_t clr_boot : 1; /**< Clear the macro boot sequence mode bit
				    (spx_csr__spi4_mac_clr_boot) */
	uint64_t set_boot : 1; /**< Enable the macro boot sequence mode bit
				    (spx_csr__spi4_mac_set_boot) */
	uint64_t maxdist : 5; /**< This field defines how far from center the
				   deskew logic will search in a single macro
				   sequence (spx_csr__spi4_mac_iters) */
	uint64_t macro_en : 1; /**< Allow the macro sequence to center the sample
				    point in the data window through hardware
				    (spx_csr__spi4_mac_trn_en) */
	uint64_t mux_en : 1; /**< Enable the hardware machine that selects the
				  proper coarse FLOP selects
				  (spx_csr__spi4_mux_trn_en) */
#else
	/* Little-endian bitfield order: same fields, reversed. */
	uint64_t mux_en : 1;
	uint64_t macro_en : 1;
	uint64_t maxdist : 5;
	uint64_t set_boot : 1;
	uint64_t clr_boot : 1;
	uint64_t jitter : 3;
	uint64_t trntest : 1;
	uint64_t reserved_13_63 : 51;
#endif
	} s;
	struct cvmx_spxx_trn4_ctl_s cn38xx;
	struct cvmx_spxx_trn4_ctl_s cn38xxp2;
	struct cvmx_spxx_trn4_ctl_s cn58xx;
	struct cvmx_spxx_trn4_ctl_s cn58xxp1;
};
typedef union cvmx_spxx_trn4_ctl cvmx_spxx_trn4_ctl_t;