1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
44 * Configuration and status register (CSR) type definitions for
47 * This file is auto generated. Do not edit.
52 #ifndef __CVMX_ASXX_TYPEDEFS_H__
53 #define __CVMX_ASXX_TYPEDEFS_H__
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_GMII_RX_CLK_SET CSR address; only block 0 exists (CN30XX/CN31XX/CN50XX). */
static inline uint64_t CVMX_ASXX_GMII_RX_CLK_SET(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_ASXX_GMII_RX_CLK_SET(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000180ull);
}
#else
#define CVMX_ASXX_GMII_RX_CLK_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000180ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_GMII_RX_DAT_SET CSR address; only block 0 exists (CN30XX/CN31XX/CN50XX). */
static inline uint64_t CVMX_ASXX_GMII_RX_DAT_SET(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_ASXX_GMII_RX_DAT_SET(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000188ull);
}
#else
#define CVMX_ASXX_GMII_RX_DAT_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000188ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_INT_EN CSR address; blocks 0-1 on CN38XX/CN58XX, block 0 elsewhere. */
static inline uint64_t CVMX_ASXX_INT_EN(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_INT_EN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000018ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000018ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_INT_REG CSR address; blocks 0-1 on CN38XX/CN58XX, block 0 elsewhere. */
static inline uint64_t CVMX_ASXX_INT_REG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_INT_REG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000010ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000010ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_MII_RX_DAT_SET CSR address; only block 0 exists (CN30XX/CN50XX). */
static inline uint64_t CVMX_ASXX_MII_RX_DAT_SET(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_ASXX_MII_RX_DAT_SET(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000190ull);
}
#else
#define CVMX_ASXX_MII_RX_DAT_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000190ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_PRT_LOOP CSR address; blocks 0-1 on CN38XX/CN58XX, block 0 elsewhere. */
static inline uint64_t CVMX_ASXX_PRT_LOOP(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_PRT_LOOP(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000040ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_PRT_LOOP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000040ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RLD_BYPASS CSR address; blocks 0-1 on CN38XX/CN58XX only. */
static inline uint64_t CVMX_ASXX_RLD_BYPASS(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RLD_BYPASS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000248ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RLD_BYPASS(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000248ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RLD_BYPASS_SETTING CSR address; blocks 0-1 on CN38XX/CN58XX only. */
static inline uint64_t CVMX_ASXX_RLD_BYPASS_SETTING(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RLD_BYPASS_SETTING(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000250ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RLD_BYPASS_SETTING(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000250ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RLD_COMP CSR address; blocks 0-1 on CN38XX/CN58XX only. */
static inline uint64_t CVMX_ASXX_RLD_COMP(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RLD_COMP(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000220ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RLD_COMP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000220ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RLD_DATA_DRV CSR address; blocks 0-1 on CN38XX/CN58XX only. */
static inline uint64_t CVMX_ASXX_RLD_DATA_DRV(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RLD_DATA_DRV(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000218ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RLD_DATA_DRV(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000218ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RLD_FCRAM_MODE CSR address; blocks 0-1 on CN38XX only. */
static inline uint64_t CVMX_ASXX_RLD_FCRAM_MODE(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RLD_FCRAM_MODE(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000210ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RLD_FCRAM_MODE(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000210ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RLD_NCTL_STRONG CSR address; blocks 0-1 on CN38XX/CN58XX only. */
static inline uint64_t CVMX_ASXX_RLD_NCTL_STRONG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RLD_NCTL_STRONG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000230ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RLD_NCTL_STRONG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000230ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RLD_NCTL_WEAK CSR address; blocks 0-1 on CN38XX/CN58XX only. */
static inline uint64_t CVMX_ASXX_RLD_NCTL_WEAK(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RLD_NCTL_WEAK(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000240ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RLD_NCTL_WEAK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000240ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RLD_PCTL_STRONG CSR address; blocks 0-1 on CN38XX/CN58XX only. */
static inline uint64_t CVMX_ASXX_RLD_PCTL_STRONG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RLD_PCTL_STRONG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000228ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RLD_PCTL_STRONG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000228ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RLD_PCTL_WEAK CSR address; blocks 0-1 on CN38XX/CN58XX only. */
static inline uint64_t CVMX_ASXX_RLD_PCTL_WEAK(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RLD_PCTL_WEAK(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000238ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RLD_PCTL_WEAK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000238ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RLD_SETTING CSR address; blocks 0-1 on CN38XX/CN58XX only. */
static inline uint64_t CVMX_ASXX_RLD_SETTING(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RLD_SETTING(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000258ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RLD_SETTING(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000258ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RX_CLK_SET(offset) CSR address; ports 0-2 (0-3 on CN38XX/CN58XX), per-block offset. */
static inline uint64_t CVMX_ASXX_RX_CLK_SETX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_ASXX_RX_CLK_SETX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_ASXX_RX_CLK_SETX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RX_PRT_EN CSR address; blocks 0-1 on CN38XX/CN58XX, block 0 elsewhere. */
static inline uint64_t CVMX_ASXX_RX_PRT_EN(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RX_PRT_EN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000000ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000000ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RX_WOL CSR address; blocks 0-1 on CN38XX only. */
static inline uint64_t CVMX_ASXX_RX_WOL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RX_WOL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000100ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RX_WOL(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000100ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RX_WOL_MSK CSR address; blocks 0-1 on CN38XX only. */
static inline uint64_t CVMX_ASXX_RX_WOL_MSK(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RX_WOL_MSK(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000108ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RX_WOL_MSK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000108ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RX_WOL_POWOK CSR address; blocks 0-1 on CN38XX only. */
static inline uint64_t CVMX_ASXX_RX_WOL_POWOK(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RX_WOL_POWOK(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000118ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RX_WOL_POWOK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000118ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_RX_WOL_SIG CSR address; blocks 0-1 on CN38XX only. */
static inline uint64_t CVMX_ASXX_RX_WOL_SIG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_RX_WOL_SIG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000110ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_RX_WOL_SIG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000110ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_TX_CLK_SET(offset) CSR address; ports 0-2 (0-3 on CN38XX/CN58XX), per-block offset. */
static inline uint64_t CVMX_ASXX_TX_CLK_SETX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_ASXX_TX_CLK_SETX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_ASXX_TX_CLK_SETX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_TX_COMP_BYP CSR address; blocks 0-1 on CN38XX/CN58XX, block 0 elsewhere. */
static inline uint64_t CVMX_ASXX_TX_COMP_BYP(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_TX_COMP_BYP(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000068ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_TX_COMP_BYP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000068ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_TX_HI_WATER(offset) CSR address; ports 0-2 (0-3 on CN38XX/CN58XX), per-block offset. */
static inline uint64_t CVMX_ASXX_TX_HI_WATERX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_ASXX_TX_HI_WATERX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* ASX_TX_PRT_EN CSR address; blocks 0-1 on CN38XX/CN58XX, block 0 elsewhere. */
static inline uint64_t CVMX_ASXX_TX_PRT_EN(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_ASXX_TX_PRT_EN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_ASXX_TX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull)
#endif
/**
 * cvmx_asx#_gmii_rx_clk_set
 *
 * ASX_GMII_RX_CLK_SET = GMII Clock delay setting
 *
 */
union cvmx_asxx_gmii_rx_clk_set
{
	uint64_t u64;
	struct cvmx_asxx_gmii_rx_clk_set_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t setting                      : 5;  /**< Setting to place on the RXCLK (GMII receive clk)
                                                         delay line.  The intrinsic delay can range from
                                                         50ps to 80ps per tap. */
#else
	uint64_t setting                      : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_asxx_gmii_rx_clk_set_s    cn30xx;
	struct cvmx_asxx_gmii_rx_clk_set_s    cn31xx;
	struct cvmx_asxx_gmii_rx_clk_set_s    cn50xx;
};
typedef union cvmx_asxx_gmii_rx_clk_set cvmx_asxx_gmii_rx_clk_set_t;
/**
 * cvmx_asx#_gmii_rx_dat_set
 *
 * ASX_GMII_RX_DAT_SET = GMII Clock delay setting
 *
 */
union cvmx_asxx_gmii_rx_dat_set
{
	uint64_t u64;
	struct cvmx_asxx_gmii_rx_dat_set_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t setting                      : 5;  /**< Setting to place on the RXD (GMII receive data)
                                                         delay lines.  The intrinsic delay can range from
                                                         50ps to 80ps per tap. */
#else
	uint64_t setting                      : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_asxx_gmii_rx_dat_set_s    cn30xx;
	struct cvmx_asxx_gmii_rx_dat_set_s    cn31xx;
	struct cvmx_asxx_gmii_rx_dat_set_s    cn50xx;
};
typedef union cvmx_asxx_gmii_rx_dat_set cvmx_asxx_gmii_rx_dat_set_t;
/**
 * cvmx_asx#_int_en
 *
 * ASX_INT_EN = Interrupt Enable
 *
 */
union cvmx_asxx_int_en
{
	uint64_t u64;
	struct cvmx_asxx_int_en_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_12_63               : 52;
	uint64_t txpsh                        : 4;  /**< TX FIFO overflow on RMGII port */
	uint64_t txpop                        : 4;  /**< TX FIFO underflow on RMGII port */
	uint64_t ovrflw                       : 4;  /**< RX FIFO overflow on RMGII port */
#else
	uint64_t ovrflw                       : 4;
	uint64_t txpop                        : 4;
	uint64_t txpsh                        : 4;
	uint64_t reserved_12_63               : 52;
#endif
	} s;
	struct cvmx_asxx_int_en_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_11_63               : 53;
	uint64_t txpsh                        : 3;  /**< TX FIFO overflow on RMGII port */
	uint64_t reserved_7_7                 : 1;
	uint64_t txpop                        : 3;  /**< TX FIFO underflow on RMGII port */
	uint64_t reserved_3_3                 : 1;
	uint64_t ovrflw                       : 3;  /**< RX FIFO overflow on RMGII port */
#else
	uint64_t ovrflw                       : 3;
	uint64_t reserved_3_3                 : 1;
	uint64_t txpop                        : 3;
	uint64_t reserved_7_7                 : 1;
	uint64_t txpsh                        : 3;
	uint64_t reserved_11_63               : 53;
#endif
	} cn30xx;
	struct cvmx_asxx_int_en_cn30xx        cn31xx;
	struct cvmx_asxx_int_en_s             cn38xx;
	struct cvmx_asxx_int_en_s             cn38xxp2;
	struct cvmx_asxx_int_en_cn30xx        cn50xx;
	struct cvmx_asxx_int_en_s             cn58xx;
	struct cvmx_asxx_int_en_s             cn58xxp1;
};
typedef union cvmx_asxx_int_en cvmx_asxx_int_en_t;
/**
 * cvmx_asx#_int_reg
 *
 * ASX_INT_REG = Interrupt Register
 *
 */
union cvmx_asxx_int_reg
{
	uint64_t u64;
	struct cvmx_asxx_int_reg_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_12_63               : 52;
	uint64_t txpsh                        : 4;  /**< TX FIFO overflow on RMGII port */
	uint64_t txpop                        : 4;  /**< TX FIFO underflow on RMGII port */
	uint64_t ovrflw                       : 4;  /**< RX FIFO overflow on RMGII port */
#else
	uint64_t ovrflw                       : 4;
	uint64_t txpop                        : 4;
	uint64_t txpsh                        : 4;
	uint64_t reserved_12_63               : 52;
#endif
	} s;
	struct cvmx_asxx_int_reg_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_11_63               : 53;
	uint64_t txpsh                        : 3;  /**< TX FIFO overflow on RMGII port */
	uint64_t reserved_7_7                 : 1;
	uint64_t txpop                        : 3;  /**< TX FIFO underflow on RMGII port */
	uint64_t reserved_3_3                 : 1;
	uint64_t ovrflw                       : 3;  /**< RX FIFO overflow on RMGII port */
#else
	uint64_t ovrflw                       : 3;
	uint64_t reserved_3_3                 : 1;
	uint64_t txpop                        : 3;
	uint64_t reserved_7_7                 : 1;
	uint64_t txpsh                        : 3;
	uint64_t reserved_11_63               : 53;
#endif
	} cn30xx;
	struct cvmx_asxx_int_reg_cn30xx       cn31xx;
	struct cvmx_asxx_int_reg_s            cn38xx;
	struct cvmx_asxx_int_reg_s            cn38xxp2;
	struct cvmx_asxx_int_reg_cn30xx       cn50xx;
	struct cvmx_asxx_int_reg_s            cn58xx;
	struct cvmx_asxx_int_reg_s            cn58xxp1;
};
typedef union cvmx_asxx_int_reg cvmx_asxx_int_reg_t;
/**
 * cvmx_asx#_mii_rx_dat_set
 *
 * ASX_MII_RX_DAT_SET = GMII Clock delay setting
 *
 */
union cvmx_asxx_mii_rx_dat_set
{
	uint64_t u64;
	struct cvmx_asxx_mii_rx_dat_set_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t setting                      : 5;  /**< Setting to place on the RXD (MII receive data)
                                                         delay lines.  The intrinsic delay can range from
                                                         50ps to 80ps per tap. */
#else
	uint64_t setting                      : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_asxx_mii_rx_dat_set_s     cn30xx;
	struct cvmx_asxx_mii_rx_dat_set_s     cn50xx;
};
typedef union cvmx_asxx_mii_rx_dat_set cvmx_asxx_mii_rx_dat_set_t;
/**
 * cvmx_asx#_prt_loop
 *
 * ASX_PRT_LOOP = Internal Loopback mode - TX FIFO output goes into RX FIFO (and maybe pins)
 *
 */
union cvmx_asxx_prt_loop
{
	uint64_t u64;
	struct cvmx_asxx_prt_loop_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_8_63                : 56;
	uint64_t ext_loop                     : 4;  /**< External Loopback Enable
                                                         0 = No Loopback (TX FIFO is filled by RMGII)
                                                         1 = RX FIFO drives the TX FIFO
                                                         - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
                                                         - GMX_PRT_CFG[SPEED]  must be 1 (GigE speed)
                                                         - core clock > 250MHZ
                                                         - rxc must not deviate from the +-50ppm
                                                         - if txc>rxc, idle cycle may drop over time */
	uint64_t int_loop                     : 4;  /**< Internal Loopback Enable
                                                         0 = No Loopback (RX FIFO is filled by RMGII pins)
                                                         1 = TX FIFO drives the RX FIFO
                                                         Note, in internal loop-back mode, the RGMII link
                                                         status is not used (since there is no real PHY).
                                                         Software cannot use the inband status. */
#else
	uint64_t int_loop                     : 4;
	uint64_t ext_loop                     : 4;
	uint64_t reserved_8_63                : 56;
#endif
	} s;
	struct cvmx_asxx_prt_loop_cn30xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_7_63                : 57;
	uint64_t ext_loop                     : 3;  /**< External Loopback Enable
                                                         0 = No Loopback (TX FIFO is filled by RMGII)
                                                         1 = RX FIFO drives the TX FIFO
                                                         - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
                                                         - GMX_PRT_CFG[SPEED]  must be 1 (GigE speed)
                                                         - core clock > 250MHZ
                                                         - rxc must not deviate from the +-50ppm
                                                         - if txc>rxc, idle cycle may drop over time */
	uint64_t reserved_3_3                 : 1;
	uint64_t int_loop                     : 3;  /**< Internal Loopback Enable
                                                         0 = No Loopback (RX FIFO is filled by RMGII pins)
                                                         1 = TX FIFO drives the RX FIFO
                                                         - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex)
                                                         - GMX_PRT_CFG[SPEED]  must be 1 (GigE speed)
                                                         - GMX_TX_CLK[CLK_CNT] must be 1
                                                         Note, in internal loop-back mode, the RGMII link
                                                         status is not used (since there is no real PHY).
                                                         Software cannot use the inband status. */
#else
	uint64_t int_loop                     : 3;
	uint64_t reserved_3_3                 : 1;
	uint64_t ext_loop                     : 3;
	uint64_t reserved_7_63                : 57;
#endif
	} cn30xx;
	struct cvmx_asxx_prt_loop_cn30xx      cn31xx;
	struct cvmx_asxx_prt_loop_s           cn38xx;
	struct cvmx_asxx_prt_loop_s           cn38xxp2;
	struct cvmx_asxx_prt_loop_cn30xx      cn50xx;
	struct cvmx_asxx_prt_loop_s           cn58xx;
	struct cvmx_asxx_prt_loop_s           cn58xxp1;
};
typedef union cvmx_asxx_prt_loop cvmx_asxx_prt_loop_t;
/**
 * cvmx_asx#_rld_bypass
 */
union cvmx_asxx_rld_bypass
{
	uint64_t u64;
	struct cvmx_asxx_rld_bypass_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_1_63                : 63;
	uint64_t bypass                       : 1;  /**< When set, the rld_dll setting is bypassed with
                                                         ASX_RLD_BYPASS_SETTING */
#else
	uint64_t bypass                       : 1;
	uint64_t reserved_1_63                : 63;
#endif
	} s;
	struct cvmx_asxx_rld_bypass_s         cn38xx;
	struct cvmx_asxx_rld_bypass_s         cn38xxp2;
	struct cvmx_asxx_rld_bypass_s         cn58xx;
	struct cvmx_asxx_rld_bypass_s         cn58xxp1;
};
typedef union cvmx_asxx_rld_bypass cvmx_asxx_rld_bypass_t;
/**
 * cvmx_asx#_rld_bypass_setting
 *
 * ASX_RLD_BYPASS_SETTING
 */
union cvmx_asxx_rld_bypass_setting
{
	uint64_t u64;
	struct cvmx_asxx_rld_bypass_setting_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t setting                      : 5;  /**< The rld_dll setting bypass value */
#else
	uint64_t setting                      : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_asxx_rld_bypass_setting_s cn38xx;
	struct cvmx_asxx_rld_bypass_setting_s cn38xxp2;
	struct cvmx_asxx_rld_bypass_setting_s cn58xx;
	struct cvmx_asxx_rld_bypass_setting_s cn58xxp1;
};
typedef union cvmx_asxx_rld_bypass_setting cvmx_asxx_rld_bypass_setting_t;
/**
 * cvmx_asx#_rld_comp
 */
union cvmx_asxx_rld_comp
{
	uint64_t u64;
	struct cvmx_asxx_rld_comp_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_9_63                : 55;
	uint64_t pctl                         : 5;  /**< PCTL Compensation Value
                                                         These bits reflect the computed compensation
                                                         values from the built-in compensation circuit. */
	uint64_t nctl                         : 4;  /**< These bits reflect the computed compensation
                                                         values from the built-in compensation circuit. */
#else
	uint64_t nctl                         : 4;
	uint64_t pctl                         : 5;
	uint64_t reserved_9_63                : 55;
#endif
	} s;
	struct cvmx_asxx_rld_comp_cn38xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_8_63                : 56;
	uint64_t pctl                         : 4;  /**< These bits reflect the computed compensation
                                                         values from the built-in compensation circuit. */
	uint64_t nctl                         : 4;  /**< These bits reflect the computed compensation
                                                         values from the built-in compensation circuit. */
#else
	uint64_t nctl                         : 4;
	uint64_t pctl                         : 4;
	uint64_t reserved_8_63                : 56;
#endif
	} cn38xx;
	struct cvmx_asxx_rld_comp_cn38xx      cn38xxp2;
	struct cvmx_asxx_rld_comp_s           cn58xx;
	struct cvmx_asxx_rld_comp_s           cn58xxp1;
};
typedef union cvmx_asxx_rld_comp cvmx_asxx_rld_comp_t;
/**
 * cvmx_asx#_rld_data_drv
 */
union cvmx_asxx_rld_data_drv
{
	uint64_t u64;
	struct cvmx_asxx_rld_data_drv_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_8_63                : 56;
	uint64_t pctl                         : 4;  /**< These bits specify a driving strength (positive
                                                         integer) for the RLD I/Os when the built-in
                                                         compensation circuit is bypassed. */
	uint64_t nctl                         : 4;  /**< These bits specify a driving strength (positive
                                                         integer) for the RLD I/Os when the built-in
                                                         compensation circuit is bypassed. */
#else
	uint64_t nctl                         : 4;
	uint64_t pctl                         : 4;
	uint64_t reserved_8_63                : 56;
#endif
	} s;
	struct cvmx_asxx_rld_data_drv_s       cn38xx;
	struct cvmx_asxx_rld_data_drv_s       cn38xxp2;
	struct cvmx_asxx_rld_data_drv_s       cn58xx;
	struct cvmx_asxx_rld_data_drv_s       cn58xxp1;
};
typedef union cvmx_asxx_rld_data_drv cvmx_asxx_rld_data_drv_t;
/**
 * cvmx_asx#_rld_fcram_mode
 */
union cvmx_asxx_rld_fcram_mode
{
	uint64_t u64;
	struct cvmx_asxx_rld_fcram_mode_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_1_63                : 63;
	uint64_t mode                         : 1;  /**< Memory Mode */
#else
	uint64_t mode                         : 1;
	uint64_t reserved_1_63                : 63;
#endif
	} s;
	struct cvmx_asxx_rld_fcram_mode_s     cn38xx;
	struct cvmx_asxx_rld_fcram_mode_s     cn38xxp2;
};
typedef union cvmx_asxx_rld_fcram_mode cvmx_asxx_rld_fcram_mode_t;
/**
 * cvmx_asx#_rld_nctl_strong
 *
 * ASX_RLD_NCTL_STRONG
 */
union cvmx_asxx_rld_nctl_strong
{
	uint64_t u64;
	struct cvmx_asxx_rld_nctl_strong_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t nctl                         : 5;  /**< Duke's drive control */
#else
	uint64_t nctl                         : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_asxx_rld_nctl_strong_s    cn38xx;
	struct cvmx_asxx_rld_nctl_strong_s    cn38xxp2;
	struct cvmx_asxx_rld_nctl_strong_s    cn58xx;
	struct cvmx_asxx_rld_nctl_strong_s    cn58xxp1;
};
typedef union cvmx_asxx_rld_nctl_strong cvmx_asxx_rld_nctl_strong_t;
/**
 * cvmx_asx#_rld_nctl_weak
 */
union cvmx_asxx_rld_nctl_weak
{
	uint64_t u64;
	struct cvmx_asxx_rld_nctl_weak_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t nctl                         : 5;  /**< UNUSED (not needed for CN58XX) */
#else
	uint64_t nctl                         : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_asxx_rld_nctl_weak_s      cn38xx;
	struct cvmx_asxx_rld_nctl_weak_s      cn38xxp2;
	struct cvmx_asxx_rld_nctl_weak_s      cn58xx;
	struct cvmx_asxx_rld_nctl_weak_s      cn58xxp1;
};
typedef union cvmx_asxx_rld_nctl_weak cvmx_asxx_rld_nctl_weak_t;
/**
 * cvmx_asx#_rld_pctl_strong
 *
 * ASX_RLD_PCTL_STRONG
 */
union cvmx_asxx_rld_pctl_strong
{
	uint64_t u64;
	struct cvmx_asxx_rld_pctl_strong_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t pctl                         : 5;  /**< Duke's drive control */
#else
	uint64_t pctl                         : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_asxx_rld_pctl_strong_s    cn38xx;
	struct cvmx_asxx_rld_pctl_strong_s    cn38xxp2;
	struct cvmx_asxx_rld_pctl_strong_s    cn58xx;
	struct cvmx_asxx_rld_pctl_strong_s    cn58xxp1;
};
typedef union cvmx_asxx_rld_pctl_strong cvmx_asxx_rld_pctl_strong_t;
/**
 * cvmx_asx#_rld_pctl_weak
 */
union cvmx_asxx_rld_pctl_weak
{
	uint64_t u64;
	struct cvmx_asxx_rld_pctl_weak_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t pctl                         : 5;  /**< UNUSED (not needed for CN58XX) */
#else
	uint64_t pctl                         : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_asxx_rld_pctl_weak_s      cn38xx;
	struct cvmx_asxx_rld_pctl_weak_s      cn38xxp2;
	struct cvmx_asxx_rld_pctl_weak_s      cn58xx;
	struct cvmx_asxx_rld_pctl_weak_s      cn58xxp1;
};
typedef union cvmx_asxx_rld_pctl_weak cvmx_asxx_rld_pctl_weak_t;
/**
 * cvmx_asx#_rld_setting
 */
union cvmx_asxx_rld_setting
{
	uint64_t u64;
	struct cvmx_asxx_rld_setting_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_13_63               : 51;
	uint64_t dfaset                       : 5;  /**< RLD ClkGen DLL Setting(debug) */
	uint64_t dfalag                       : 1;  /**< RLD ClkGen DLL Lag Error(debug) */
	uint64_t dfalead                      : 1;  /**< RLD ClkGen DLL Lead Error(debug) */
	uint64_t dfalock                      : 1;  /**< RLD ClkGen DLL Lock acquisition(debug) */
	uint64_t setting                      : 5;  /**< RLDCK90 DLL Setting(debug) */
#else
	uint64_t setting                      : 5;
	uint64_t dfalock                      : 1;
	uint64_t dfalead                      : 1;
	uint64_t dfalag                       : 1;
	uint64_t dfaset                       : 5;
	uint64_t reserved_13_63               : 51;
#endif
	} s;
	struct cvmx_asxx_rld_setting_cn38xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t setting                      : 5;  /**< This is the read-only true rld dll_setting. */
#else
	uint64_t setting                      : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} cn38xx;
	struct cvmx_asxx_rld_setting_cn38xx   cn38xxp2;
	struct cvmx_asxx_rld_setting_s        cn58xx;
	struct cvmx_asxx_rld_setting_s        cn58xxp1;
};
typedef union cvmx_asxx_rld_setting cvmx_asxx_rld_setting_t;
/**
 * cvmx_asx#_rx_clk_set#
 *
 * ASX_RX_CLK_SET = RGMII Clock delay setting
 *
 * Setting to place on the open-loop RXC (RGMII receive clk)
 * delay line, which can delay the received clock. This
 * can be used if the board and/or transmitting device
 * has not otherwise delayed the clock.
 *
 * A value of SETTING=0 disables the delay line. The delay
 * line should be disabled unless the transmitter or board
 * does not delay the clock.
 *
 * Note that this delay line provides only a coarse control
 * over the delay. Generally, it can only reliably provide
 * a delay in the range 1.25-2.5ns, which may not be adequate
 * for some system applications.
 *
 * The open loop delay line selects
 * from among a series of tap positions. Each incremental
 * tap position adds a delay of 50ps to 135ps per tap, depending
 * on the chip, its temperature, and the voltage.
 * To achieve from 1.25-2.5ns of delay on the received
 * clock, a fixed value of SETTING=24 may work.
 * For more precision, we recommend the following settings
 * based on the chip voltage:
 * (NOTE(review): per-voltage table rows lost in extraction —
 * restore from the hardware reference manual.)
 */
union cvmx_asxx_rx_clk_setx
{
	uint64_t u64;
	struct cvmx_asxx_rx_clk_setx_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63                : 59;
	uint64_t setting                      : 5;  /**< Setting to place on the open-loop RXC delay line */
#else
	uint64_t setting                      : 5;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_asxx_rx_clk_setx_s        cn30xx;
	struct cvmx_asxx_rx_clk_setx_s        cn31xx;
	struct cvmx_asxx_rx_clk_setx_s        cn38xx;
	struct cvmx_asxx_rx_clk_setx_s        cn38xxp2;
	struct cvmx_asxx_rx_clk_setx_s        cn50xx;
	struct cvmx_asxx_rx_clk_setx_s        cn58xx;
	struct cvmx_asxx_rx_clk_setx_s        cn58xxp1;
};
typedef union cvmx_asxx_rx_clk_setx cvmx_asxx_rx_clk_setx_t;
1009 * cvmx_asx#_rx_prt_en
1011 * ASX_RX_PRT_EN = RGMII Port Enable
/* ASX_RX_PRT_EN: RGMII receive port enable. */
union cvmx_asxx_rx_prt_en
{
    uint64_t u64;                              /**< Raw 64-bit CSR value */
    struct cvmx_asxx_rx_prt_en_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_4_63               : 60;
    uint64_t prt_en                      : 4;  /**< Port enable. Must be set for Octane to receive
                                                    RGMII traffic. When this bit is clear on a given
                                                    port, all RGMII cycles will appear as
                                                    inter-frame cycles. */
#else
    uint64_t prt_en                      : 4;
    uint64_t reserved_4_63               : 60;
#endif
    } s;
    /* cn30xx-family parts expose only 3 RGMII ports. */
    struct cvmx_asxx_rx_prt_en_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_3_63               : 61;
    uint64_t prt_en                      : 3;  /**< Port enable. Must be set for Octane to receive
                                                    RGMII traffic. When this bit is clear on a given
                                                    port, all RGMII cycles will appear as
                                                    inter-frame cycles. */
#else
    uint64_t prt_en                      : 3;
    uint64_t reserved_3_63               : 61;
#endif
    } cn30xx;
    struct cvmx_asxx_rx_prt_en_cn30xx    cn31xx;
    struct cvmx_asxx_rx_prt_en_s         cn38xx;
    struct cvmx_asxx_rx_prt_en_s         cn38xxp2;
    struct cvmx_asxx_rx_prt_en_cn30xx    cn50xx;
    struct cvmx_asxx_rx_prt_en_s         cn58xx;
    struct cvmx_asxx_rx_prt_en_s         cn58xxp1;
};
typedef union cvmx_asxx_rx_prt_en cvmx_asxx_rx_prt_en_t;
1055 * ASX_RX_WOL = RGMII RX Wake on LAN status register
/* ASX_RX_WOL: RGMII RX Wake-on-LAN status (cn38xx only). */
union cvmx_asxx_rx_wol
{
    uint64_t u64;                              /**< Raw 64-bit CSR value */
    struct cvmx_asxx_rx_wol_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_2_63               : 62;
    uint64_t status                      : 1;  /**< Copy of PMCSR[15] - PME_status */
    uint64_t enable                      : 1;  /**< Copy of PMCSR[8]  - PME_enable */
#else
    uint64_t enable                      : 1;
    uint64_t status                      : 1;
    uint64_t reserved_2_63               : 62;
#endif
    } s;
    struct cvmx_asxx_rx_wol_s            cn38xx;
    struct cvmx_asxx_rx_wol_s            cn38xxp2;
};
typedef union cvmx_asxx_rx_wol cvmx_asxx_rx_wol_t;
1079 * cvmx_asx#_rx_wol_msk
1081 * ASX_RX_WOL_MSK = RGMII RX Wake on LAN byte mask
/* ASX_RX_WOL_MSK: RGMII RX Wake-on-LAN byte mask (cn38xx only). */
union cvmx_asxx_rx_wol_msk
{
    uint64_t u64;                              /**< Raw 64-bit CSR value */
    struct cvmx_asxx_rx_wol_msk_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t msk                         : 64; /**< Bytes to include in the CRC signature */
#else
    uint64_t msk                         : 64;
#endif
    } s;
    struct cvmx_asxx_rx_wol_msk_s        cn38xx;
    struct cvmx_asxx_rx_wol_msk_s        cn38xxp2;
};
typedef union cvmx_asxx_rx_wol_msk cvmx_asxx_rx_wol_msk_t;
1101 * cvmx_asx#_rx_wol_powok
1103 * ASX_RX_WOL_POWOK = RGMII RX Wake on LAN Power OK
/* ASX_RX_WOL_POWOK: RGMII RX Wake-on-LAN power-OK status (cn38xx only). */
union cvmx_asxx_rx_wol_powok
{
    uint64_t u64;                              /**< Raw 64-bit CSR value */
    struct cvmx_asxx_rx_wol_powok_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_1_63               : 63;
    uint64_t powerok                     : 1;  /**< Power OK */
#else
    uint64_t powerok                     : 1;
    uint64_t reserved_1_63               : 63;
#endif
    } s;
    struct cvmx_asxx_rx_wol_powok_s      cn38xx;
    struct cvmx_asxx_rx_wol_powok_s      cn38xxp2;
};
typedef union cvmx_asxx_rx_wol_powok cvmx_asxx_rx_wol_powok_t;
1125 * cvmx_asx#_rx_wol_sig
1127 * ASX_RX_WOL_SIG = RGMII RX Wake on LAN CRC signature
/* ASX_RX_WOL_SIG: RGMII RX Wake-on-LAN CRC signature (cn38xx only). */
union cvmx_asxx_rx_wol_sig
{
    uint64_t u64;                              /**< Raw 64-bit CSR value */
    struct cvmx_asxx_rx_wol_sig_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_32_63              : 32;
    uint64_t sig                         : 32; /**< CRC signature */
#else
    uint64_t sig                         : 32;
    uint64_t reserved_32_63              : 32;
#endif
    } s;
    struct cvmx_asxx_rx_wol_sig_s        cn38xx;
    struct cvmx_asxx_rx_wol_sig_s        cn38xxp2;
};
typedef union cvmx_asxx_rx_wol_sig cvmx_asxx_rx_wol_sig_t;
1149 * cvmx_asx#_tx_clk_set#
1151 * ASX_TX_CLK_SET = RGMII Clock delay setting
1155 * Setting to place on the open-loop TXC (RGMII transmit clk)
1156 * delay line, which can delay the transmitted clock. This
1157 * can be used if the board and/or transmitting device
1158 * has not otherwise delayed the clock.
1160 * A value of SETTING=0 disables the delay line. The delay
1161 * line should be disabled unless the transmitter or board
1162 * does not delay the clock.
1164 * Note that this delay line provides only a coarse control
1165 * over the delay. Generally, it can only reliably provide
1166 * a delay in the range 1.25-2.5ns, which may not be adequate
1167 * for some system applications.
1169 * The open loop delay line selects
1170 * from among a series of tap positions. Each incremental
1171 * tap position adds a delay of 50ps to 135ps per tap, depending
1172 * on the chip, its temperature, and the voltage.
1173 * To achieve from 1.25-2.5ns of delay on the received
1174 * clock, a fixed value of SETTING=24 may work.
1175 * For more precision, we recommend the following settings
1176 * based on the chip voltage:
1179 * -----------------------------
/* ASX_TX_CLK_SET: open-loop TXC (RGMII transmit clock) delay-line setting.
 * SETTING=0 disables the delay line; see block comment above for tuning. */
union cvmx_asxx_tx_clk_setx
{
    uint64_t u64;                              /**< Raw 64-bit CSR value */
    struct cvmx_asxx_tx_clk_setx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_5_63               : 59;
    uint64_t setting                     : 5;  /**< Setting to place on the open-loop TXC delay line */
#else
    uint64_t setting                     : 5;
    uint64_t reserved_5_63               : 59;
#endif
    } s;
    /* Layout is identical on every chip that implements this CSR. */
    struct cvmx_asxx_tx_clk_setx_s       cn30xx;
    struct cvmx_asxx_tx_clk_setx_s       cn31xx;
    struct cvmx_asxx_tx_clk_setx_s       cn38xx;
    struct cvmx_asxx_tx_clk_setx_s       cn38xxp2;
    struct cvmx_asxx_tx_clk_setx_s       cn50xx;
    struct cvmx_asxx_tx_clk_setx_s       cn58xx;
    struct cvmx_asxx_tx_clk_setx_s       cn58xxp1;
};
typedef union cvmx_asxx_tx_clk_setx cvmx_asxx_tx_clk_setx_t;
1212 * cvmx_asx#_tx_comp_byp
1214 * ASX_TX_COMP_BYP = RGMII Clock delay setting
/* ASX_TX_COMP_BYP: RGMII output-driver compensation control/bypass.
 * The bit layout differs per chip family, hence the per-chip structs. */
union cvmx_asxx_tx_comp_byp
{
    uint64_t u64;                              /**< Raw 64-bit CSR value */
    struct cvmx_asxx_tx_comp_byp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_0_63               : 64;
#else
    uint64_t reserved_0_63               : 64;
#endif
    } s;
    struct cvmx_asxx_tx_comp_byp_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_9_63               : 55;
    uint64_t bypass                      : 1;  /**< Compensation bypass */
    uint64_t pctl                        : 4;  /**< PCTL Compensation Value (see Duke) */
    uint64_t nctl                        : 4;  /**< NCTL Compensation Value (see Duke) */
#else
    uint64_t nctl                        : 4;
    uint64_t pctl                        : 4;
    uint64_t bypass                      : 1;
    uint64_t reserved_9_63               : 55;
#endif
    } cn30xx;
    struct cvmx_asxx_tx_comp_byp_cn30xx  cn31xx;
    struct cvmx_asxx_tx_comp_byp_cn38xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_8_63               : 56;
    uint64_t pctl                        : 4;  /**< PCTL Compensation Value (see Duke) */
    uint64_t nctl                        : 4;  /**< NCTL Compensation Value (see Duke) */
#else
    uint64_t nctl                        : 4;
    uint64_t pctl                        : 4;
    uint64_t reserved_8_63               : 56;
#endif
    } cn38xx;
    struct cvmx_asxx_tx_comp_byp_cn38xx  cn38xxp2;
    struct cvmx_asxx_tx_comp_byp_cn50xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_17_63              : 47;
    uint64_t bypass                      : 1;  /**< Compensation bypass */
    uint64_t reserved_13_15              : 3;
    uint64_t pctl                        : 5;  /**< PCTL Compensation Value (see Duke) */
    uint64_t reserved_5_7                : 3;
    uint64_t nctl                        : 5;  /**< NCTL Compensation Value (see Duke) */
#else
    uint64_t nctl                        : 5;
    uint64_t reserved_5_7                : 3;
    uint64_t pctl                        : 5;
    uint64_t reserved_13_15              : 3;
    uint64_t bypass                      : 1;
    uint64_t reserved_17_63              : 47;
#endif
    } cn50xx;
    struct cvmx_asxx_tx_comp_byp_cn58xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_13_63              : 51;
    uint64_t pctl                        : 5;  /**< PCTL Compensation Value (see Duke) */
    uint64_t reserved_5_7                : 3;
    uint64_t nctl                        : 5;  /**< NCTL Compensation Value (see Duke) */
#else
    uint64_t nctl                        : 5;
    uint64_t reserved_5_7                : 3;
    uint64_t pctl                        : 5;
    uint64_t reserved_13_63              : 51;
#endif
    } cn58xx;
    struct cvmx_asxx_tx_comp_byp_cn58xx  cn58xxp1;
};
typedef union cvmx_asxx_tx_comp_byp cvmx_asxx_tx_comp_byp_t;
1293 * cvmx_asx#_tx_hi_water#
1295 * ASX_TX_HI_WATER = RGMII TX FIFO Hi WaterMark
/* ASX_TX_HI_WATER: TX FIFO high-watermark used to stall GMX. */
union cvmx_asxx_tx_hi_waterx
{
    uint64_t u64;                              /**< Raw 64-bit CSR value */
    struct cvmx_asxx_tx_hi_waterx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_4_63               : 60;
    uint64_t mark                        : 4;  /**< TX FIFO HiWatermark to stall GMX
                                                    Value of 0 maps to 16
                                                    Reset value changed from 10 in pass1
                                                    Pass1 settings (assuming 125 tclk) */
#else
    uint64_t mark                        : 4;
    uint64_t reserved_4_63               : 60;
#endif
    } s;
    /* cn30xx-family parts use a shallower FIFO (3-bit mark). */
    struct cvmx_asxx_tx_hi_waterx_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_3_63               : 61;
    uint64_t mark                        : 3;  /**< TX FIFO HiWatermark to stall GMX
                                                    Value 0 maps to 8. */
#else
    uint64_t mark                        : 3;
    uint64_t reserved_3_63               : 61;
#endif
    } cn30xx;
    struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx;
    struct cvmx_asxx_tx_hi_waterx_s      cn38xx;
    struct cvmx_asxx_tx_hi_waterx_s      cn38xxp2;
    struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx;
    struct cvmx_asxx_tx_hi_waterx_s      cn58xx;
    struct cvmx_asxx_tx_hi_waterx_s      cn58xxp1;
};
typedef union cvmx_asxx_tx_hi_waterx cvmx_asxx_tx_hi_waterx_t;
1339 * cvmx_asx#_tx_prt_en
1341 * ASX_TX_PRT_EN = RGMII Port Enable
/* ASX_TX_PRT_EN: RGMII transmit port enable. */
union cvmx_asxx_tx_prt_en
{
    uint64_t u64;                              /**< Raw 64-bit CSR value */
    struct cvmx_asxx_tx_prt_en_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_4_63               : 60;
    uint64_t prt_en                      : 4;  /**< Port enable. Must be set for Octane to send
                                                    RGMII traffic. When this bit is clear on a given
                                                    port, all RGMII cycles will appear as
                                                    inter-frame cycles. */
#else
    uint64_t prt_en                      : 4;
    uint64_t reserved_4_63               : 60;
#endif
    } s;
    /* cn30xx-family parts expose only 3 RGMII ports. */
    struct cvmx_asxx_tx_prt_en_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_3_63               : 61;
    uint64_t prt_en                      : 3;  /**< Port enable. Must be set for Octane to send
                                                    RGMII traffic. When this bit is clear on a given
                                                    port, all RGMII cycles will appear as
                                                    inter-frame cycles. */
#else
    uint64_t prt_en                      : 3;
    uint64_t reserved_3_63               : 61;
#endif
    } cn30xx;
    struct cvmx_asxx_tx_prt_en_cn30xx    cn31xx;
    struct cvmx_asxx_tx_prt_en_s         cn38xx;
    struct cvmx_asxx_tx_prt_en_s         cn38xxp2;
    struct cvmx_asxx_tx_prt_en_cn30xx    cn50xx;
    struct cvmx_asxx_tx_prt_en_s         cn58xx;
    struct cvmx_asxx_tx_prt_en_s         cn58xxp1;
};
typedef union cvmx_asxx_tx_prt_en cvmx_asxx_tx_prt_en_t;