1 /***********************license start***************
2 * Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
 * Configuration and status register (CSR) type definitions for
 * Octeon agl.
 *
 * This file is auto generated. Do not edit.
 */

#ifndef __CVMX_AGL_DEFS_H__
#define __CVMX_AGL_DEFS_H__
/* CVMX_AGL_GMX_BAD_REG at 0x00011800E0000518.  When CSR address checking
 * is enabled, the accessor warns if the running chip is not one of the
 * models implementing this register; otherwise it is a plain address macro. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_BAD_REG CVMX_AGL_GMX_BAD_REG_FUNC()
static inline uint64_t CVMX_AGL_GMX_BAD_REG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_AGL_GMX_BAD_REG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E0000518ull);
}
#else
#define CVMX_AGL_GMX_BAD_REG (CVMX_ADD_IO_SEG(0x00011800E0000518ull))
#endif

/* CVMX_AGL_GMX_BIST at 0x00011800E0000400; same model check as above. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_BIST CVMX_AGL_GMX_BIST_FUNC()
static inline uint64_t CVMX_AGL_GMX_BIST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_AGL_GMX_BIST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E0000400ull);
}
#else
#define CVMX_AGL_GMX_BIST (CVMX_ADD_IO_SEG(0x00011800E0000400ull))
#endif
/* CVMX_AGL_GMX_DRV_CTL at 0x00011800E00007F0; CN52XX/CN56XX only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_DRV_CTL CVMX_AGL_GMX_DRV_CTL_FUNC()
static inline uint64_t CVMX_AGL_GMX_DRV_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
		cvmx_warn("CVMX_AGL_GMX_DRV_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00007F0ull);
}
#else
#define CVMX_AGL_GMX_DRV_CTL (CVMX_ADD_IO_SEG(0x00011800E00007F0ull))
#endif

/* CVMX_AGL_GMX_INF_MODE at 0x00011800E00007F8; CN52XX/CN56XX only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_INF_MODE CVMX_AGL_GMX_INF_MODE_FUNC()
static inline uint64_t CVMX_AGL_GMX_INF_MODE_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
		cvmx_warn("CVMX_AGL_GMX_INF_MODE not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00007F8ull);
}
#else
#define CVMX_AGL_GMX_INF_MODE (CVMX_ADD_IO_SEG(0x00011800E00007F8ull))
#endif
/* CVMX_AGL_GMX_PRT(port)_CFG: per-port CSR at 0x00011800E0000010 + 2048*port.
 * Valid ports: 0-1, except CN56XX/CN68XX which only have port 0. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_PRTX_CFG(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_PRTX_CFG(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048)
#endif
/* CVMX_AGL_GMX_RX(port)_ADR_CAM0..5: per-port CSRs at base + 2048*port
 * (bases 0x180..0x1A8).  Valid ports: 0-1, except CN56XX/CN68XX (port 0). */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM2(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM2(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM3(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM3(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM4(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM4(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM5(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM5(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048)
#endif
/* CVMX_AGL_GMX_RX(port)_ADR_CAM_EN (0x108) and _ADR_CTL (0x100): per-port
 * CSRs at base + 2048*port.  Ports 0-1; CN56XX/CN68XX have port 0 only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM_EN(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM_EN(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048)
#endif
/* CVMX_AGL_GMX_RX(port)_DECISION (0x040), _FRM_CHK (0x020), _FRM_CTL (0x018):
 * per-port CSRs at base + 2048*port.  Ports 0-1; CN56XX/CN68XX port 0 only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_DECISION(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_DECISION(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_DECISION(offset) (CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CHK(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CHK(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048)
#endif
/* CVMX_AGL_GMX_RX(port)_FRM_MAX (0x030), _FRM_MIN (0x028), _IFG (0x058):
 * per-port CSRs at base + 2048*port.  Ports 0-1; CN56XX/CN68XX port 0 only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MAX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MAX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MIN(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MIN(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_IFG(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_IFG(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_IFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048)
#endif
/* CVMX_AGL_GMX_RX(port)_INT_EN (0x008), _INT_REG (0x000), _JABBER (0x038):
 * per-port CSRs at base + 2048*port.  Ports 0-1; CN56XX/CN68XX port 0 only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_INT_EN(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_INT_EN(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_INT_REG(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_INT_REG(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_INT_REG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_JABBER(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_JABBER(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048)
#endif
/* CVMX_AGL_GMX_RX(port)_PAUSE_DROP_TIME (0x068): per-port CSR at base +
 * 2048*port.  Ports 0-1; CN56XX/CN68XX port 0 only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048)
#endif

/* CVMX_AGL_GMX_RX(port)_RX_INBND (0x060): CN61XX/CN63XX/CN66XX/CN68XX only
 * (note the shorter model list versus the accessors above). */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_RX_INBND(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_RX_INBND(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_RX_INBND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048)
#endif
/* CVMX_AGL_GMX_RX(port)_STATS_CTL (0x050), _STATS_OCTS (0x088),
 * _STATS_OCTS_CTL (0x098), _STATS_OCTS_DMAC (0x0A8), _STATS_OCTS_DRP (0x0B8):
 * per-port CSRs at base + 2048*port.  Ports 0-1; CN56XX/CN68XX port 0 only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048)
#endif
/* CVMX_AGL_GMX_RX(port)_STATS_PKTS (0x080), _STATS_PKTS_BAD (0x0C0),
 * _STATS_PKTS_CTL (0x090), _STATS_PKTS_DMAC (0x0A0), _STATS_PKTS_DRP (0x0B0):
 * per-port CSRs at base + 2048*port.  Ports 0-1; CN56XX/CN68XX port 0 only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) (CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048)
#endif
/* CVMX_AGL_GMX_RX(port)_UDD_SKP (0x048): per-port CSR at base + 2048*port.
 * Ports 0-1; CN56XX/CN68XX port 0 only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_UDD_SKP(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_UDD_SKP(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) (CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048)
#endif
/* CVMX_AGL_GMX_RX_BP_DROP(port) (0x420), _OFF(port) (0x460), _ON(port)
 * (0x440): per-port CSRs at base + 8*port (note the 8-byte stride, unlike
 * the 2048-byte stride used elsewhere).  Ports 0-1; CN56XX/CN68XX port 0. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RX_BP_DROPX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RX_BP_DROPX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_AGL_GMX_RX_BP_DROPX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RX_BP_OFFX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RX_BP_OFFX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_AGL_GMX_RX_BP_OFFX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RX_BP_ONX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_RX_BP_ONX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_AGL_GMX_RX_BP_ONX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8)
#endif
/* CVMX_AGL_GMX_RX_PRT_INFO at 0x00011800E00004E8. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_RX_PRT_INFO CVMX_AGL_GMX_RX_PRT_INFO_FUNC()
static inline uint64_t CVMX_AGL_GMX_RX_PRT_INFO_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_AGL_GMX_RX_PRT_INFO not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00004E8ull);
}
#else
#define CVMX_AGL_GMX_RX_PRT_INFO (CVMX_ADD_IO_SEG(0x00011800E00004E8ull))
#endif

/* CVMX_AGL_GMX_RX_TX_STATUS at 0x00011800E00007E8. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_RX_TX_STATUS CVMX_AGL_GMX_RX_TX_STATUS_FUNC()
static inline uint64_t CVMX_AGL_GMX_RX_TX_STATUS_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_AGL_GMX_RX_TX_STATUS not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00007E8ull);
}
#else
#define CVMX_AGL_GMX_RX_TX_STATUS (CVMX_ADD_IO_SEG(0x00011800E00007E8ull))
#endif
/* CVMX_AGL_GMX_SMAC(port) (0x230): per-port CSR at base + 2048*port.
 * Ports 0-1; CN56XX/CN68XX port 0 only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_SMACX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_SMACX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_SMACX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048)
#endif

/* CVMX_AGL_GMX_STAT_BP at 0x00011800E0000520. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_STAT_BP CVMX_AGL_GMX_STAT_BP_FUNC()
static inline uint64_t CVMX_AGL_GMX_STAT_BP_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
		cvmx_warn("CVMX_AGL_GMX_STAT_BP not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E0000520ull);
}
#else
#define CVMX_AGL_GMX_STAT_BP (CVMX_ADD_IO_SEG(0x00011800E0000520ull))
#endif
/* CVMX_AGL_GMX_TX(port)_APPEND (0x218), _CLK (0x208), _CTL (0x270): per-port
 * CSRs at base + 2048*port.  APPEND/CTL: ports 0-1 with CN56XX/CN68XX port 0
 * only; CLK: CN61XX/CN63XX/CN66XX/CN68XX only. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_APPEND(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_APPEND(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_APPEND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_CLK(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_CLK(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_CLK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048)
#endif
736 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
737 static inline uint64_t CVMX_AGL_GMX_TXX_MIN_PKT(unsigned long offset)
740 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
741 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
742 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
743 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
744 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
745 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
746 cvmx_warn("CVMX_AGL_GMX_TXX_MIN_PKT(%lu) is invalid on this chip\n", offset);
747 return CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048;
750 #define CVMX_AGL_GMX_TXX_MIN_PKT(offset) (CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048)
752 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
753 static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset)
756 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
757 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
758 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
759 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
760 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
761 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
762 cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(%lu) is invalid on this chip\n", offset);
763 return CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048;
766 #define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048)
768 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
769 static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(unsigned long offset)
772 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
773 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
774 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
775 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
776 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
777 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
778 cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(%lu) is invalid on this chip\n", offset);
779 return CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048;
782 #define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048)
784 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
785 static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_TOGO(unsigned long offset)
788 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
789 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
790 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
791 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
792 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
793 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
794 cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_TOGO(%lu) is invalid on this chip\n", offset);
795 return CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048;
798 #define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048)
800 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
801 static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_ZERO(unsigned long offset)
804 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
805 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
806 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
807 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
808 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
809 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
810 cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_ZERO(%lu) is invalid on this chip\n", offset);
811 return CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048;
814 #define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048)
816 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
817 static inline uint64_t CVMX_AGL_GMX_TXX_SOFT_PAUSE(unsigned long offset)
820 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
821 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
822 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
823 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
824 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
825 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
826 cvmx_warn("CVMX_AGL_GMX_TXX_SOFT_PAUSE(%lu) is invalid on this chip\n", offset);
827 return CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048;
830 #define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) (CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048)
832 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
833 static inline uint64_t CVMX_AGL_GMX_TXX_STAT0(unsigned long offset)
836 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
837 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
838 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
839 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
840 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
841 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
842 cvmx_warn("CVMX_AGL_GMX_TXX_STAT0(%lu) is invalid on this chip\n", offset);
843 return CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048;
846 #define CVMX_AGL_GMX_TXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048)
848 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
849 static inline uint64_t CVMX_AGL_GMX_TXX_STAT1(unsigned long offset)
852 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
853 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
854 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
855 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
856 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
857 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
858 cvmx_warn("CVMX_AGL_GMX_TXX_STAT1(%lu) is invalid on this chip\n", offset);
859 return CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048;
862 #define CVMX_AGL_GMX_TXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048)
864 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
865 static inline uint64_t CVMX_AGL_GMX_TXX_STAT2(unsigned long offset)
868 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
869 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
870 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
871 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
872 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
873 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
874 cvmx_warn("CVMX_AGL_GMX_TXX_STAT2(%lu) is invalid on this chip\n", offset);
875 return CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048;
878 #define CVMX_AGL_GMX_TXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048)
880 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
881 static inline uint64_t CVMX_AGL_GMX_TXX_STAT3(unsigned long offset)
884 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
885 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
886 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
887 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
888 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
889 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
890 cvmx_warn("CVMX_AGL_GMX_TXX_STAT3(%lu) is invalid on this chip\n", offset);
891 return CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048;
894 #define CVMX_AGL_GMX_TXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048)
896 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
897 static inline uint64_t CVMX_AGL_GMX_TXX_STAT4(unsigned long offset)
900 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
901 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
902 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
903 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
904 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
905 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
906 cvmx_warn("CVMX_AGL_GMX_TXX_STAT4(%lu) is invalid on this chip\n", offset);
907 return CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048;
910 #define CVMX_AGL_GMX_TXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048)
912 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
913 static inline uint64_t CVMX_AGL_GMX_TXX_STAT5(unsigned long offset)
916 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
917 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
918 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
919 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
920 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
921 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
922 cvmx_warn("CVMX_AGL_GMX_TXX_STAT5(%lu) is invalid on this chip\n", offset);
923 return CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048;
926 #define CVMX_AGL_GMX_TXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048)
928 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
929 static inline uint64_t CVMX_AGL_GMX_TXX_STAT6(unsigned long offset)
932 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
933 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
934 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
935 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
936 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
937 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
938 cvmx_warn("CVMX_AGL_GMX_TXX_STAT6(%lu) is invalid on this chip\n", offset);
939 return CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048;
942 #define CVMX_AGL_GMX_TXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048)
944 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
945 static inline uint64_t CVMX_AGL_GMX_TXX_STAT7(unsigned long offset)
948 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
949 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
950 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
951 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
952 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
953 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
954 cvmx_warn("CVMX_AGL_GMX_TXX_STAT7(%lu) is invalid on this chip\n", offset);
955 return CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048;
958 #define CVMX_AGL_GMX_TXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048)
960 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
961 static inline uint64_t CVMX_AGL_GMX_TXX_STAT8(unsigned long offset)
964 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
965 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
966 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
967 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
968 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
969 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
970 cvmx_warn("CVMX_AGL_GMX_TXX_STAT8(%lu) is invalid on this chip\n", offset);
971 return CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048;
974 #define CVMX_AGL_GMX_TXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048)
976 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
977 static inline uint64_t CVMX_AGL_GMX_TXX_STAT9(unsigned long offset)
980 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
981 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
982 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
983 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
984 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
985 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
986 cvmx_warn("CVMX_AGL_GMX_TXX_STAT9(%lu) is invalid on this chip\n", offset);
987 return CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048;
990 #define CVMX_AGL_GMX_TXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048)
992 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
993 static inline uint64_t CVMX_AGL_GMX_TXX_STATS_CTL(unsigned long offset)
996 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
997 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
998 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
999 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
1000 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
1001 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
1002 cvmx_warn("CVMX_AGL_GMX_TXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
1003 return CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048;
1006 #define CVMX_AGL_GMX_TXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048)
1008 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1009 static inline uint64_t CVMX_AGL_GMX_TXX_THRESH(unsigned long offset)
1012 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
1013 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
1014 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
1015 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
1016 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
1017 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
1018 cvmx_warn("CVMX_AGL_GMX_TXX_THRESH(%lu) is invalid on this chip\n", offset);
1019 return CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048;
1022 #define CVMX_AGL_GMX_TXX_THRESH(offset) (CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048)
1024 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1025 #define CVMX_AGL_GMX_TX_BP CVMX_AGL_GMX_TX_BP_FUNC()
1026 static inline uint64_t CVMX_AGL_GMX_TX_BP_FUNC(void)
1028 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
1029 cvmx_warn("CVMX_AGL_GMX_TX_BP not supported on this chip\n");
1030 return CVMX_ADD_IO_SEG(0x00011800E00004D0ull);
1033 #define CVMX_AGL_GMX_TX_BP (CVMX_ADD_IO_SEG(0x00011800E00004D0ull))
1035 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1036 #define CVMX_AGL_GMX_TX_COL_ATTEMPT CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC()
1037 static inline uint64_t CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC(void)
1039 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
1040 cvmx_warn("CVMX_AGL_GMX_TX_COL_ATTEMPT not supported on this chip\n");
1041 return CVMX_ADD_IO_SEG(0x00011800E0000498ull);
1044 #define CVMX_AGL_GMX_TX_COL_ATTEMPT (CVMX_ADD_IO_SEG(0x00011800E0000498ull))
1046 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1047 #define CVMX_AGL_GMX_TX_IFG CVMX_AGL_GMX_TX_IFG_FUNC()
1048 static inline uint64_t CVMX_AGL_GMX_TX_IFG_FUNC(void)
1050 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
1051 cvmx_warn("CVMX_AGL_GMX_TX_IFG not supported on this chip\n");
1052 return CVMX_ADD_IO_SEG(0x00011800E0000488ull);
1055 #define CVMX_AGL_GMX_TX_IFG (CVMX_ADD_IO_SEG(0x00011800E0000488ull))
1057 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1058 #define CVMX_AGL_GMX_TX_INT_EN CVMX_AGL_GMX_TX_INT_EN_FUNC()
1059 static inline uint64_t CVMX_AGL_GMX_TX_INT_EN_FUNC(void)
1061 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
1062 cvmx_warn("CVMX_AGL_GMX_TX_INT_EN not supported on this chip\n");
1063 return CVMX_ADD_IO_SEG(0x00011800E0000508ull);
1066 #define CVMX_AGL_GMX_TX_INT_EN (CVMX_ADD_IO_SEG(0x00011800E0000508ull))
1068 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1069 #define CVMX_AGL_GMX_TX_INT_REG CVMX_AGL_GMX_TX_INT_REG_FUNC()
1070 static inline uint64_t CVMX_AGL_GMX_TX_INT_REG_FUNC(void)
1072 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
1073 cvmx_warn("CVMX_AGL_GMX_TX_INT_REG not supported on this chip\n");
1074 return CVMX_ADD_IO_SEG(0x00011800E0000500ull);
1077 #define CVMX_AGL_GMX_TX_INT_REG (CVMX_ADD_IO_SEG(0x00011800E0000500ull))
1079 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1080 #define CVMX_AGL_GMX_TX_JAM CVMX_AGL_GMX_TX_JAM_FUNC()
1081 static inline uint64_t CVMX_AGL_GMX_TX_JAM_FUNC(void)
1083 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
1084 cvmx_warn("CVMX_AGL_GMX_TX_JAM not supported on this chip\n");
1085 return CVMX_ADD_IO_SEG(0x00011800E0000490ull);
1088 #define CVMX_AGL_GMX_TX_JAM (CVMX_ADD_IO_SEG(0x00011800E0000490ull))
1090 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1091 #define CVMX_AGL_GMX_TX_LFSR CVMX_AGL_GMX_TX_LFSR_FUNC()
1092 static inline uint64_t CVMX_AGL_GMX_TX_LFSR_FUNC(void)
1094 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
1095 cvmx_warn("CVMX_AGL_GMX_TX_LFSR not supported on this chip\n");
1096 return CVMX_ADD_IO_SEG(0x00011800E00004F8ull);
1099 #define CVMX_AGL_GMX_TX_LFSR (CVMX_ADD_IO_SEG(0x00011800E00004F8ull))
1101 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1102 #define CVMX_AGL_GMX_TX_OVR_BP CVMX_AGL_GMX_TX_OVR_BP_FUNC()
1103 static inline uint64_t CVMX_AGL_GMX_TX_OVR_BP_FUNC(void)
1105 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
1106 cvmx_warn("CVMX_AGL_GMX_TX_OVR_BP not supported on this chip\n");
1107 return CVMX_ADD_IO_SEG(0x00011800E00004C8ull);
1110 #define CVMX_AGL_GMX_TX_OVR_BP (CVMX_ADD_IO_SEG(0x00011800E00004C8ull))
1112 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1113 #define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC()
1114 static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC(void)
1116 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
1117 cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC not supported on this chip\n");
1118 return CVMX_ADD_IO_SEG(0x00011800E00004A0ull);
1121 #define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC (CVMX_ADD_IO_SEG(0x00011800E00004A0ull))
1123 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1124 #define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC()
1125 static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC(void)
1127 if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX) || OCTEON_IS_MODEL(OCTEON_CN68XX)))
1128 cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE not supported on this chip\n");
1129 return CVMX_ADD_IO_SEG(0x00011800E00004A8ull);
1132 #define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE (CVMX_ADD_IO_SEG(0x00011800E00004A8ull))
1134 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1135 static inline uint64_t CVMX_AGL_PRTX_CTL(unsigned long offset)
1138 (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
1139 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
1140 (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
1141 (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset == 0)))))
1142 cvmx_warn("CVMX_AGL_PRTX_CTL(%lu) is invalid on this chip\n", offset);
1143 return CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8;
1146 #define CVMX_AGL_PRTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8)
1150 * cvmx_agl_gmx_bad_reg
1152 * AGL_GMX_BAD_REG = A collection of things that have gone very, very wrong
1156 * OUT_OVR[0], LOSTSTAT[0], OVRFLW, TXPOP, TXPSH will be reset when MIX0_CTL[RESET] is set to 1.
1157 * OUT_OVR[1], LOSTSTAT[1], OVRFLW1, TXPOP1, TXPSH1 will be reset when MIX1_CTL[RESET] is set to 1.
1158 * STATOVR will be reset when both MIX0/1_CTL[RESET] are set to 1.
1160 union cvmx_agl_gmx_bad_reg {
1162 struct cvmx_agl_gmx_bad_reg_s {
1163 #ifdef __BIG_ENDIAN_BITFIELD
1164 uint64_t reserved_38_63 : 26;
1165 uint64_t txpsh1 : 1; /**< TX FIFO overflow (MII1) */
1166 uint64_t txpop1 : 1; /**< TX FIFO underflow (MII1) */
1167 uint64_t ovrflw1 : 1; /**< RX FIFO overflow (MII1) */
1168 uint64_t txpsh : 1; /**< TX FIFO overflow (MII0) */
1169 uint64_t txpop : 1; /**< TX FIFO underflow (MII0) */
1170 uint64_t ovrflw : 1; /**< RX FIFO overflow (MII0) */
1171 uint64_t reserved_27_31 : 5;
1172 uint64_t statovr : 1; /**< TX Statistics overflow */
1173 uint64_t reserved_24_25 : 2;
1174 uint64_t loststat : 2; /**< TX Statistics data was over-written
1175 In MII/RGMII, one bit per port
1176 TX Stats are corrupted */
1177 uint64_t reserved_4_21 : 18;
1178 uint64_t out_ovr : 2; /**< Outbound data FIFO overflow */
1179 uint64_t reserved_0_1 : 2;
1181 uint64_t reserved_0_1 : 2;
1182 uint64_t out_ovr : 2;
1183 uint64_t reserved_4_21 : 18;
1184 uint64_t loststat : 2;
1185 uint64_t reserved_24_25 : 2;
1186 uint64_t statovr : 1;
1187 uint64_t reserved_27_31 : 5;
1188 uint64_t ovrflw : 1;
1191 uint64_t ovrflw1 : 1;
1192 uint64_t txpop1 : 1;
1193 uint64_t txpsh1 : 1;
1194 uint64_t reserved_38_63 : 26;
1197 struct cvmx_agl_gmx_bad_reg_cn52xx {
1198 #ifdef __BIG_ENDIAN_BITFIELD
1199 uint64_t reserved_38_63 : 26;
1200 uint64_t txpsh1 : 1; /**< TX FIFO overflow (MII1) */
1201 uint64_t txpop1 : 1; /**< TX FIFO underflow (MII1) */
1202 uint64_t ovrflw1 : 1; /**< RX FIFO overflow (MII1) */
1203 uint64_t txpsh : 1; /**< TX FIFO overflow (MII0) */
1204 uint64_t txpop : 1; /**< TX FIFO underflow (MII0) */
1205 uint64_t ovrflw : 1; /**< RX FIFO overflow (MII0) */
1206 uint64_t reserved_27_31 : 5;
1207 uint64_t statovr : 1; /**< TX Statistics overflow */
1208 uint64_t reserved_23_25 : 3;
1209 uint64_t loststat : 1; /**< TX Statistics data was over-written
1210 TX Stats are corrupted */
1211 uint64_t reserved_4_21 : 18;
1212 uint64_t out_ovr : 2; /**< Outbound data FIFO overflow */
1213 uint64_t reserved_0_1 : 2;
1215 uint64_t reserved_0_1 : 2;
1216 uint64_t out_ovr : 2;
1217 uint64_t reserved_4_21 : 18;
1218 uint64_t loststat : 1;
1219 uint64_t reserved_23_25 : 3;
1220 uint64_t statovr : 1;
1221 uint64_t reserved_27_31 : 5;
1222 uint64_t ovrflw : 1;
1225 uint64_t ovrflw1 : 1;
1226 uint64_t txpop1 : 1;
1227 uint64_t txpsh1 : 1;
1228 uint64_t reserved_38_63 : 26;
1231 struct cvmx_agl_gmx_bad_reg_cn52xx cn52xxp1;
1232 struct cvmx_agl_gmx_bad_reg_cn56xx {
1233 #ifdef __BIG_ENDIAN_BITFIELD
1234 uint64_t reserved_35_63 : 29;
1235 uint64_t txpsh : 1; /**< TX FIFO overflow */
1236 uint64_t txpop : 1; /**< TX FIFO underflow */
1237 uint64_t ovrflw : 1; /**< RX FIFO overflow */
1238 uint64_t reserved_27_31 : 5;
1239 uint64_t statovr : 1; /**< TX Statistics overflow */
1240 uint64_t reserved_23_25 : 3;
1241 uint64_t loststat : 1; /**< TX Statistics data was over-written
1242 TX Stats are corrupted */
1243 uint64_t reserved_3_21 : 19;
1244 uint64_t out_ovr : 1; /**< Outbound data FIFO overflow */
1245 uint64_t reserved_0_1 : 2;
1247 uint64_t reserved_0_1 : 2;
1248 uint64_t out_ovr : 1;
1249 uint64_t reserved_3_21 : 19;
1250 uint64_t loststat : 1;
1251 uint64_t reserved_23_25 : 3;
1252 uint64_t statovr : 1;
1253 uint64_t reserved_27_31 : 5;
1254 uint64_t ovrflw : 1;
1257 uint64_t reserved_35_63 : 29;
1260 struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1;
1261 struct cvmx_agl_gmx_bad_reg_s cn61xx;
1262 struct cvmx_agl_gmx_bad_reg_s cn63xx;
1263 struct cvmx_agl_gmx_bad_reg_s cn63xxp1;
1264 struct cvmx_agl_gmx_bad_reg_s cn66xx;
1265 struct cvmx_agl_gmx_bad_reg_s cn68xx;
1266 struct cvmx_agl_gmx_bad_reg_s cn68xxp1;
1268 typedef union cvmx_agl_gmx_bad_reg cvmx_agl_gmx_bad_reg_t;
1273 * AGL_GMX_BIST = GMX BIST Results
1277 * Not reset when MIX*_CTL[RESET] is set to 1.
1280 union cvmx_agl_gmx_bist {
1282 struct cvmx_agl_gmx_bist_s {
1283 #ifdef __BIG_ENDIAN_BITFIELD
1284 uint64_t reserved_25_63 : 39;
1285 uint64_t status : 25; /**< BIST Results.
1286 HW sets a bit in BIST for each memory that fails
1287 - 0: gmx#.inb.fif_bnk0
1288 - 1: gmx#.inb.fif_bnk1
1289 - 2: gmx#.inb.fif_bnk2
1290 - 3: gmx#.inb.fif_bnk3
1291 - 4: gmx#.inb.fif_bnk_ext0
1292 - 5: gmx#.inb.fif_bnk_ext1
1293 - 6: gmx#.inb.fif_bnk_ext2
1294 - 7: gmx#.inb.fif_bnk_ext3
1295 - 8: gmx#.outb.fif.fif_bnk0
1296 - 9: gmx#.outb.fif.fif_bnk1
1299 - 12: gmx#.outb.fif.fif_bnk_ext0
1300 - 13: gmx#.outb.fif.fif_bnk_ext1
1307 - 20: gmx#.csr.drf20x32m2_bist
1308 - 21: gmx#.csr.drf20x48m2_bist
1309 - 22: gmx#.outb.stat.drf16x27m1_bist
1310 - 23: gmx#.outb.stat.drf40x64m1_bist
1313 uint64_t status : 25;
1314 uint64_t reserved_25_63 : 39;
1317 struct cvmx_agl_gmx_bist_cn52xx {
1318 #ifdef __BIG_ENDIAN_BITFIELD
1319 uint64_t reserved_10_63 : 54;
1320 uint64_t status : 10; /**< BIST Results.
1321 HW sets a bit in BIST for each memory that fails
1322 - 0: gmx#.inb.drf128x78m1_bist
1323 - 1: gmx#.outb.fif.drf128x71m1_bist
1324 - 2: gmx#.csr.gmi0.srf8x64m1_bist
1325 - 3: gmx#.csr.gmi1.srf8x64m1_bist
1328 - 6: gmx#.csr.drf20x80m1_bist
1329 - 7: gmx#.outb.stat.drf16x27m1_bist
1330 - 8: gmx#.outb.stat.drf40x64m1_bist
1333 uint64_t status : 10;
1334 uint64_t reserved_10_63 : 54;
1337 struct cvmx_agl_gmx_bist_cn52xx cn52xxp1;
1338 struct cvmx_agl_gmx_bist_cn52xx cn56xx;
1339 struct cvmx_agl_gmx_bist_cn52xx cn56xxp1;
1340 struct cvmx_agl_gmx_bist_s cn61xx;
1341 struct cvmx_agl_gmx_bist_s cn63xx;
1342 struct cvmx_agl_gmx_bist_s cn63xxp1;
1343 struct cvmx_agl_gmx_bist_s cn66xx;
1344 struct cvmx_agl_gmx_bist_s cn68xx;
1345 struct cvmx_agl_gmx_bist_s cn68xxp1;
1347 typedef union cvmx_agl_gmx_bist cvmx_agl_gmx_bist_t;
1350 * cvmx_agl_gmx_drv_ctl
1352 * AGL_GMX_DRV_CTL = GMX Drive Control
1356 * NCTL, PCTL, BYP_EN will be reset when MIX0_CTL[RESET] is set to 1.
1357 * NCTL1, PCTL1, BYP_EN1 will be reset when MIX1_CTL[RESET] is set to 1.
1359 union cvmx_agl_gmx_drv_ctl {
1361 struct cvmx_agl_gmx_drv_ctl_s {
1362 #ifdef __BIG_ENDIAN_BITFIELD
1363 uint64_t reserved_49_63 : 15;
1364 uint64_t byp_en1 : 1; /**< Compensation Controller Bypass Enable (MII1) */
1365 uint64_t reserved_45_47 : 3;
1366 uint64_t pctl1 : 5; /**< AGL PCTL (MII1) */
1367 uint64_t reserved_37_39 : 3;
1368 uint64_t nctl1 : 5; /**< AGL NCTL (MII1) */
1369 uint64_t reserved_17_31 : 15;
1370 uint64_t byp_en : 1; /**< Compensation Controller Bypass Enable */
1371 uint64_t reserved_13_15 : 3;
1372 uint64_t pctl : 5; /**< AGL PCTL */
1373 uint64_t reserved_5_7 : 3;
1374 uint64_t nctl : 5; /**< AGL NCTL */
1377 uint64_t reserved_5_7 : 3;
1379 uint64_t reserved_13_15 : 3;
1380 uint64_t byp_en : 1;
1381 uint64_t reserved_17_31 : 15;
1383 uint64_t reserved_37_39 : 3;
1385 uint64_t reserved_45_47 : 3;
1386 uint64_t byp_en1 : 1;
1387 uint64_t reserved_49_63 : 15;
1390 struct cvmx_agl_gmx_drv_ctl_s cn52xx;
1391 struct cvmx_agl_gmx_drv_ctl_s cn52xxp1;
1392 struct cvmx_agl_gmx_drv_ctl_cn56xx {
1393 #ifdef __BIG_ENDIAN_BITFIELD
1394 uint64_t reserved_17_63 : 47;
1395 uint64_t byp_en : 1; /**< Compensation Controller Bypass Enable */
1396 uint64_t reserved_13_15 : 3;
1397 uint64_t pctl : 5; /**< AGL PCTL */
1398 uint64_t reserved_5_7 : 3;
1399 uint64_t nctl : 5; /**< AGL NCTL */
1402 uint64_t reserved_5_7 : 3;
1404 uint64_t reserved_13_15 : 3;
1405 uint64_t byp_en : 1;
1406 uint64_t reserved_17_63 : 47;
1409 struct cvmx_agl_gmx_drv_ctl_cn56xx cn56xxp1;
1411 typedef union cvmx_agl_gmx_drv_ctl cvmx_agl_gmx_drv_ctl_t;
1414 * cvmx_agl_gmx_inf_mode
1416 * AGL_GMX_INF_MODE = Interface Mode
1420 * Not reset when MIX*_CTL[RESET] is set to 1.
1423 union cvmx_agl_gmx_inf_mode {
1425 struct cvmx_agl_gmx_inf_mode_s {
1426 #ifdef __BIG_ENDIAN_BITFIELD
1427 uint64_t reserved_2_63 : 62;
1428 uint64_t en : 1; /**< Interface Enable */
1429 uint64_t reserved_0_0 : 1;
1431 uint64_t reserved_0_0 : 1;
1433 uint64_t reserved_2_63 : 62;
1436 struct cvmx_agl_gmx_inf_mode_s cn52xx;
1437 struct cvmx_agl_gmx_inf_mode_s cn52xxp1;
1438 struct cvmx_agl_gmx_inf_mode_s cn56xx;
1439 struct cvmx_agl_gmx_inf_mode_s cn56xxp1;
1441 typedef union cvmx_agl_gmx_inf_mode cvmx_agl_gmx_inf_mode_t;
1444 * cvmx_agl_gmx_prt#_cfg
1446 * AGL_GMX_PRT_CFG = Port description
1450 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
1453 union cvmx_agl_gmx_prtx_cfg {
1455 struct cvmx_agl_gmx_prtx_cfg_s {
1456 #ifdef __BIG_ENDIAN_BITFIELD
1457 uint64_t reserved_14_63 : 50;
1458 uint64_t tx_idle : 1; /**< TX Machine is idle */
1459 uint64_t rx_idle : 1; /**< RX Machine is idle */
1460 uint64_t reserved_9_11 : 3;
1461 uint64_t speed_msb : 1; /**< Link Speed MSB [SPEED_MSB:SPEED]
1462 10 = 10Mbs operation
1463 00 = 100Mbs operation
1464 01 = 1000Mbs operation
1466 uint64_t reserved_7_7 : 1;
1467 uint64_t burst : 1; /**< Half-Duplex Burst Enable
1468 Only valid for 1000Mbs half-duplex operation
1469 0 = burst length of 0x2000 (halfdup / 1000Mbs)
1470 1 = burst length of 0x0 (all other modes) */
1471 uint64_t tx_en : 1; /**< Port enable. Must be set for Octane to send
1472 RMGII traffic. When this bit clear on a given
1473 port, then all packet cycles will appear as
1474 inter-frame cycles. */
1475 uint64_t rx_en : 1; /**< Port enable. Must be set for Octane to receive
1476 RMGII traffic. When this bit clear on a given
1477 port, then the all packet cycles will appear as
1478 inter-frame cycles. */
1479 uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
1480 0 = 512 bitimes (10/100Mbs operation)
1481 1 = 4096 bitimes (1000Mbs operation) */
1482 uint64_t duplex : 1; /**< Duplex
1483 0 = Half Duplex (collisions/extentions/bursts)
1485 uint64_t speed : 1; /**< Link Speed LSB [SPEED_MSB:SPEED]
1486 10 = 10Mbs operation
1487 00 = 100Mbs operation
1488 01 = 1000Mbs operation
1490 uint64_t en : 1; /**< Link Enable
1491 When EN is clear, packets will not be received
1492 or transmitted (including PAUSE and JAM packets).
1493 If EN is cleared while a packet is currently
1494 being received or transmitted, the packet will
1495 be allowed to complete before the bus is idled.
1496 On the RX side, subsequent packets in a burst
1501 uint64_t duplex : 1;
1502 uint64_t slottime : 1;
1506 uint64_t reserved_7_7 : 1;
1507 uint64_t speed_msb : 1;
1508 uint64_t reserved_9_11 : 3;
1509 uint64_t rx_idle : 1;
1510 uint64_t tx_idle : 1;
1511 uint64_t reserved_14_63 : 50;
1514 struct cvmx_agl_gmx_prtx_cfg_cn52xx {
1515 #ifdef __BIG_ENDIAN_BITFIELD
1516 uint64_t reserved_6_63 : 58;
1517 uint64_t tx_en : 1; /**< Port enable. Must be set for Octane to send
1518 RMGII traffic. When this bit clear on a given
1519 port, then all MII cycles will appear as
1520 inter-frame cycles. */
1521 uint64_t rx_en : 1; /**< Port enable. Must be set for Octane to receive
1522 RMGII traffic. When this bit clear on a given
1523 port, then the all MII cycles will appear as
1524 inter-frame cycles. */
1525 uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
1526 0 = 512 bitimes (10/100Mbs operation)
1528 uint64_t duplex : 1; /**< Duplex
1529 0 = Half Duplex (collisions/extensions/bursts)
1531 uint64_t speed : 1; /**< Link Speed
1532 0 = 10/100Mbs operation
1534 uint64_t en : 1; /**< Link Enable
1535 When EN is clear, packets will not be received
1536 or transmitted (including PAUSE and JAM packets).
1537 If EN is cleared while a packet is currently
1538 being received or transmitted, the packet will
1539 be allowed to complete before the bus is idled.
1540 On the RX side, subsequent packets in a burst
1545 uint64_t duplex : 1;
1546 uint64_t slottime : 1;
1549 uint64_t reserved_6_63 : 58;
1552 struct cvmx_agl_gmx_prtx_cfg_cn52xx cn52xxp1;
1553 struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xx;
1554 struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xxp1;
1555 struct cvmx_agl_gmx_prtx_cfg_s cn61xx;
1556 struct cvmx_agl_gmx_prtx_cfg_s cn63xx;
1557 struct cvmx_agl_gmx_prtx_cfg_s cn63xxp1;
1558 struct cvmx_agl_gmx_prtx_cfg_s cn66xx;
1559 struct cvmx_agl_gmx_prtx_cfg_s cn68xx;
1560 struct cvmx_agl_gmx_prtx_cfg_s cn68xxp1;
1562 typedef union cvmx_agl_gmx_prtx_cfg cvmx_agl_gmx_prtx_cfg_t;
1565 * cvmx_agl_gmx_rx#_adr_cam0
1567 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1571 * Not reset when MIX*_CTL[RESET] is set to 1.
1574 union cvmx_agl_gmx_rxx_adr_cam0 {
1576 struct cvmx_agl_gmx_rxx_adr_cam0_s {
1577 #ifdef __BIG_ENDIAN_BITFIELD
1578 uint64_t adr : 64; /**< The DMAC address to match on
1579 Each entry contributes 8bits to one of 8 matchers.
1580 The CAM matches against unicst or multicst DMAC
1586 struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xx;
1587 struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1;
1588 struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx;
1589 struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1;
1590 struct cvmx_agl_gmx_rxx_adr_cam0_s cn61xx;
1591 struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xx;
1592 struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xxp1;
1593 struct cvmx_agl_gmx_rxx_adr_cam0_s cn66xx;
1594 struct cvmx_agl_gmx_rxx_adr_cam0_s cn68xx;
1595 struct cvmx_agl_gmx_rxx_adr_cam0_s cn68xxp1;
1597 typedef union cvmx_agl_gmx_rxx_adr_cam0 cvmx_agl_gmx_rxx_adr_cam0_t;
1600 * cvmx_agl_gmx_rx#_adr_cam1
1602 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1606 * Not reset when MIX*_CTL[RESET] is set to 1.
1609 union cvmx_agl_gmx_rxx_adr_cam1 {
1611 struct cvmx_agl_gmx_rxx_adr_cam1_s {
1612 #ifdef __BIG_ENDIAN_BITFIELD
1613 uint64_t adr : 64; /**< The DMAC address to match on
1614 Each entry contributes 8bits to one of 8 matchers.
1615 The CAM matches against unicst or multicst DMAC
1621 struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xx;
1622 struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1;
1623 struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx;
1624 struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1;
1625 struct cvmx_agl_gmx_rxx_adr_cam1_s cn61xx;
1626 struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xx;
1627 struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xxp1;
1628 struct cvmx_agl_gmx_rxx_adr_cam1_s cn66xx;
1629 struct cvmx_agl_gmx_rxx_adr_cam1_s cn68xx;
1630 struct cvmx_agl_gmx_rxx_adr_cam1_s cn68xxp1;
1632 typedef union cvmx_agl_gmx_rxx_adr_cam1 cvmx_agl_gmx_rxx_adr_cam1_t;
1635 * cvmx_agl_gmx_rx#_adr_cam2
1637 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1641 * Not reset when MIX*_CTL[RESET] is set to 1.
1644 union cvmx_agl_gmx_rxx_adr_cam2 {
1646 struct cvmx_agl_gmx_rxx_adr_cam2_s {
1647 #ifdef __BIG_ENDIAN_BITFIELD
1648 uint64_t adr : 64; /**< The DMAC address to match on
1649 Each entry contributes 8bits to one of 8 matchers.
1650 The CAM matches against unicst or multicst DMAC
1656 struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xx;
1657 struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1;
1658 struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx;
1659 struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1;
1660 struct cvmx_agl_gmx_rxx_adr_cam2_s cn61xx;
1661 struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xx;
1662 struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xxp1;
1663 struct cvmx_agl_gmx_rxx_adr_cam2_s cn66xx;
1664 struct cvmx_agl_gmx_rxx_adr_cam2_s cn68xx;
1665 struct cvmx_agl_gmx_rxx_adr_cam2_s cn68xxp1;
1667 typedef union cvmx_agl_gmx_rxx_adr_cam2 cvmx_agl_gmx_rxx_adr_cam2_t;
1670 * cvmx_agl_gmx_rx#_adr_cam3
1672 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1676 * Not reset when MIX*_CTL[RESET] is set to 1.
1679 union cvmx_agl_gmx_rxx_adr_cam3 {
1681 struct cvmx_agl_gmx_rxx_adr_cam3_s {
1682 #ifdef __BIG_ENDIAN_BITFIELD
1683 uint64_t adr : 64; /**< The DMAC address to match on
1684 Each entry contributes 8bits to one of 8 matchers.
1685 The CAM matches against unicst or multicst DMAC
1691 struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xx;
1692 struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1;
1693 struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx;
1694 struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1;
1695 struct cvmx_agl_gmx_rxx_adr_cam3_s cn61xx;
1696 struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xx;
1697 struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xxp1;
1698 struct cvmx_agl_gmx_rxx_adr_cam3_s cn66xx;
1699 struct cvmx_agl_gmx_rxx_adr_cam3_s cn68xx;
1700 struct cvmx_agl_gmx_rxx_adr_cam3_s cn68xxp1;
1702 typedef union cvmx_agl_gmx_rxx_adr_cam3 cvmx_agl_gmx_rxx_adr_cam3_t;
1705 * cvmx_agl_gmx_rx#_adr_cam4
1707 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1711 * Not reset when MIX*_CTL[RESET] is set to 1.
1714 union cvmx_agl_gmx_rxx_adr_cam4 {
1716 struct cvmx_agl_gmx_rxx_adr_cam4_s {
1717 #ifdef __BIG_ENDIAN_BITFIELD
1718 uint64_t adr : 64; /**< The DMAC address to match on
1719 Each entry contributes 8bits to one of 8 matchers.
1720 The CAM matches against unicst or multicst DMAC
1726 struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xx;
1727 struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1;
1728 struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx;
1729 struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1;
1730 struct cvmx_agl_gmx_rxx_adr_cam4_s cn61xx;
1731 struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xx;
1732 struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xxp1;
1733 struct cvmx_agl_gmx_rxx_adr_cam4_s cn66xx;
1734 struct cvmx_agl_gmx_rxx_adr_cam4_s cn68xx;
1735 struct cvmx_agl_gmx_rxx_adr_cam4_s cn68xxp1;
1737 typedef union cvmx_agl_gmx_rxx_adr_cam4 cvmx_agl_gmx_rxx_adr_cam4_t;
1740 * cvmx_agl_gmx_rx#_adr_cam5
1742 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1746 * Not reset when MIX*_CTL[RESET] is set to 1.
1749 union cvmx_agl_gmx_rxx_adr_cam5 {
1751 struct cvmx_agl_gmx_rxx_adr_cam5_s {
1752 #ifdef __BIG_ENDIAN_BITFIELD
1753 uint64_t adr : 64; /**< The DMAC address to match on
1754 Each entry contributes 8bits to one of 8 matchers.
1755 The CAM matches against unicst or multicst DMAC
1761 struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xx;
1762 struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1;
1763 struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx;
1764 struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1;
1765 struct cvmx_agl_gmx_rxx_adr_cam5_s cn61xx;
1766 struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xx;
1767 struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xxp1;
1768 struct cvmx_agl_gmx_rxx_adr_cam5_s cn66xx;
1769 struct cvmx_agl_gmx_rxx_adr_cam5_s cn68xx;
1770 struct cvmx_agl_gmx_rxx_adr_cam5_s cn68xxp1;
1772 typedef union cvmx_agl_gmx_rxx_adr_cam5 cvmx_agl_gmx_rxx_adr_cam5_t;
1775 * cvmx_agl_gmx_rx#_adr_cam_en
1777 * AGL_GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
1781 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
1784 union cvmx_agl_gmx_rxx_adr_cam_en {
1786 struct cvmx_agl_gmx_rxx_adr_cam_en_s {
1787 #ifdef __BIG_ENDIAN_BITFIELD
1788 uint64_t reserved_8_63 : 56;
1789 uint64_t en : 8; /**< CAM Entry Enables */
1792 uint64_t reserved_8_63 : 56;
1795 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xx;
1796 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1;
1797 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx;
1798 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1;
1799 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn61xx;
1800 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xx;
1801 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xxp1;
1802 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn66xx;
1803 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn68xx;
1804 struct cvmx_agl_gmx_rxx_adr_cam_en_s cn68xxp1;
1806 typedef union cvmx_agl_gmx_rxx_adr_cam_en cvmx_agl_gmx_rxx_adr_cam_en_t;
1809 * cvmx_agl_gmx_rx#_adr_ctl
1811 * AGL_GMX_RX_ADR_CTL = Address Filtering Control
1816 * Here is some pseudo code that represents the address filter behavior.
1819 * bool dmac_addr_filter(uint8 prt, uint48 dmac) [
1820 * ASSERT(prt >= 0 && prt <= 3);
1821 * if (is_bcst(dmac)) // broadcast accept
1822 * return (AGL_GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
1823 * if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 1) // multicast reject
1825 * if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 2) // multicast accept
1830 * for (i=0; i<8; i++) [
1831 * if (AGL_GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0)
1833 * uint48 unswizzled_mac_adr = 0x0;
1834 * for (j=5; j>=0; j--) [
1835 * unswizzled_mac_adr = (unswizzled_mac_adr << 8) | AGL_GMX_RX[prt]_ADR_CAM[j][ADR<i*8+7:i*8>];
1837 * if (unswizzled_mac_adr == dmac) [
1844 * return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
1846 * return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
1850 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
1852 union cvmx_agl_gmx_rxx_adr_ctl {
1854 struct cvmx_agl_gmx_rxx_adr_ctl_s {
1855 #ifdef __BIG_ENDIAN_BITFIELD
1856 uint64_t reserved_4_63 : 60;
1857 uint64_t cam_mode : 1; /**< Allow or deny DMAC address filter
1858 0 = reject the packet on DMAC address match
1859 1 = accept the packet on DMAC address match */
1860 uint64_t mcst : 2; /**< Multicast Mode
1861 0 = Use the Address Filter CAM
1862 1 = Force reject all multicast packets
1863 2 = Force accept all multicast packets
1865 uint64_t bcst : 1; /**< Accept All Broadcast Packets */
1869 uint64_t cam_mode : 1;
1870 uint64_t reserved_4_63 : 60;
1873 struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xx;
1874 struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1;
1875 struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx;
1876 struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1;
1877 struct cvmx_agl_gmx_rxx_adr_ctl_s cn61xx;
1878 struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xx;
1879 struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xxp1;
1880 struct cvmx_agl_gmx_rxx_adr_ctl_s cn66xx;
1881 struct cvmx_agl_gmx_rxx_adr_ctl_s cn68xx;
1882 struct cvmx_agl_gmx_rxx_adr_ctl_s cn68xxp1;
1884 typedef union cvmx_agl_gmx_rxx_adr_ctl cvmx_agl_gmx_rxx_adr_ctl_t;
1887 * cvmx_agl_gmx_rx#_decision
1889 * AGL_GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
1893 * As each byte in a packet is received by GMX, the L2 byte count is compared
1894 * against the AGL_GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes
1895 * from the beginning of the L2 header (DMAC). In normal operation, the L2
1896 * header begins after the PREAMBLE+SFD (AGL_GMX_RX_FRM_CTL[PRE_CHK]=1) and any
1897 * optional UDD skip data (AGL_GMX_RX_UDD_SKP[LEN]).
1899 * When AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
1900 * packet and would require UDD skip length to account for them.
1903 * Port Mode <=AGL_GMX_RX_DECISION bytes (default=24) >AGL_GMX_RX_DECISION bytes (default=24)
1905 * MII/Full Duplex accept packet apply filters
1906 * no filtering is applied accept packet based on DMAC and PAUSE packet filters
1908 * MII/Half Duplex drop packet apply filters
1909 * packet is unconditionally dropped accept packet based on DMAC
1911 * where l2_size = MAX(0, total_packet_size - AGL_GMX_RX_UDD_SKP[LEN] - ((AGL_GMX_RX_FRM_CTL[PRE_CHK]==1)*8)
1913 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
1915 union cvmx_agl_gmx_rxx_decision {
1917 struct cvmx_agl_gmx_rxx_decision_s {
1918 #ifdef __BIG_ENDIAN_BITFIELD
1919 uint64_t reserved_5_63 : 59;
1920 uint64_t cnt : 5; /**< The byte count to decide when to accept or filter
1924 uint64_t reserved_5_63 : 59;
1927 struct cvmx_agl_gmx_rxx_decision_s cn52xx;
1928 struct cvmx_agl_gmx_rxx_decision_s cn52xxp1;
1929 struct cvmx_agl_gmx_rxx_decision_s cn56xx;
1930 struct cvmx_agl_gmx_rxx_decision_s cn56xxp1;
1931 struct cvmx_agl_gmx_rxx_decision_s cn61xx;
1932 struct cvmx_agl_gmx_rxx_decision_s cn63xx;
1933 struct cvmx_agl_gmx_rxx_decision_s cn63xxp1;
1934 struct cvmx_agl_gmx_rxx_decision_s cn66xx;
1935 struct cvmx_agl_gmx_rxx_decision_s cn68xx;
1936 struct cvmx_agl_gmx_rxx_decision_s cn68xxp1;
1938 typedef union cvmx_agl_gmx_rxx_decision cvmx_agl_gmx_rxx_decision_t;
1941 * cvmx_agl_gmx_rx#_frm_chk
1943 * AGL_GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
1947 * If AGL_GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
1949 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
1951 union cvmx_agl_gmx_rxx_frm_chk {
1953 struct cvmx_agl_gmx_rxx_frm_chk_s {
1954 #ifdef __BIG_ENDIAN_BITFIELD
1955 uint64_t reserved_10_63 : 54;
1956 uint64_t niberr : 1; /**< Nibble error */
1957 uint64_t skperr : 1; /**< Skipper error */
1958 uint64_t rcverr : 1; /**< Frame was received with packet data reception error */
1959 uint64_t lenerr : 1; /**< Frame was received with length error */
1960 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
1961 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
1962 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
1963 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
1964 uint64_t carext : 1; /**< Carrier extend error */
1965 uint64_t minerr : 1; /**< Frame was received with length < min_length */
1967 uint64_t minerr : 1;
1968 uint64_t carext : 1;
1969 uint64_t maxerr : 1;
1970 uint64_t jabber : 1;
1971 uint64_t fcserr : 1;
1972 uint64_t alnerr : 1;
1973 uint64_t lenerr : 1;
1974 uint64_t rcverr : 1;
1975 uint64_t skperr : 1;
1976 uint64_t niberr : 1;
1977 uint64_t reserved_10_63 : 54;
1980 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx {
1981 #ifdef __BIG_ENDIAN_BITFIELD
1982 uint64_t reserved_9_63 : 55;
1983 uint64_t skperr : 1; /**< Skipper error */
1984 uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
1985 uint64_t lenerr : 1; /**< Frame was received with length error */
1986 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
1987 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
1988 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
1989 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
1990 uint64_t reserved_1_1 : 1;
1991 uint64_t minerr : 1; /**< Frame was received with length < min_length */
1993 uint64_t minerr : 1;
1994 uint64_t reserved_1_1 : 1;
1995 uint64_t maxerr : 1;
1996 uint64_t jabber : 1;
1997 uint64_t fcserr : 1;
1998 uint64_t alnerr : 1;
1999 uint64_t lenerr : 1;
2000 uint64_t rcverr : 1;
2001 uint64_t skperr : 1;
2002 uint64_t reserved_9_63 : 55;
2005 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn52xxp1;
2006 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xx;
2007 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xxp1;
2008 struct cvmx_agl_gmx_rxx_frm_chk_s cn61xx;
2009 struct cvmx_agl_gmx_rxx_frm_chk_s cn63xx;
2010 struct cvmx_agl_gmx_rxx_frm_chk_s cn63xxp1;
2011 struct cvmx_agl_gmx_rxx_frm_chk_s cn66xx;
2012 struct cvmx_agl_gmx_rxx_frm_chk_s cn68xx;
2013 struct cvmx_agl_gmx_rxx_frm_chk_s cn68xxp1;
2015 typedef union cvmx_agl_gmx_rxx_frm_chk cvmx_agl_gmx_rxx_frm_chk_t;
2018 * cvmx_agl_gmx_rx#_frm_ctl
2020 * AGL_GMX_RX_FRM_CTL = Frame Control
2025 * When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
2026 * determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
2027 * core as part of the packet.
2029 * In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
2030 * size when checking against the MIN and MAX bounds. Furthermore, the bytes
2031 * are skipped when locating the start of the L2 header for DMAC and Control
2032 * frame recognition.
2035 * These bits control how the HW handles incoming PAUSE packets. Here are
2036 * the most common modes of operation:
2037 * CTL_BCK=1,CTL_DRP=1 - HW does it all
2038 * CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames
2039 * CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored
2041 * These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode.
2042 * Since PAUSE packets only apply to fulldup operation, any PAUSE packet
2043 * would constitute an exception which should be handled by the processing
2044 * cores. PAUSE packets should not be forwarded.
2046 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2048 union cvmx_agl_gmx_rxx_frm_ctl {
2050 struct cvmx_agl_gmx_rxx_frm_ctl_s {
2051 #ifdef __BIG_ENDIAN_BITFIELD
2052 uint64_t reserved_13_63 : 51;
2053 uint64_t ptp_mode : 1; /**< Timestamp mode
2054 When PTP_MODE is set, a 64-bit timestamp will be
2055 prepended to every incoming packet. The timestamp
2056 bytes are added to the packet in such a way as to
2057 not modify the packet's receive byte count. This
2058 implies that the AGL_GMX_RX_JABBER,
2059 AGL_GMX_RX_FRM_MIN, AGL_GMX_RX_FRM_MAX,
2060 AGL_GMX_RX_DECISION, AGL_GMX_RX_UDD_SKP, and the
2061 AGL_GMX_RX_STATS_* do not require any adjustment
2062 as they operate on the received packet size.
2063 If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
2064 uint64_t reserved_11_11 : 1;
2065 uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
2066 due to PARTIAL packets */
2067 uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the the SFD byte
2068 regardless of the number of previous PREAMBLE
2069 nibbles. In this mode, PRE_STRP should be set to
2070 account for the variable nature of the PREAMBLE.
2071 PRE_CHK must be set to enable this and all
2072 PREAMBLE features. */
2073 uint64_t pad_len : 1; /**< When set, disables the length check for non-min
2074 sized pkts with padding in the client data */
2075 uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
2076 uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
2077 AGL will begin the frame at the first SFD.
2078 PRE_FREE must be set if PRE_ALIGN is set.
2079 PRE_CHK must be set to enable this and all
2080 PREAMBLE features. */
2081 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
2082 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign
2083 Multicast address */
2084 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
2085 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
2086 uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
2087 0=PREAMBLE+SFD is sent to core as part of frame
2088 1=PREAMBLE+SFD is dropped
2089 PRE_STRP must be set if PRE_ALIGN is set.
2090 PRE_CHK must be set to enable this and all
2091 PREAMBLE features. */
2092 uint64_t pre_chk : 1; /**< This port is configured to send a valid 802.3
2093 PREAMBLE to begin every frame. AGL checks that a
2094 valid PREAMBLE is received (based on PRE_FREE).
2095 When a problem does occur within the PREAMBLE
2096 sequence, the frame is marked as bad and not sent
2097 into the core. The AGL_GMX_RX_INT_REG[PCTERR]
2098 interrupt is also raised. */
2100 uint64_t pre_chk : 1;
2101 uint64_t pre_strp : 1;
2102 uint64_t ctl_drp : 1;
2103 uint64_t ctl_bck : 1;
2104 uint64_t ctl_mcst : 1;
2105 uint64_t ctl_smac : 1;
2106 uint64_t pre_free : 1;
2107 uint64_t vlan_len : 1;
2108 uint64_t pad_len : 1;
2109 uint64_t pre_align : 1;
2110 uint64_t null_dis : 1;
2111 uint64_t reserved_11_11 : 1;
2112 uint64_t ptp_mode : 1;
2113 uint64_t reserved_13_63 : 51;
2116 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx {
2117 #ifdef __BIG_ENDIAN_BITFIELD
2118 uint64_t reserved_10_63 : 54;
2119 uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the the SFD byte
2120 regardless of the number of previous PREAMBLE
2121 nibbles. In this mode, PREAMBLE can be consumed
2122 by the HW so when PRE_ALIGN is set, PRE_FREE,
2123 PRE_STRP must be set for correct operation.
2124 PRE_CHK must be set to enable this and all
2125 PREAMBLE features. */
2126 uint64_t pad_len : 1; /**< When set, disables the length check for non-min
2127 sized pkts with padding in the client data */
2128 uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
2129 uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
2130 0 - 254 cycles of PREAMBLE followed by SFD
2131 PRE_FREE must be set if PRE_ALIGN is set.
2132 PRE_CHK must be set to enable this and all
2133 PREAMBLE features. */
2134 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
2135 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign
2136 Multicast address */
2137 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
2138 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
2139 uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
2140 0=PREAMBLE+SFD is sent to core as part of frame
2141 1=PREAMBLE+SFD is dropped
2142 PRE_STRP must be set if PRE_ALIGN is set.
2143 PRE_CHK must be set to enable this and all
2144 PREAMBLE features. */
2145 uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
2146 to begin every frame. GMX checks that the
2147 PREAMBLE is sent correctly */
2149 uint64_t pre_chk : 1;
2150 uint64_t pre_strp : 1;
2151 uint64_t ctl_drp : 1;
2152 uint64_t ctl_bck : 1;
2153 uint64_t ctl_mcst : 1;
2154 uint64_t ctl_smac : 1;
2155 uint64_t pre_free : 1;
2156 uint64_t vlan_len : 1;
2157 uint64_t pad_len : 1;
2158 uint64_t pre_align : 1;
2159 uint64_t reserved_10_63 : 54;
2162 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn52xxp1;
2163 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xx;
2164 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xxp1;
2165 struct cvmx_agl_gmx_rxx_frm_ctl_s cn61xx;
2166 struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xx;
2167 struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xxp1;
2168 struct cvmx_agl_gmx_rxx_frm_ctl_s cn66xx;
2169 struct cvmx_agl_gmx_rxx_frm_ctl_s cn68xx;
2170 struct cvmx_agl_gmx_rxx_frm_ctl_s cn68xxp1;
2172 typedef union cvmx_agl_gmx_rxx_frm_ctl cvmx_agl_gmx_rxx_frm_ctl_t;
2175 * cvmx_agl_gmx_rx#_frm_max
2177 * AGL_GMX_RX_FRM_MAX = Frame Max length
2181 * When changing the LEN field, be sure that LEN does not exceed
2182 * AGL_GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that
2183 * are within the maximum length parameter to be rejected because they exceed
2184 * the AGL_GMX_RX_JABBER[CNT] limit.
2188 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2190 union cvmx_agl_gmx_rxx_frm_max {
2192 struct cvmx_agl_gmx_rxx_frm_max_s {
2193 #ifdef __BIG_ENDIAN_BITFIELD
2194 uint64_t reserved_16_63 : 48;
2195 uint64_t len : 16; /**< Byte count for Max-sized frame check
2196 AGL_GMX_RXn_FRM_CHK[MAXERR] enables the check
2198 If enabled, failing packets set the MAXERR
2199 interrupt and the MIX opcode is set to OVER_FCS
2200 (0x3, if packet has bad FCS) or OVER_ERR (0x4, if
2201 packet has good FCS).
2202 LEN <= AGL_GMX_RX_JABBER[CNT] */
2205 uint64_t reserved_16_63 : 48;
2208 struct cvmx_agl_gmx_rxx_frm_max_s cn52xx;
2209 struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1;
2210 struct cvmx_agl_gmx_rxx_frm_max_s cn56xx;
2211 struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1;
2212 struct cvmx_agl_gmx_rxx_frm_max_s cn61xx;
2213 struct cvmx_agl_gmx_rxx_frm_max_s cn63xx;
2214 struct cvmx_agl_gmx_rxx_frm_max_s cn63xxp1;
2215 struct cvmx_agl_gmx_rxx_frm_max_s cn66xx;
2216 struct cvmx_agl_gmx_rxx_frm_max_s cn68xx;
2217 struct cvmx_agl_gmx_rxx_frm_max_s cn68xxp1;
2219 typedef union cvmx_agl_gmx_rxx_frm_max cvmx_agl_gmx_rxx_frm_max_t;
2222 * cvmx_agl_gmx_rx#_frm_min
2224 * AGL_GMX_RX_FRM_MIN = Frame Min length
2228 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2231 union cvmx_agl_gmx_rxx_frm_min {
2233 struct cvmx_agl_gmx_rxx_frm_min_s {
2234 #ifdef __BIG_ENDIAN_BITFIELD
2235 uint64_t reserved_16_63 : 48;
2236 uint64_t len : 16; /**< Byte count for Min-sized frame check
2237 AGL_GMX_RXn_FRM_CHK[MINERR] enables the check
2239 If enabled, failing packets set the MINERR
2240 interrupt and the MIX opcode is set to UNDER_FCS
2241 (0x6, if packet has bad FCS) or UNDER_ERR (0x8,
2242 if packet has good FCS). */
2245 uint64_t reserved_16_63 : 48;
2248 struct cvmx_agl_gmx_rxx_frm_min_s cn52xx;
2249 struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1;
2250 struct cvmx_agl_gmx_rxx_frm_min_s cn56xx;
2251 struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1;
2252 struct cvmx_agl_gmx_rxx_frm_min_s cn61xx;
2253 struct cvmx_agl_gmx_rxx_frm_min_s cn63xx;
2254 struct cvmx_agl_gmx_rxx_frm_min_s cn63xxp1;
2255 struct cvmx_agl_gmx_rxx_frm_min_s cn66xx;
2256 struct cvmx_agl_gmx_rxx_frm_min_s cn68xx;
2257 struct cvmx_agl_gmx_rxx_frm_min_s cn68xxp1;
2259 typedef union cvmx_agl_gmx_rxx_frm_min cvmx_agl_gmx_rxx_frm_min_t;
2262 * cvmx_agl_gmx_rx#_ifg
2264 * AGL_GMX_RX_IFG = RX Min IFG
2268 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2271 union cvmx_agl_gmx_rxx_ifg {
2273 struct cvmx_agl_gmx_rxx_ifg_s {
2274 #ifdef __BIG_ENDIAN_BITFIELD
2275 uint64_t reserved_4_63 : 60;
2276 uint64_t ifg : 4; /**< Min IFG (in IFG*8 bits) between packets used to
2277 determine IFGERR. Normally IFG is 96 bits.
2278 Note in some operating modes, IFG cycles can be
2279 inserted or removed in order to achieve clock rate
2280 adaptation. For these reasons, the default value
2281 is slightly conservative and does not check up to
2282 the full 96 bits of IFG. */
2285 uint64_t reserved_4_63 : 60;
2288 struct cvmx_agl_gmx_rxx_ifg_s cn52xx;
2289 struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1;
2290 struct cvmx_agl_gmx_rxx_ifg_s cn56xx;
2291 struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1;
2292 struct cvmx_agl_gmx_rxx_ifg_s cn61xx;
2293 struct cvmx_agl_gmx_rxx_ifg_s cn63xx;
2294 struct cvmx_agl_gmx_rxx_ifg_s cn63xxp1;
2295 struct cvmx_agl_gmx_rxx_ifg_s cn66xx;
2296 struct cvmx_agl_gmx_rxx_ifg_s cn68xx;
2297 struct cvmx_agl_gmx_rxx_ifg_s cn68xxp1;
2299 typedef union cvmx_agl_gmx_rxx_ifg cvmx_agl_gmx_rxx_ifg_t;
2302 * cvmx_agl_gmx_rx#_int_en
2304 * AGL_GMX_RX_INT_EN = Interrupt Enable
2308 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2311 union cvmx_agl_gmx_rxx_int_en {
2313 struct cvmx_agl_gmx_rxx_int_en_s {
2314 #ifdef __BIG_ENDIAN_BITFIELD
2315 uint64_t reserved_20_63 : 44;
2316 uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
2317 uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex | NS */
2318 uint64_t phy_spd : 1; /**< Change in the RMGII inbound LinkSpeed | NS */
2319 uint64_t phy_link : 1; /**< Change in the RMGII inbound LinkStatus | NS */
2320 uint64_t ifgerr : 1; /**< Interframe Gap Violation */
2321 uint64_t coldet : 1; /**< Collision Detection */
2322 uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
2323 uint64_t rsverr : 1; /**< Packet reserved opcodes */
2324 uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
2325 uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
2326 uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) | NS */
2327 uint64_t skperr : 1; /**< Skipper error */
2328 uint64_t rcverr : 1; /**< Frame was received with RMGII Data reception error */
2329 uint64_t lenerr : 1; /**< Frame was received with length error */
2330 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
2331 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2332 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2333 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
2334 uint64_t carext : 1; /**< Carrier extend error */
2335 uint64_t minerr : 1; /**< Frame was received with length < min_length */
2337 uint64_t minerr : 1;
2338 uint64_t carext : 1;
2339 uint64_t maxerr : 1;
2340 uint64_t jabber : 1;
2341 uint64_t fcserr : 1;
2342 uint64_t alnerr : 1;
2343 uint64_t lenerr : 1;
2344 uint64_t rcverr : 1;
2345 uint64_t skperr : 1;
2346 uint64_t niberr : 1;
2347 uint64_t ovrerr : 1;
2348 uint64_t pcterr : 1;
2349 uint64_t rsverr : 1;
2350 uint64_t falerr : 1;
2351 uint64_t coldet : 1;
2352 uint64_t ifgerr : 1;
2353 uint64_t phy_link : 1;
2354 uint64_t phy_spd : 1;
2355 uint64_t phy_dupx : 1;
2356 uint64_t pause_drp : 1;
2357 uint64_t reserved_20_63 : 44;
2360 struct cvmx_agl_gmx_rxx_int_en_cn52xx {
2361 #ifdef __BIG_ENDIAN_BITFIELD
2362 uint64_t reserved_20_63 : 44;
2363 uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
2364 uint64_t reserved_16_18 : 3;
2365 uint64_t ifgerr : 1; /**< Interframe Gap Violation */
2366 uint64_t coldet : 1; /**< Collision Detection */
2367 uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
2368 uint64_t rsverr : 1; /**< MII reserved opcodes */
2369 uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
2370 uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
2371 uint64_t reserved_9_9 : 1;
2372 uint64_t skperr : 1; /**< Skipper error */
2373 uint64_t rcverr : 1; /**< Frame was received with RMGII Data reception error */
2374 uint64_t lenerr : 1; /**< Frame was received with length error */
2375 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
2376 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2377 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2378 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
2379 uint64_t reserved_1_1 : 1;
2380 uint64_t minerr : 1; /**< Frame was received with length < min_length */
2382 uint64_t minerr : 1;
2383 uint64_t reserved_1_1 : 1;
2384 uint64_t maxerr : 1;
2385 uint64_t jabber : 1;
2386 uint64_t fcserr : 1;
2387 uint64_t alnerr : 1;
2388 uint64_t lenerr : 1;
2389 uint64_t rcverr : 1;
2390 uint64_t skperr : 1;
2391 uint64_t reserved_9_9 : 1;
2392 uint64_t ovrerr : 1;
2393 uint64_t pcterr : 1;
2394 uint64_t rsverr : 1;
2395 uint64_t falerr : 1;
2396 uint64_t coldet : 1;
2397 uint64_t ifgerr : 1;
2398 uint64_t reserved_16_18 : 3;
2399 uint64_t pause_drp : 1;
2400 uint64_t reserved_20_63 : 44;
2403 struct cvmx_agl_gmx_rxx_int_en_cn52xx cn52xxp1;
2404 struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xx;
2405 struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xxp1;
2406 struct cvmx_agl_gmx_rxx_int_en_s cn61xx;
2407 struct cvmx_agl_gmx_rxx_int_en_s cn63xx;
2408 struct cvmx_agl_gmx_rxx_int_en_s cn63xxp1;
2409 struct cvmx_agl_gmx_rxx_int_en_s cn66xx;
2410 struct cvmx_agl_gmx_rxx_int_en_s cn68xx;
2411 struct cvmx_agl_gmx_rxx_int_en_s cn68xxp1;
2413 typedef union cvmx_agl_gmx_rxx_int_en cvmx_agl_gmx_rxx_int_en_t;
2416 * cvmx_agl_gmx_rx#_int_reg
2418 * AGL_GMX_RX_INT_REG = Interrupt Register
2422 * (1) exceptions will only be raised to the control processor if the
2423 * corresponding bit in the AGL_GMX_RX_INT_EN register is set.
2425 * (2) exception conditions 10:0 can also set the rcv/opcode in the received
2426 * packet's workQ entry. The AGL_GMX_RX_FRM_CHK register provides a bit mask
2427 * for configuring which conditions set the error.
2429 * (3) in half duplex operation, the expectation is that collisions will appear
2432 * (4) JABBER - An RX Jabber error indicates that a packet was received which
2433 * is longer than the maximum allowed packet as defined by the
2434 * system. GMX will truncate the packet at the JABBER count.
2435 * Failure to do so could lead to system instability.
2437 * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
2438 * AGL_GMX_RX_FRM_MAX. For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
2439 * > AGL_GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
2441 * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < AGL_GMX_RX_FRM_MIN.
2443 * (8) ALNERR - Indicates that the packet received was not an integer number of
2444 * bytes. If FCS checking is enabled, ALNERR will only assert if
2445 * the FCS is bad. If FCS checking is disabled, ALNERR will
2446 * assert in all non-integer frame cases.
2448 * (9) Collisions - Collisions can only occur in half-duplex mode. A collision
2449 * is assumed by the receiver when the received
2450 * frame < AGL_GMX_RX_FRM_MIN - this is normally a MINERR
2452 * (A) LENERR - Length errors occur when the received packet does not match the
2453 * length field. LENERR is only checked for packets between 64
2454 * and 1500 bytes. For untagged frames, the length must exact
2455 * match. For tagged frames the length or length+4 must match.
2457 * (B) PCTERR - checks that the frame begins with a valid PREAMBLE sequence.
2458 * Does not check the number of PREAMBLE cycles.
2462 * OVRERR is an architectural assertion check internal to GMX to
2463 * make sure no assumption was violated. In a correctly operating
2464 * system, this interrupt can never fire.
2466 * GMX has an internal arbiter which selects which of 4 ports to
2467 * buffer in the main RX FIFO. If we normally buffer 8 bytes,
2468 * then each port will typically push a tick every 8 cycles - if
2469 * the packet interface is going as fast as possible. If there
2470 * are four ports, they push every two cycles. So that's the
2471 * assumption. That the inbound module will always be able to
2472 * consume the tick before another is produced. If that doesn't
2473 * happen - that's when OVRERR will assert.
2475 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2477 union cvmx_agl_gmx_rxx_int_reg {
2479 struct cvmx_agl_gmx_rxx_int_reg_s {
2480 #ifdef __BIG_ENDIAN_BITFIELD
2481 uint64_t reserved_20_63 : 44;
2482 uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
2483 uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex | NS */
2484 uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed | NS */
2485 uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus | NS */
2486 uint64_t ifgerr : 1; /**< Interframe Gap Violation
2487 Does not necessarily indicate a failure */
2488 uint64_t coldet : 1; /**< Collision Detection */
2489 uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
2490 uint64_t rsverr : 1; /**< Packet reserved opcodes */
2491 uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
2492 uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
2493 This interrupt should never assert */
2494 uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) | NS */
2495 uint64_t skperr : 1; /**< Skipper error */
2496 uint64_t rcverr : 1; /**< Frame was received with Packet Data reception error */
2497 uint64_t lenerr : 1; /**< Frame was received with length error */
2498 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
2499 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2500 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2501 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
2502 uint64_t carext : 1; /**< Carrier extend error */
2503 uint64_t minerr : 1; /**< Frame was received with length < min_length */
/* Little-endian bitfield order: same fields as above, LSB first. */
2505 uint64_t minerr : 1;
2506 uint64_t carext : 1;
2507 uint64_t maxerr : 1;
2508 uint64_t jabber : 1;
2509 uint64_t fcserr : 1;
2510 uint64_t alnerr : 1;
2511 uint64_t lenerr : 1;
2512 uint64_t rcverr : 1;
2513 uint64_t skperr : 1;
2514 uint64_t niberr : 1;
2515 uint64_t ovrerr : 1;
2516 uint64_t pcterr : 1;
2517 uint64_t rsverr : 1;
2518 uint64_t falerr : 1;
2519 uint64_t coldet : 1;
2520 uint64_t ifgerr : 1;
2521 uint64_t phy_link : 1;
2522 uint64_t phy_spd : 1;
2523 uint64_t phy_dupx : 1;
2524 uint64_t pause_drp : 1;
2525 uint64_t reserved_20_63 : 44;
/* cn52xx-family layout: the PHY_*, NIBERR, and CAREXT bit positions are reserved. */
2528 struct cvmx_agl_gmx_rxx_int_reg_cn52xx {
2529 #ifdef __BIG_ENDIAN_BITFIELD
2530 uint64_t reserved_20_63 : 44;
2531 uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
2532 uint64_t reserved_16_18 : 3;
2533 uint64_t ifgerr : 1; /**< Interframe Gap Violation
2534 Does not necessarily indicate a failure */
2535 uint64_t coldet : 1; /**< Collision Detection */
2536 uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
2537 uint64_t rsverr : 1; /**< MII reserved opcodes */
2538 uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
2539 uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
2540 This interrupt should never assert */
2541 uint64_t reserved_9_9 : 1;
2542 uint64_t skperr : 1; /**< Skipper error */
2543 uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
2544 uint64_t lenerr : 1; /**< Frame was received with length error */
2545 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
2546 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2547 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2548 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
2549 uint64_t reserved_1_1 : 1;
2550 uint64_t minerr : 1; /**< Frame was received with length < min_length */
/* Little-endian bitfield order: same fields as above, LSB first. */
2552 uint64_t minerr : 1;
2553 uint64_t reserved_1_1 : 1;
2554 uint64_t maxerr : 1;
2555 uint64_t jabber : 1;
2556 uint64_t fcserr : 1;
2557 uint64_t alnerr : 1;
2558 uint64_t lenerr : 1;
2559 uint64_t rcverr : 1;
2560 uint64_t skperr : 1;
2561 uint64_t reserved_9_9 : 1;
2562 uint64_t ovrerr : 1;
2563 uint64_t pcterr : 1;
2564 uint64_t rsverr : 1;
2565 uint64_t falerr : 1;
2566 uint64_t coldet : 1;
2567 uint64_t ifgerr : 1;
2568 uint64_t reserved_16_18 : 3;
2569 uint64_t pause_drp : 1;
2570 uint64_t reserved_20_63 : 44;
/* Per-chip aliases selecting which layout above applies to each Octeon model. */
2573 struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn52xxp1;
2574 struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xx;
2575 struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xxp1;
2576 struct cvmx_agl_gmx_rxx_int_reg_s cn61xx;
2577 struct cvmx_agl_gmx_rxx_int_reg_s cn63xx;
2578 struct cvmx_agl_gmx_rxx_int_reg_s cn63xxp1;
2579 struct cvmx_agl_gmx_rxx_int_reg_s cn66xx;
2580 struct cvmx_agl_gmx_rxx_int_reg_s cn68xx;
2581 struct cvmx_agl_gmx_rxx_int_reg_s cn68xxp1;
2583 typedef union cvmx_agl_gmx_rxx_int_reg cvmx_agl_gmx_rxx_int_reg_t;
2586 * cvmx_agl_gmx_rx#_jabber
2588 * AGL_GMX_RX_JABBER = The max size packet after which GMX will truncate
2592 * CNT must be 8-byte aligned such that CNT[2:0] == 0
2594 * The packet that will be sent to the packet input logic will have an
2595 * additional 8 bytes if AGL_GMX_RX_FRM_CTL[PRE_CHK] is set and
2596 * AGL_GMX_RX_FRM_CTL[PRE_STRP] is clear. The max packet that will be sent is
2599 * max_sized_packet = AGL_GMX_RX_JABBER[CNT]+((AGL_GMX_RX_FRM_CTL[PRE_CHK] & !AGL_GMX_RX_FRM_CTL[PRE_STRP])*8)
2601 * Be sure the CNT field value is at least as large as the
2602 * AGL_GMX_RX_FRM_MAX[LEN] value. Failure to meet this constraint will cause
2603 * packets that are within the AGL_GMX_RX_FRM_MAX[LEN] length to be rejected
2604 * because they exceed the CNT limit.
2606 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2608 union cvmx_agl_gmx_rxx_jabber {
2610 struct cvmx_agl_gmx_rxx_jabber_s {
2611 #ifdef __BIG_ENDIAN_BITFIELD
2612 uint64_t reserved_16_63 : 48;
2613 uint64_t cnt : 16; /**< Byte count for jabber check
2614 Failing packets set the JABBER interrupt and are
2615 optionally sent with opcode==JABBER
2616 GMX will truncate the packet to CNT bytes
2617 CNT >= AGL_GMX_RX_FRM_MAX[LEN] */
/* Little-endian bitfield order: same fields as above, LSB first. */
2620 uint64_t reserved_16_63 : 48;
/* Per-chip aliases: all supported models share the same layout. */
2623 struct cvmx_agl_gmx_rxx_jabber_s cn52xx;
2624 struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1;
2625 struct cvmx_agl_gmx_rxx_jabber_s cn56xx;
2626 struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1;
2627 struct cvmx_agl_gmx_rxx_jabber_s cn61xx;
2628 struct cvmx_agl_gmx_rxx_jabber_s cn63xx;
2629 struct cvmx_agl_gmx_rxx_jabber_s cn63xxp1;
2630 struct cvmx_agl_gmx_rxx_jabber_s cn66xx;
2631 struct cvmx_agl_gmx_rxx_jabber_s cn68xx;
2632 struct cvmx_agl_gmx_rxx_jabber_s cn68xxp1;
2634 typedef union cvmx_agl_gmx_rxx_jabber cvmx_agl_gmx_rxx_jabber_t;
2637 * cvmx_agl_gmx_rx#_pause_drop_time
2639 * AGL_GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
2643 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2646 union cvmx_agl_gmx_rxx_pause_drop_time {
2648 struct cvmx_agl_gmx_rxx_pause_drop_time_s {
2649 #ifdef __BIG_ENDIAN_BITFIELD
2650 uint64_t reserved_16_63 : 48;
2651 uint64_t status : 16; /**< Time extracted from the dropped PAUSE packet */
/* Little-endian bitfield order: same fields as above, LSB first. */
2653 uint64_t status : 16;
2654 uint64_t reserved_16_63 : 48;
/* Per-chip aliases: all supported models share the same layout. */
2657 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xx;
2658 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
2659 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
2660 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
2661 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn61xx;
2662 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xx;
2663 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xxp1;
2664 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn66xx;
2665 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn68xx;
2666 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn68xxp1;
2668 typedef union cvmx_agl_gmx_rxx_pause_drop_time cvmx_agl_gmx_rxx_pause_drop_time_t;
2671 * cvmx_agl_gmx_rx#_rx_inbnd
2673 * AGL_GMX_RX_INBND = RGMII InBand Link Status
2677 * These fields are only valid if the attached PHY is operating in RGMII mode
2678 * and supports the optional in-band status (see section 3.4.1 of the RGMII
2679 * specification, version 1.3 for more information).
2681 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2683 union cvmx_agl_gmx_rxx_rx_inbnd {
2685 struct cvmx_agl_gmx_rxx_rx_inbnd_s {
2686 #ifdef __BIG_ENDIAN_BITFIELD
2687 uint64_t reserved_4_63 : 60;
2688 uint64_t duplex : 1; /**< RGMII Inbound LinkDuplex | NS
2691 uint64_t speed : 2; /**< RGMII Inbound LinkSpeed | NS
2696 uint64_t status : 1; /**< RGMII Inbound LinkStatus | NS
/* Little-endian bitfield order: same fields as above, LSB first. */
2700 uint64_t status : 1;
2702 uint64_t duplex : 1;
2703 uint64_t reserved_4_63 : 60;
/* Per-chip aliases: only cn6xxx models are listed for this register. */
2706 struct cvmx_agl_gmx_rxx_rx_inbnd_s cn61xx;
2707 struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xx;
2708 struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xxp1;
2709 struct cvmx_agl_gmx_rxx_rx_inbnd_s cn66xx;
2710 struct cvmx_agl_gmx_rxx_rx_inbnd_s cn68xx;
2711 struct cvmx_agl_gmx_rxx_rx_inbnd_s cn68xxp1;
2713 typedef union cvmx_agl_gmx_rxx_rx_inbnd cvmx_agl_gmx_rxx_rx_inbnd_t;
2716 * cvmx_agl_gmx_rx#_stats_ctl
2718 * AGL_GMX_RX_STATS_CTL = RX Stats Control register
2722 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
2725 union cvmx_agl_gmx_rxx_stats_ctl {
2727 struct cvmx_agl_gmx_rxx_stats_ctl_s {
2728 #ifdef __BIG_ENDIAN_BITFIELD
2729 uint64_t reserved_1_63 : 63;
2730 uint64_t rd_clr : 1; /**< RX Stats registers will clear on reads */
/* Little-endian bitfield order: same fields as above, LSB first. */
2732 uint64_t rd_clr : 1;
2733 uint64_t reserved_1_63 : 63;
/* Per-chip aliases: all supported models share the same layout. */
2736 struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xx;
2737 struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1;
2738 struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx;
2739 struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1;
2740 struct cvmx_agl_gmx_rxx_stats_ctl_s cn61xx;
2741 struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xx;
2742 struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xxp1;
2743 struct cvmx_agl_gmx_rxx_stats_ctl_s cn66xx;
2744 struct cvmx_agl_gmx_rxx_stats_ctl_s cn68xx;
2745 struct cvmx_agl_gmx_rxx_stats_ctl_s cn68xxp1;
2747 typedef union cvmx_agl_gmx_rxx_stats_ctl cvmx_agl_gmx_rxx_stats_ctl_t;
2750 * cvmx_agl_gmx_rx#_stats_octs
2753 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2754 * - Counters will wrap
2755 * - Not reset when MIX*_CTL[RESET] is set to 1.
2757 union cvmx_agl_gmx_rxx_stats_octs {
2759 struct cvmx_agl_gmx_rxx_stats_octs_s {
2760 #ifdef __BIG_ENDIAN_BITFIELD
2761 uint64_t reserved_48_63 : 16;
2762 uint64_t cnt : 48; /**< Octet count of received good packets */
/* Little-endian bitfield order: same fields as above, LSB first. */
2765 uint64_t reserved_48_63 : 16;
/* Per-chip aliases: all supported models share the same layout. */
2768 struct cvmx_agl_gmx_rxx_stats_octs_s cn52xx;
2769 struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1;
2770 struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx;
2771 struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1;
2772 struct cvmx_agl_gmx_rxx_stats_octs_s cn61xx;
2773 struct cvmx_agl_gmx_rxx_stats_octs_s cn63xx;
2774 struct cvmx_agl_gmx_rxx_stats_octs_s cn63xxp1;
2775 struct cvmx_agl_gmx_rxx_stats_octs_s cn66xx;
2776 struct cvmx_agl_gmx_rxx_stats_octs_s cn68xx;
2777 struct cvmx_agl_gmx_rxx_stats_octs_s cn68xxp1;
2779 typedef union cvmx_agl_gmx_rxx_stats_octs cvmx_agl_gmx_rxx_stats_octs_t;
2782 * cvmx_agl_gmx_rx#_stats_octs_ctl
2785 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2786 * - Counters will wrap
2787 * - Not reset when MIX*_CTL[RESET] is set to 1.
2789 union cvmx_agl_gmx_rxx_stats_octs_ctl {
2791 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s {
2792 #ifdef __BIG_ENDIAN_BITFIELD
2793 uint64_t reserved_48_63 : 16;
2794 uint64_t cnt : 48; /**< Octet count of received pause packets */
/* Little-endian bitfield order: same fields as above, LSB first. */
2797 uint64_t reserved_48_63 : 16;
/* Per-chip aliases: all supported models share the same layout. */
2800 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xx;
2801 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
2802 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
2803 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
2804 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn61xx;
2805 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xx;
2806 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xxp1;
2807 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn66xx;
2808 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn68xx;
2809 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn68xxp1;
2811 typedef union cvmx_agl_gmx_rxx_stats_octs_ctl cvmx_agl_gmx_rxx_stats_octs_ctl_t;
2814 * cvmx_agl_gmx_rx#_stats_octs_dmac
2817 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2818 * - Counters will wrap
2819 * - Not reset when MIX*_CTL[RESET] is set to 1.
2821 union cvmx_agl_gmx_rxx_stats_octs_dmac {
2823 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s {
2824 #ifdef __BIG_ENDIAN_BITFIELD
2825 uint64_t reserved_48_63 : 16;
2826 uint64_t cnt : 48; /**< Octet count of filtered dmac packets */
/* Little-endian bitfield order: same fields as above, LSB first. */
2829 uint64_t reserved_48_63 : 16;
/* Per-chip aliases: all supported models share the same layout. */
2832 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xx;
2833 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
2834 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
2835 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
2836 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn61xx;
2837 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xx;
2838 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xxp1;
2839 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn66xx;
2840 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn68xx;
2841 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn68xxp1;
2843 typedef union cvmx_agl_gmx_rxx_stats_octs_dmac cvmx_agl_gmx_rxx_stats_octs_dmac_t;
2846 * cvmx_agl_gmx_rx#_stats_octs_drp
2849 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2850 * - Counters will wrap
2851 * - Not reset when MIX*_CTL[RESET] is set to 1.
2853 union cvmx_agl_gmx_rxx_stats_octs_drp {
2855 struct cvmx_agl_gmx_rxx_stats_octs_drp_s {
2856 #ifdef __BIG_ENDIAN_BITFIELD
2857 uint64_t reserved_48_63 : 16;
2858 uint64_t cnt : 48; /**< Octet count of dropped packets */
/* Little-endian bitfield order: same fields as above, LSB first. */
2861 uint64_t reserved_48_63 : 16;
/* Per-chip aliases: all supported models share the same layout. */
2864 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xx;
2865 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
2866 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
2867 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
2868 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn61xx;
2869 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xx;
2870 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xxp1;
2871 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn66xx;
2872 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn68xx;
2873 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn68xxp1;
2875 typedef union cvmx_agl_gmx_rxx_stats_octs_drp cvmx_agl_gmx_rxx_stats_octs_drp_t;
2878 * cvmx_agl_gmx_rx#_stats_pkts
2880 * AGL_GMX_RX_STATS_PKTS
2882 * Count of good received packets - packets that are not recognized as PAUSE
2883 * packets, dropped due the DMAC filter, dropped due FIFO full status, or
2884 * have any other OPCODE (FCS, Length, etc).
2887 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2888 * - Counters will wrap
2889 * - Not reset when MIX*_CTL[RESET] is set to 1.
2891 union cvmx_agl_gmx_rxx_stats_pkts {
2893 struct cvmx_agl_gmx_rxx_stats_pkts_s {
2894 #ifdef __BIG_ENDIAN_BITFIELD
2895 uint64_t reserved_32_63 : 32;
2896 uint64_t cnt : 32; /**< Count of received good packets */
/* Little-endian bitfield order: same fields as above, LSB first. */
2899 uint64_t reserved_32_63 : 32;
/* Per-chip aliases: all supported models share the same layout. */
2902 struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xx;
2903 struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1;
2904 struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx;
2905 struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1;
2906 struct cvmx_agl_gmx_rxx_stats_pkts_s cn61xx;
2907 struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xx;
2908 struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xxp1;
2909 struct cvmx_agl_gmx_rxx_stats_pkts_s cn66xx;
2910 struct cvmx_agl_gmx_rxx_stats_pkts_s cn68xx;
2911 struct cvmx_agl_gmx_rxx_stats_pkts_s cn68xxp1;
2913 typedef union cvmx_agl_gmx_rxx_stats_pkts cvmx_agl_gmx_rxx_stats_pkts_t;
2916 * cvmx_agl_gmx_rx#_stats_pkts_bad
2918 * AGL_GMX_RX_STATS_PKTS_BAD
2920 * Count of all packets received with some error that were not dropped
2921 * either due to the dmac filter or lack of room in the receive FIFO.
2924 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2925 * - Counters will wrap
2926 * - Not reset when MIX*_CTL[RESET] is set to 1.
2928 union cvmx_agl_gmx_rxx_stats_pkts_bad {
2930 struct cvmx_agl_gmx_rxx_stats_pkts_bad_s {
2931 #ifdef __BIG_ENDIAN_BITFIELD
2932 uint64_t reserved_32_63 : 32;
2933 uint64_t cnt : 32; /**< Count of bad packets */
/* Little-endian bitfield order: same fields as above, LSB first. */
2936 uint64_t reserved_32_63 : 32;
/* Per-chip aliases: all supported models share the same layout. */
2939 struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xx;
2940 struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
2941 struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
2942 struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
2943 struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn61xx;
2944 struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xx;
2945 struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xxp1;
2946 struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn66xx;
2947 struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn68xx;
2948 struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn68xxp1;
2950 typedef union cvmx_agl_gmx_rxx_stats_pkts_bad cvmx_agl_gmx_rxx_stats_pkts_bad_t;
2953 * cvmx_agl_gmx_rx#_stats_pkts_ctl
2955 * AGL_GMX_RX_STATS_PKTS_CTL
2957 * Count of all packets received that were recognized as Flow Control or
2958 * PAUSE packets. PAUSE packets with any kind of error are counted in
2959 * AGL_GMX_RX_STATS_PKTS_BAD. Pause packets can be optionally dropped or
2960 * forwarded based on the AGL_GMX_RX_FRM_CTL[CTL_DRP] bit. This count
2961 * increments regardless of whether the packet is dropped. Pause packets
2962 * will never be counted in AGL_GMX_RX_STATS_PKTS. Packets dropped due the dmac
2963 * filter will be counted in AGL_GMX_RX_STATS_PKTS_DMAC and not here.
2966 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2967 * - Counters will wrap
2968 * - Not reset when MIX*_CTL[RESET] is set to 1.
2970 union cvmx_agl_gmx_rxx_stats_pkts_ctl {
2972 struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s {
2973 #ifdef __BIG_ENDIAN_BITFIELD
2974 uint64_t reserved_32_63 : 32;
2975 uint64_t cnt : 32; /**< Count of received pause packets */
/* Little-endian bitfield order: same fields as above, LSB first. */
2978 uint64_t reserved_32_63 : 32;
/* Per-chip aliases: all supported models share the same layout. */
2981 struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xx;
2982 struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
2983 struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
2984 struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
2985 struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn61xx;
2986 struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xx;
2987 struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xxp1;
2988 struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn66xx;
2989 struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn68xx;
2990 struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn68xxp1;
2992 typedef union cvmx_agl_gmx_rxx_stats_pkts_ctl cvmx_agl_gmx_rxx_stats_pkts_ctl_t;
2995 * cvmx_agl_gmx_rx#_stats_pkts_dmac
2997 * AGL_GMX_RX_STATS_PKTS_DMAC
2999 * Count of all packets received that were dropped by the dmac filter.
3000 * Packets that match the DMAC will be dropped and counted here regardless
3001 * of if they were bad packets. These packets will never be counted in
3002 * AGL_GMX_RX_STATS_PKTS.
3004 * Some packets that were not able to satisfy the DECISION_CNT may not
3005 * actually be dropped by Octeon, but they will be counted here as if they
3009 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
3010 * - Counters will wrap
3011 * - Not reset when MIX*_CTL[RESET] is set to 1.
3013 union cvmx_agl_gmx_rxx_stats_pkts_dmac {
3015 struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s {
3016 #ifdef __BIG_ENDIAN_BITFIELD
3017 uint64_t reserved_32_63 : 32;
3018 uint64_t cnt : 32; /**< Count of filtered dmac packets */
/* Little-endian bitfield order: same fields as above, LSB first. */
3021 uint64_t reserved_32_63 : 32;
/* Per-chip aliases: all supported models share the same layout. */
3024 struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xx;
3025 struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
3026 struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
3027 struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
3028 struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn61xx;
3029 struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xx;
3030 struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xxp1;
3031 struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn66xx;
3032 struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn68xx;
3033 struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn68xxp1;
3035 typedef union cvmx_agl_gmx_rxx_stats_pkts_dmac cvmx_agl_gmx_rxx_stats_pkts_dmac_t;
3038 * cvmx_agl_gmx_rx#_stats_pkts_drp
3040 * AGL_GMX_RX_STATS_PKTS_DRP
3042 * Count of all packets received that were dropped due to a full receive
3043 * FIFO. This counts good and bad packets received - all packets dropped by
3044 * the FIFO. It does not count packets dropped by the dmac or pause packet
3048 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
3049 * - Counters will wrap
3050 * - Not reset when MIX*_CTL[RESET] is set to 1.
3052 union cvmx_agl_gmx_rxx_stats_pkts_drp {
3054 struct cvmx_agl_gmx_rxx_stats_pkts_drp_s {
3055 #ifdef __BIG_ENDIAN_BITFIELD
3056 uint64_t reserved_32_63 : 32;
3057 uint64_t cnt : 32; /**< Count of dropped packets */
/* Little-endian bitfield order: same fields as above, LSB first. */
3060 uint64_t reserved_32_63 : 32;
/* Per-chip aliases: all supported models share the same layout. */
3063 struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xx;
3064 struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
3065 struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
3066 struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
3067 struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn61xx;
3068 struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xx;
3069 struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xxp1;
3070 struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn66xx;
3071 struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn68xx;
3072 struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn68xxp1;
3074 typedef union cvmx_agl_gmx_rxx_stats_pkts_drp cvmx_agl_gmx_rxx_stats_pkts_drp_t;
3077 * cvmx_agl_gmx_rx#_udd_skp
3079 * AGL_GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
3083 * (1) The skip bytes are part of the packet and will be sent down the NCB
3084 * packet interface and will be handled by PKI.
3086 * (2) The system can determine if the UDD bytes are included in the FCS check
3087 * by using the FCSSEL field - if the FCS check is enabled.
3089 * (3) Assume that the preamble/sfd is always at the start of the frame - even
3090 * before UDD bytes. In most cases, there will be no preamble in these
3091 * cases since it will be MII to MII communication without a PHY
3094 * (4) We can still do address filtering and control packet filtering if the
3097 * (5) UDD_SKP must be 0 in half-duplex operation unless
3098 * AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear. If AGL_GMX_RX_FRM_CTL[PRE_CHK] is set,
3099 * then UDD_SKP will normally be 8.
3101 * (6) In all cases, the UDD bytes will be sent down the packet interface as
3102 * part of the packet. The UDD bytes are never stripped from the actual
3105 * (7) If LEN != 0, then AGL_GMX_RX_FRM_CHK[LENERR] will be disabled and AGL_GMX_RX_INT_REG[LENERR] will be zero
3107 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3109 union cvmx_agl_gmx_rxx_udd_skp {
3111 struct cvmx_agl_gmx_rxx_udd_skp_s {
3112 #ifdef __BIG_ENDIAN_BITFIELD
3113 uint64_t reserved_9_63 : 55;
3114 uint64_t fcssel : 1; /**< Include the skip bytes in the FCS calculation
3115 0 = all skip bytes are included in FCS
3116 1 = the skip bytes are not included in FCS */
3117 uint64_t reserved_7_7 : 1;
3118 uint64_t len : 7; /**< Amount of User-defined data before the start of
3119 the L2 data. Zero means L2 comes first.
/* Little-endian bitfield order: same fields as above, LSB first. */
3123 uint64_t reserved_7_7 : 1;
3124 uint64_t fcssel : 1;
3125 uint64_t reserved_9_63 : 55;
/* Per-chip aliases: all supported models share the same layout. */
3128 struct cvmx_agl_gmx_rxx_udd_skp_s cn52xx;
3129 struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1;
3130 struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx;
3131 struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1;
3132 struct cvmx_agl_gmx_rxx_udd_skp_s cn61xx;
3133 struct cvmx_agl_gmx_rxx_udd_skp_s cn63xx;
3134 struct cvmx_agl_gmx_rxx_udd_skp_s cn63xxp1;
3135 struct cvmx_agl_gmx_rxx_udd_skp_s cn66xx;
3136 struct cvmx_agl_gmx_rxx_udd_skp_s cn68xx;
3137 struct cvmx_agl_gmx_rxx_udd_skp_s cn68xxp1;
3139 typedef union cvmx_agl_gmx_rxx_udd_skp cvmx_agl_gmx_rxx_udd_skp_t;
3142 * cvmx_agl_gmx_rx_bp_drop#
3144 * AGL_GMX_RX_BP_DROP = FIFO mark for packet drop
3148 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3151 union cvmx_agl_gmx_rx_bp_dropx {
3153 struct cvmx_agl_gmx_rx_bp_dropx_s {
3154 #ifdef __BIG_ENDIAN_BITFIELD
3155 uint64_t reserved_6_63 : 58;
3156 uint64_t mark : 6; /**< Number of 8B ticks to reserve in the RX FIFO.
3157 When the FIFO exceeds this count, packets will
3158 be dropped and not buffered.
3159 MARK should typically be programmed to 2.
3160 Failure to program correctly can lead to system
/* Little-endian bitfield order: same fields as above, LSB first. */
3164 uint64_t reserved_6_63 : 58;
/* Per-chip aliases: all supported models share the same layout. */
3167 struct cvmx_agl_gmx_rx_bp_dropx_s cn52xx;
3168 struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1;
3169 struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx;
3170 struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1;
3171 struct cvmx_agl_gmx_rx_bp_dropx_s cn61xx;
3172 struct cvmx_agl_gmx_rx_bp_dropx_s cn63xx;
3173 struct cvmx_agl_gmx_rx_bp_dropx_s cn63xxp1;
3174 struct cvmx_agl_gmx_rx_bp_dropx_s cn66xx;
3175 struct cvmx_agl_gmx_rx_bp_dropx_s cn68xx;
3176 struct cvmx_agl_gmx_rx_bp_dropx_s cn68xxp1;
3178 typedef union cvmx_agl_gmx_rx_bp_dropx cvmx_agl_gmx_rx_bp_dropx_t;
3181 * cvmx_agl_gmx_rx_bp_off#
3183 * AGL_GMX_RX_BP_OFF = Lowater mark for packet drop
3187 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3190 union cvmx_agl_gmx_rx_bp_offx {
3192 struct cvmx_agl_gmx_rx_bp_offx_s {
3193 #ifdef __BIG_ENDIAN_BITFIELD
3194 uint64_t reserved_6_63 : 58;
3195 uint64_t mark : 6; /**< Water mark (8B ticks) to deassert backpressure */
/* Little-endian bitfield order: same fields as above, LSB first. */
3198 uint64_t reserved_6_63 : 58;
/* Per-chip aliases: all supported models share the same layout. */
3201 struct cvmx_agl_gmx_rx_bp_offx_s cn52xx;
3202 struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1;
3203 struct cvmx_agl_gmx_rx_bp_offx_s cn56xx;
3204 struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1;
3205 struct cvmx_agl_gmx_rx_bp_offx_s cn61xx;
3206 struct cvmx_agl_gmx_rx_bp_offx_s cn63xx;
3207 struct cvmx_agl_gmx_rx_bp_offx_s cn63xxp1;
3208 struct cvmx_agl_gmx_rx_bp_offx_s cn66xx;
3209 struct cvmx_agl_gmx_rx_bp_offx_s cn68xx;
3210 struct cvmx_agl_gmx_rx_bp_offx_s cn68xxp1;
3212 typedef union cvmx_agl_gmx_rx_bp_offx cvmx_agl_gmx_rx_bp_offx_t;
3215 * cvmx_agl_gmx_rx_bp_on#
3217 * AGL_GMX_RX_BP_ON = Hiwater mark for port/interface backpressure
3221 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3224 union cvmx_agl_gmx_rx_bp_onx {
3226 struct cvmx_agl_gmx_rx_bp_onx_s {
3227 #ifdef __BIG_ENDIAN_BITFIELD
3228 uint64_t reserved_9_63 : 55;
3229 uint64_t mark : 9; /**< Hiwater mark (8B ticks) for backpressure. */
/* Little-endian bitfield order: same fields as above, LSB first. */
3232 uint64_t reserved_9_63 : 55;
/* Per-chip aliases: all supported models share the same layout. */
3235 struct cvmx_agl_gmx_rx_bp_onx_s cn52xx;
3236 struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1;
3237 struct cvmx_agl_gmx_rx_bp_onx_s cn56xx;
3238 struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1;
3239 struct cvmx_agl_gmx_rx_bp_onx_s cn61xx;
3240 struct cvmx_agl_gmx_rx_bp_onx_s cn63xx;
3241 struct cvmx_agl_gmx_rx_bp_onx_s cn63xxp1;
3242 struct cvmx_agl_gmx_rx_bp_onx_s cn66xx;
3243 struct cvmx_agl_gmx_rx_bp_onx_s cn68xx;
3244 struct cvmx_agl_gmx_rx_bp_onx_s cn68xxp1;
3246 typedef union cvmx_agl_gmx_rx_bp_onx cvmx_agl_gmx_rx_bp_onx_t;
3249 * cvmx_agl_gmx_rx_prt_info
3251 * AGL_GMX_RX_PRT_INFO = state information for the ports
3255 * COMMIT[0], DROP[0] will be reset when MIX0_CTL[RESET] is set to 1.
3256 * COMMIT[1], DROP[1] will be reset when MIX1_CTL[RESET] is set to 1.
3258 union cvmx_agl_gmx_rx_prt_info {
3260 struct cvmx_agl_gmx_rx_prt_info_s {
3261 #ifdef __BIG_ENDIAN_BITFIELD
3262 uint64_t reserved_18_63 : 46;
3263 uint64_t drop : 2; /**< Port indication that data was dropped */
3264 uint64_t reserved_2_15 : 14;
3265 uint64_t commit : 2; /**< Port indication that SOP was accepted */
/* Little-endian bitfield order: same fields as above, LSB first. */
3267 uint64_t commit : 2;
3268 uint64_t reserved_2_15 : 14;
3270 uint64_t reserved_18_63 : 46;
3273 struct cvmx_agl_gmx_rx_prt_info_s cn52xx;
3274 struct cvmx_agl_gmx_rx_prt_info_s cn52xxp1;
/* cn56xx layout: DROP/COMMIT are single-bit fields (one port wide). */
3275 struct cvmx_agl_gmx_rx_prt_info_cn56xx {
3276 #ifdef __BIG_ENDIAN_BITFIELD
3277 uint64_t reserved_17_63 : 47;
3278 uint64_t drop : 1; /**< Port indication that data was dropped */
3279 uint64_t reserved_1_15 : 15;
3280 uint64_t commit : 1; /**< Port indication that SOP was accepted */
/* Little-endian bitfield order: same fields as above, LSB first. */
3282 uint64_t commit : 1;
3283 uint64_t reserved_1_15 : 15;
3285 uint64_t reserved_17_63 : 47;
/* Per-chip aliases selecting which layout above applies to each Octeon model. */
3288 struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1;
3289 struct cvmx_agl_gmx_rx_prt_info_s cn61xx;
3290 struct cvmx_agl_gmx_rx_prt_info_s cn63xx;
3291 struct cvmx_agl_gmx_rx_prt_info_s cn63xxp1;
3292 struct cvmx_agl_gmx_rx_prt_info_s cn66xx;
3293 struct cvmx_agl_gmx_rx_prt_info_s cn68xx;
3294 struct cvmx_agl_gmx_rx_prt_info_s cn68xxp1;
3296 typedef union cvmx_agl_gmx_rx_prt_info cvmx_agl_gmx_rx_prt_info_t;
3299 * cvmx_agl_gmx_rx_tx_status
3301 * AGL_GMX_RX_TX_STATUS = GMX RX/TX Status
3305 * RX[0], TX[0] will be reset when MIX0_CTL[RESET] is set to 1.
3306 * RX[1], TX[1] will be reset when MIX1_CTL[RESET] is set to 1.
3308 union cvmx_agl_gmx_rx_tx_status {
3310 struct cvmx_agl_gmx_rx_tx_status_s {
3311 #ifdef __BIG_ENDIAN_BITFIELD
3312 uint64_t reserved_6_63 : 58;
3313 uint64_t tx : 2; /**< Transmit data since last read */
3314 uint64_t reserved_2_3 : 2;
3315 uint64_t rx : 2; /**< Receive data since last read */
/* Little-endian bitfield order: same fields as above, LSB first. */
3318 uint64_t reserved_2_3 : 2;
3320 uint64_t reserved_6_63 : 58;
3323 struct cvmx_agl_gmx_rx_tx_status_s cn52xx;
3324 struct cvmx_agl_gmx_rx_tx_status_s cn52xxp1;
/* cn56xx layout: RX/TX are single-bit fields (one port wide). */
3325 struct cvmx_agl_gmx_rx_tx_status_cn56xx {
3326 #ifdef __BIG_ENDIAN_BITFIELD
3327 uint64_t reserved_5_63 : 59;
3328 uint64_t tx : 1; /**< Transmit data since last read */
3329 uint64_t reserved_1_3 : 3;
3330 uint64_t rx : 1; /**< Receive data since last read */
/* Little-endian bitfield order: same fields as above, LSB first. */
3333 uint64_t reserved_1_3 : 3;
3335 uint64_t reserved_5_63 : 59;
/* Per-chip aliases selecting which layout above applies to each Octeon model. */
3338 struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1;
3339 struct cvmx_agl_gmx_rx_tx_status_s cn61xx;
3340 struct cvmx_agl_gmx_rx_tx_status_s cn63xx;
3341 struct cvmx_agl_gmx_rx_tx_status_s cn63xxp1;
3342 struct cvmx_agl_gmx_rx_tx_status_s cn66xx;
3343 struct cvmx_agl_gmx_rx_tx_status_s cn68xx;
3344 struct cvmx_agl_gmx_rx_tx_status_s cn68xxp1;
3346 typedef union cvmx_agl_gmx_rx_tx_status cvmx_agl_gmx_rx_tx_status_t;
3349 * cvmx_agl_gmx_smac#
3351 * AGL_GMX_SMAC = Packet SMAC
3355 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3358 union cvmx_agl_gmx_smacx {
3360 struct cvmx_agl_gmx_smacx_s {
3361 #ifdef __BIG_ENDIAN_BITFIELD
3362 uint64_t reserved_48_63 : 16;
3363 uint64_t smac : 48; /**< The SMAC field is used for generating and
3364 accepting Control Pause packets */
/* Little-endian bitfield order: same fields as above, LSB first. */
3367 uint64_t reserved_48_63 : 16;
/* Per-chip aliases: all supported models share the same layout. */
3370 struct cvmx_agl_gmx_smacx_s cn52xx;
3371 struct cvmx_agl_gmx_smacx_s cn52xxp1;
3372 struct cvmx_agl_gmx_smacx_s cn56xx;
3373 struct cvmx_agl_gmx_smacx_s cn56xxp1;
3374 struct cvmx_agl_gmx_smacx_s cn61xx;
3375 struct cvmx_agl_gmx_smacx_s cn63xx;
3376 struct cvmx_agl_gmx_smacx_s cn63xxp1;
3377 struct cvmx_agl_gmx_smacx_s cn66xx;
3378 struct cvmx_agl_gmx_smacx_s cn68xx;
3379 struct cvmx_agl_gmx_smacx_s cn68xxp1;
3381 typedef union cvmx_agl_gmx_smacx cvmx_agl_gmx_smacx_t;
/**
 * cvmx_agl_gmx_stat_bp
 *
 * AGL_GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation
 *
 * Notes:
 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
 *
 * It has no relationship with the TX FIFO per se. The TX engine sends packets
 * from PKO and upon completion, sends a command to the TX stats block for an
 * update based on the packet size. The stats operation can take a few cycles -
 * normally not enough to be visible considering the 64B min packet size that is
 * ethernet convention.
 *
 * In the rare case in which SW attempted to schedule really, really, small packets
 * or the sclk (6xxx) is running ass-slow, then the stats updates may not happen in
 * real time and can back up the TX engine.
 *
 * This counter is the number of cycles in which the TX engine was stalled. In
 * normal operation, it should always be zeros.
 */
union cvmx_agl_gmx_stat_bp {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_stat_bp_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_17_63 : 47;
	uint64_t bp : 1; /**< Current TX stats BP state
                              When the TX stats machine cannot update the stats
                              registers quickly enough, the machine has the
                              ability to BP TX datapath. This is a rare event
                              and will not occur in normal operation.
                              0 = no backpressure is applied
                              1 = backpressure is applied to TX datapath to
                              allow stat update operations to complete */
	uint64_t cnt : 16; /**< Number of cycles that BP has been asserted
                               Saturating counter */
#else
	uint64_t cnt : 16;
	uint64_t bp : 1;
	uint64_t reserved_17_63 : 47;
#endif
	} s;
	struct cvmx_agl_gmx_stat_bp_s cn52xx;
	struct cvmx_agl_gmx_stat_bp_s cn52xxp1;
	struct cvmx_agl_gmx_stat_bp_s cn56xx;
	struct cvmx_agl_gmx_stat_bp_s cn56xxp1;
	struct cvmx_agl_gmx_stat_bp_s cn61xx;
	struct cvmx_agl_gmx_stat_bp_s cn63xx;
	struct cvmx_agl_gmx_stat_bp_s cn63xxp1;
	struct cvmx_agl_gmx_stat_bp_s cn66xx;
	struct cvmx_agl_gmx_stat_bp_s cn68xx;
	struct cvmx_agl_gmx_stat_bp_s cn68xxp1;
};
typedef union cvmx_agl_gmx_stat_bp cvmx_agl_gmx_stat_bp_t;
/**
 * cvmx_agl_gmx_tx#_append
 *
 * AGL_GMX_TX_APPEND = Packet TX Append Control
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_append {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_append_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_4_63 : 60;
	uint64_t force_fcs : 1; /**< Append the Ethernet FCS on each pause packet
                                     when FCS is clear. Pause packets are normally
                                     padded to 60 bytes. If
                                     AGL_GMX_TX_MIN_PKT[MIN_SIZE] exceeds 59, then
                                     FORCE_FCS will not be used. */
	uint64_t fcs : 1; /**< Append the Ethernet FCS on each packet */
	uint64_t pad : 1; /**< Append PAD bytes such that min sized */
	uint64_t preamble : 1; /**< Prepend the Ethernet preamble on each transfer */
#else
	uint64_t preamble : 1;
	uint64_t pad : 1;
	uint64_t fcs : 1;
	uint64_t force_fcs : 1;
	uint64_t reserved_4_63 : 60;
#endif
	} s;
	struct cvmx_agl_gmx_txx_append_s cn52xx;
	struct cvmx_agl_gmx_txx_append_s cn52xxp1;
	struct cvmx_agl_gmx_txx_append_s cn56xx;
	struct cvmx_agl_gmx_txx_append_s cn56xxp1;
	struct cvmx_agl_gmx_txx_append_s cn61xx;
	struct cvmx_agl_gmx_txx_append_s cn63xx;
	struct cvmx_agl_gmx_txx_append_s cn63xxp1;
	struct cvmx_agl_gmx_txx_append_s cn66xx;
	struct cvmx_agl_gmx_txx_append_s cn68xx;
	struct cvmx_agl_gmx_txx_append_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_append cvmx_agl_gmx_txx_append_t;
/**
 * cvmx_agl_gmx_tx#_clk
 *
 * AGL_GMX_TX_CLK = RGMII TX Clock Generation Register
 *
 * Notes:
 * Normal Programming Values:
 *  (1) RGMII, 1000Mbs   (AGL_GMX_PRT_CFG[SPEED]==1), CLK_CNT == 1
 *  (2) RGMII, 10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 50/5
 *  (3) MII,   10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 1
 *
 * Given a 125MHz PLL reference clock...
 *  CLK_CNT ==  1 ==> 125.0MHz TXC clock period (8ns* 1)
 *  CLK_CNT ==  5 ==>  25.0MHz TXC clock period (8ns* 5)
 *  CLK_CNT == 50 ==>   2.5MHz TXC clock period (8ns*50)
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_clk {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_clk_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_6_63 : 58;
	uint64_t clk_cnt : 6; /**< Controls the RGMII TXC frequency
                                   TXC(period) = rgm_ref_clk(period)*CLK_CNT */
#else
	uint64_t clk_cnt : 6;
	uint64_t reserved_6_63 : 58;
#endif
	} s;
	/* Only the 6xxx parts have the RGMII AGL clock generator. */
	struct cvmx_agl_gmx_txx_clk_s cn61xx;
	struct cvmx_agl_gmx_txx_clk_s cn63xx;
	struct cvmx_agl_gmx_txx_clk_s cn63xxp1;
	struct cvmx_agl_gmx_txx_clk_s cn66xx;
	struct cvmx_agl_gmx_txx_clk_s cn68xx;
	struct cvmx_agl_gmx_txx_clk_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_clk cvmx_agl_gmx_txx_clk_t;
/**
 * cvmx_agl_gmx_tx#_ctl
 *
 * AGL_GMX_TX_CTL = TX Control register
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_ctl {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_2_63 : 62;
	uint64_t xsdef_en : 1; /**< Enables the excessive deferral check for stats
                                    and interrupts */
	uint64_t xscol_en : 1; /**< Enables the excessive collision check for stats
                                    and interrupts */
#else
	uint64_t xscol_en : 1;
	uint64_t xsdef_en : 1;
	uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_agl_gmx_txx_ctl_s cn52xx;
	struct cvmx_agl_gmx_txx_ctl_s cn52xxp1;
	struct cvmx_agl_gmx_txx_ctl_s cn56xx;
	struct cvmx_agl_gmx_txx_ctl_s cn56xxp1;
	struct cvmx_agl_gmx_txx_ctl_s cn61xx;
	struct cvmx_agl_gmx_txx_ctl_s cn63xx;
	struct cvmx_agl_gmx_txx_ctl_s cn63xxp1;
	struct cvmx_agl_gmx_txx_ctl_s cn66xx;
	struct cvmx_agl_gmx_txx_ctl_s cn68xx;
	struct cvmx_agl_gmx_txx_ctl_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_ctl cvmx_agl_gmx_txx_ctl_t;
/**
 * cvmx_agl_gmx_tx#_min_pkt
 *
 * AGL_GMX_TX_MIN_PKT = Packet TX Min Size Packet (PAD upto min size)
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_min_pkt {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_min_pkt_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63 : 56;
	uint64_t min_size : 8; /**< Min frame in bytes before the FCS is applied
                                    Padding is only appended when
                                    AGL_GMX_TX_APPEND[PAD] for the corresponding
                                    packet port is set. Packets will be padded to
                                    MIN_SIZE+1 The reset value will pad to 60 bytes. */
#else
	uint64_t min_size : 8;
	uint64_t reserved_8_63 : 56;
#endif
	} s;
	struct cvmx_agl_gmx_txx_min_pkt_s cn52xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1;
	struct cvmx_agl_gmx_txx_min_pkt_s cn56xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1;
	struct cvmx_agl_gmx_txx_min_pkt_s cn61xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn63xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn63xxp1;
	struct cvmx_agl_gmx_txx_min_pkt_s cn66xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn68xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_min_pkt cvmx_agl_gmx_txx_min_pkt_t;
/**
 * cvmx_agl_gmx_tx#_pause_pkt_interval
 *
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets will be sent
 *
 * Notes:
 * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
 * designer.  It is suggested that TIME be much greater than INTERVAL and
 * AGL_GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octane is ready for additional
 * data.
 *
 * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisfy the
 * following rule:
 *
 *    INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
 *
 * where largest_pkt_size is that largest packet that the system can send
 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
 * of the PAUSE packet (normally 64B).
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_pause_pkt_interval {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t interval : 16; /**< Arbitrate for a pause packet every (INTERVAL*512)
                                     bit-times.
                                     Normally, 0 < INTERVAL < AGL_GMX_TX_PAUSE_PKT_TIME
                                     INTERVAL=0, will only send a single PAUSE packet
                                     for each backpressure event */
#else
	uint64_t interval : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn61xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn66xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn68xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_pkt_interval cvmx_agl_gmx_txx_pause_pkt_interval_t;
/**
 * cvmx_agl_gmx_tx#_pause_pkt_time
 *
 * AGL_GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field
 *
 * Notes:
 * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
 * designer.  It is suggested that TIME be much greater than INTERVAL and
 * AGL_GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octane is ready for additional
 * data.
 *
 * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisfy the
 * following rule:
 *
 *    INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
 *
 * where largest_pkt_size is that largest packet that the system can send
 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
 * of the PAUSE packet (normally 64B).
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_pause_pkt_time {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_pause_pkt_time_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t time : 16; /**< The pause_time field placed is outbnd pause pkts
                                 pause_time is in 512 bit-times
                                 Normally, TIME > AGL_GMX_TX_PAUSE_PKT_INTERVAL */
#else
	uint64_t time : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn61xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn66xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn68xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_pkt_time cvmx_agl_gmx_txx_pause_pkt_time_t;
/**
 * cvmx_agl_gmx_tx#_pause_togo
 *
 * AGL_GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_pause_togo {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_pause_togo_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t time : 16; /**< Amount of time remaining to backpressure */
#else
	uint64_t time : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_togo_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_togo_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_togo_s cn61xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn63xxp1;
	struct cvmx_agl_gmx_txx_pause_togo_s cn66xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn68xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_togo cvmx_agl_gmx_txx_pause_togo_t;
/**
 * cvmx_agl_gmx_tx#_pause_zero
 *
 * AGL_GMX_TX_PAUSE_ZERO = Packet TX Amount of time remaining to backpressure
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_pause_zero {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_pause_zero_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_1_63 : 63;
	uint64_t send : 1; /**< When backpressure condition clear, send PAUSE
                                packet with pause_time of zero to enable the
                                channel */
#else
	uint64_t send : 1;
	uint64_t reserved_1_63 : 63;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_zero_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_zero_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_zero_s cn61xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn63xxp1;
	struct cvmx_agl_gmx_txx_pause_zero_s cn66xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn68xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_zero cvmx_agl_gmx_txx_pause_zero_t;
/**
 * cvmx_agl_gmx_tx#_soft_pause
 *
 * AGL_GMX_TX_SOFT_PAUSE = Packet TX Software Pause
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_soft_pause {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_soft_pause_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63 : 48;
	uint64_t time : 16; /**< Back off the TX bus for (TIME*512) bit-times
                                 for full-duplex operation only */
#else
	uint64_t time : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_soft_pause_s cn52xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1;
	struct cvmx_agl_gmx_txx_soft_pause_s cn56xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1;
	struct cvmx_agl_gmx_txx_soft_pause_s cn61xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn63xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn63xxp1;
	struct cvmx_agl_gmx_txx_soft_pause_s cn66xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn68xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_soft_pause cvmx_agl_gmx_txx_soft_pause_t;
/**
 * cvmx_agl_gmx_tx#_stat0
 *
 * AGL_GMX_TX_STAT0 = AGL_GMX_TX_STATS_XSDEF / AGL_GMX_TX_STATS_XSCOL
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat0 {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_stat0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t xsdef : 32; /**< Number of packets dropped (never successfully
                                  sent) due to excessive deferal */
	uint64_t xscol : 32; /**< Number of packets dropped (never successfully
                                  sent) due to excessive collision.  Defined by
                                  AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
#else
	uint64_t xscol : 32;
	uint64_t xsdef : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat0_s cn52xx;
	struct cvmx_agl_gmx_txx_stat0_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat0_s cn56xx;
	struct cvmx_agl_gmx_txx_stat0_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat0_s cn61xx;
	struct cvmx_agl_gmx_txx_stat0_s cn63xx;
	struct cvmx_agl_gmx_txx_stat0_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat0_s cn66xx;
	struct cvmx_agl_gmx_txx_stat0_s cn68xx;
	struct cvmx_agl_gmx_txx_stat0_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat0 cvmx_agl_gmx_txx_stat0_t;
/**
 * cvmx_agl_gmx_tx#_stat1
 *
 * AGL_GMX_TX_STAT1 = AGL_GMX_TX_STATS_SCOL / AGL_GMX_TX_STATS_MCOL
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat1 {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_stat1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t scol : 32; /**< Number of packets sent with a single collision */
	uint64_t mcol : 32; /**< Number of packets sent with multiple collisions
                                 but < AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
#else
	uint64_t mcol : 32;
	uint64_t scol : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat1_s cn52xx;
	struct cvmx_agl_gmx_txx_stat1_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat1_s cn56xx;
	struct cvmx_agl_gmx_txx_stat1_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat1_s cn61xx;
	struct cvmx_agl_gmx_txx_stat1_s cn63xx;
	struct cvmx_agl_gmx_txx_stat1_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat1_s cn66xx;
	struct cvmx_agl_gmx_txx_stat1_s cn68xx;
	struct cvmx_agl_gmx_txx_stat1_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat1 cvmx_agl_gmx_txx_stat1_t;
/**
 * cvmx_agl_gmx_tx#_stat2
 *
 * AGL_GMX_TX_STAT2 = AGL_GMX_TX_STATS_OCTS
 *
 * Notes:
 * - Octet counts are the sum of all data transmitted on the wire including
 *   packet data, pad bytes, fcs bytes, pause bytes, and jam bytes.  The octet
 *   counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat2 {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_stat2_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_48_63 : 16;
	uint64_t octs : 48; /**< Number of total octets sent on the interface.
                                 Does not count octets from frames that were
                                 truncated due to collisions in halfdup mode. */
#else
	uint64_t octs : 48;
	uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat2_s cn52xx;
	struct cvmx_agl_gmx_txx_stat2_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat2_s cn56xx;
	struct cvmx_agl_gmx_txx_stat2_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat2_s cn61xx;
	struct cvmx_agl_gmx_txx_stat2_s cn63xx;
	struct cvmx_agl_gmx_txx_stat2_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat2_s cn66xx;
	struct cvmx_agl_gmx_txx_stat2_s cn68xx;
	struct cvmx_agl_gmx_txx_stat2_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat2 cvmx_agl_gmx_txx_stat2_t;
/**
 * cvmx_agl_gmx_tx#_stat3
 *
 * AGL_GMX_TX_STAT3 = AGL_GMX_TX_STATS_PKTS
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat3 {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_stat3_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_32_63 : 32;
	uint64_t pkts : 32; /**< Number of total frames sent on the interface.
                                 Does not count frames that were truncated due to
                                 collisions in halfdup mode. */
#else
	uint64_t pkts : 32;
	uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat3_s cn52xx;
	struct cvmx_agl_gmx_txx_stat3_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat3_s cn56xx;
	struct cvmx_agl_gmx_txx_stat3_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat3_s cn61xx;
	struct cvmx_agl_gmx_txx_stat3_s cn63xx;
	struct cvmx_agl_gmx_txx_stat3_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat3_s cn66xx;
	struct cvmx_agl_gmx_txx_stat3_s cn68xx;
	struct cvmx_agl_gmx_txx_stat3_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat3 cvmx_agl_gmx_txx_stat3_t;
/**
 * cvmx_agl_gmx_tx#_stat4
 *
 * AGL_GMX_TX_STAT4 = AGL_GMX_TX_STATS_HIST1 (64) / AGL_GMX_TX_STATS_HIST0 (<64)
 *
 * Notes:
 * - Packet length is the sum of all data transmitted on the wire for the given
 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat4 {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_stat4_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t hist1 : 32; /**< Number of packets sent with an octet count of 64. */
	uint64_t hist0 : 32; /**< Number of packets sent with an octet count
                                  of < 64. */
#else
	uint64_t hist0 : 32;
	uint64_t hist1 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat4_s cn52xx;
	struct cvmx_agl_gmx_txx_stat4_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat4_s cn56xx;
	struct cvmx_agl_gmx_txx_stat4_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat4_s cn61xx;
	struct cvmx_agl_gmx_txx_stat4_s cn63xx;
	struct cvmx_agl_gmx_txx_stat4_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat4_s cn66xx;
	struct cvmx_agl_gmx_txx_stat4_s cn68xx;
	struct cvmx_agl_gmx_txx_stat4_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat4 cvmx_agl_gmx_txx_stat4_t;
/**
 * cvmx_agl_gmx_tx#_stat5
 *
 * AGL_GMX_TX_STAT5 = AGL_GMX_TX_STATS_HIST3 (128- 255) / AGL_GMX_TX_STATS_HIST2 (65- 127)
 *
 * Notes:
 * - Packet length is the sum of all data transmitted on the wire for the given
 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat5 {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_stat5_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t hist3 : 32; /**< Number of packets sent with an octet count of
                                  128 - 255. */
	uint64_t hist2 : 32; /**< Number of packets sent with an octet count of
                                  65 - 127. */
#else
	uint64_t hist2 : 32;
	uint64_t hist3 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat5_s cn52xx;
	struct cvmx_agl_gmx_txx_stat5_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat5_s cn56xx;
	struct cvmx_agl_gmx_txx_stat5_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat5_s cn61xx;
	struct cvmx_agl_gmx_txx_stat5_s cn63xx;
	struct cvmx_agl_gmx_txx_stat5_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat5_s cn66xx;
	struct cvmx_agl_gmx_txx_stat5_s cn68xx;
	struct cvmx_agl_gmx_txx_stat5_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat5 cvmx_agl_gmx_txx_stat5_t;
/**
 * cvmx_agl_gmx_tx#_stat6
 *
 * AGL_GMX_TX_STAT6 = AGL_GMX_TX_STATS_HIST5 (512-1023) / AGL_GMX_TX_STATS_HIST4 (256-511)
 *
 * Notes:
 * - Packet length is the sum of all data transmitted on the wire for the given
 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat6 {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_stat6_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t hist5 : 32; /**< Number of packets sent with an octet count of
                                  512 - 1023. */
	uint64_t hist4 : 32; /**< Number of packets sent with an octet count of
                                  256 - 511. */
#else
	uint64_t hist4 : 32;
	uint64_t hist5 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat6_s cn52xx;
	struct cvmx_agl_gmx_txx_stat6_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat6_s cn56xx;
	struct cvmx_agl_gmx_txx_stat6_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat6_s cn61xx;
	struct cvmx_agl_gmx_txx_stat6_s cn63xx;
	struct cvmx_agl_gmx_txx_stat6_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat6_s cn66xx;
	struct cvmx_agl_gmx_txx_stat6_s cn68xx;
	struct cvmx_agl_gmx_txx_stat6_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat6 cvmx_agl_gmx_txx_stat6_t;
/**
 * cvmx_agl_gmx_tx#_stat7
 *
 * AGL_GMX_TX_STAT7 = AGL_GMX_TX_STATS_HIST7 (1024-1518) / AGL_GMX_TX_STATS_HIST6 (>1518)
 *
 * Notes:
 * - Packet length is the sum of all data transmitted on the wire for the given
 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octet counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat7 {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_stat7_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t hist7 : 32; /**< Number of packets sent with an octet count
                                  of 1024 - 1518. */
	uint64_t hist6 : 32; /**< Number of packets sent with an octet count of
                                  > 1518. */
#else
	uint64_t hist6 : 32;
	uint64_t hist7 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat7_s cn52xx;
	struct cvmx_agl_gmx_txx_stat7_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat7_s cn56xx;
	struct cvmx_agl_gmx_txx_stat7_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat7_s cn61xx;
	struct cvmx_agl_gmx_txx_stat7_s cn63xx;
	struct cvmx_agl_gmx_txx_stat7_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat7_s cn66xx;
	struct cvmx_agl_gmx_txx_stat7_s cn68xx;
	struct cvmx_agl_gmx_txx_stat7_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat7 cvmx_agl_gmx_txx_stat7_t;
/**
 * cvmx_agl_gmx_tx#_stat8
 *
 * AGL_GMX_TX_STAT8 = AGL_GMX_TX_STATS_MCST / AGL_GMX_TX_STATS_BCST
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the
 *   packet.  GMX assumes that the DMAC lies in the first 6 bytes of the packet
 *   as per the 802.3 frame definition.  If the system requires additional data
 *   before the L2 header, then the MCST and BCST counters may not reflect
 *   reality and should be ignored by software.
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat8 {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_stat8_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t mcst : 32; /**< Number of packets sent to multicast DMAC.
                                 Does not include BCST packets. */
	uint64_t bcst : 32; /**< Number of packets sent to broadcast DMAC.
                                 Does not include MCST packets. */
#else
	uint64_t bcst : 32;
	uint64_t mcst : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat8_s cn52xx;
	struct cvmx_agl_gmx_txx_stat8_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat8_s cn56xx;
	struct cvmx_agl_gmx_txx_stat8_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat8_s cn61xx;
	struct cvmx_agl_gmx_txx_stat8_s cn63xx;
	struct cvmx_agl_gmx_txx_stat8_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat8_s cn66xx;
	struct cvmx_agl_gmx_txx_stat8_s cn68xx;
	struct cvmx_agl_gmx_txx_stat8_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat8 cvmx_agl_gmx_txx_stat8_t;
/**
 * cvmx_agl_gmx_tx#_stat9
 *
 * AGL_GMX_TX_STAT9 = AGL_GMX_TX_STATS_UNDFLW / AGL_GMX_TX_STATS_CTL
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat9 {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_stat9_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t undflw : 32; /**< Number of underflow packets */
	uint64_t ctl : 32; /**< Number of Control packets (PAUSE flow control)
                                generated by GMX.  It does not include control
                                packets forwarded or generated by the PP's. */
#else
	uint64_t ctl : 32;
	uint64_t undflw : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat9_s cn52xx;
	struct cvmx_agl_gmx_txx_stat9_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat9_s cn56xx;
	struct cvmx_agl_gmx_txx_stat9_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat9_s cn61xx;
	struct cvmx_agl_gmx_txx_stat9_s cn63xx;
	struct cvmx_agl_gmx_txx_stat9_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stat9_s cn66xx;
	struct cvmx_agl_gmx_txx_stat9_s cn68xx;
	struct cvmx_agl_gmx_txx_stat9_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stat9 cvmx_agl_gmx_txx_stat9_t;
/**
 * cvmx_agl_gmx_tx#_stats_ctl
 *
 * AGL_GMX_TX_STATS_CTL = TX Stats Control register
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stats_ctl {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_stats_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_1_63 : 63;
	uint64_t rd_clr : 1; /**< Stats registers will clear on reads */
#else
	uint64_t rd_clr : 1;
	uint64_t reserved_1_63 : 63;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn52xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn61xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn63xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn63xxp1;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn66xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn68xx;
	struct cvmx_agl_gmx_txx_stats_ctl_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_stats_ctl cvmx_agl_gmx_txx_stats_ctl_t;
/**
 * cvmx_agl_gmx_tx#_thresh
 *
 * AGL_GMX_TX_THRESH = Packet TX Threshold
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_thresh {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_txx_thresh_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_6_63 : 58;
	uint64_t cnt : 6; /**< Number of 16B ticks to accumulate in the TX FIFO
                               before sending on the packet interface
                               This register should be large enough to prevent
                               underflow on the packet interface and must never
                               be set below 4.  This register cannot exceed the
                               the TX FIFO depth which is 128, 8B entries. */
#else
	uint64_t cnt : 6;
	uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_agl_gmx_txx_thresh_s cn52xx;
	struct cvmx_agl_gmx_txx_thresh_s cn52xxp1;
	struct cvmx_agl_gmx_txx_thresh_s cn56xx;
	struct cvmx_agl_gmx_txx_thresh_s cn56xxp1;
	struct cvmx_agl_gmx_txx_thresh_s cn61xx;
	struct cvmx_agl_gmx_txx_thresh_s cn63xx;
	struct cvmx_agl_gmx_txx_thresh_s cn63xxp1;
	struct cvmx_agl_gmx_txx_thresh_s cn66xx;
	struct cvmx_agl_gmx_txx_thresh_s cn68xx;
	struct cvmx_agl_gmx_txx_thresh_s cn68xxp1;
};
typedef union cvmx_agl_gmx_txx_thresh cvmx_agl_gmx_txx_thresh_t;
/**
 * cvmx_agl_gmx_tx_bp
 *
 * AGL_GMX_TX_BP = Packet TX BackPressure Register
 *
 * Notes:
 * BP[0] will be reset when MIX0_CTL[RESET] is set to 1.
 * BP[1] will be reset when MIX1_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_tx_bp {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_tx_bp_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_2_63 : 62;
	uint64_t bp : 2; /**< Port BackPressure status
                              0=Port is available
                              1=Port should be back pressured */
#else
	uint64_t bp : 2;
	uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_agl_gmx_tx_bp_s cn52xx;
	struct cvmx_agl_gmx_tx_bp_s cn52xxp1;
	/* cn56xx exposes a single port, so BP is one bit. */
	struct cvmx_agl_gmx_tx_bp_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_1_63 : 63;
	uint64_t bp : 1; /**< Port BackPressure status
                              0=Port is available
                              1=Port should be back pressured */
#else
	uint64_t bp : 1;
	uint64_t reserved_1_63 : 63;
#endif
	} cn56xx;
	struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1;
	struct cvmx_agl_gmx_tx_bp_s cn61xx;
	struct cvmx_agl_gmx_tx_bp_s cn63xx;
	struct cvmx_agl_gmx_tx_bp_s cn63xxp1;
	struct cvmx_agl_gmx_tx_bp_s cn66xx;
	struct cvmx_agl_gmx_tx_bp_s cn68xx;
	struct cvmx_agl_gmx_tx_bp_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_bp cvmx_agl_gmx_tx_bp_t;
/**
 * cvmx_agl_gmx_tx_col_attempt
 *
 * AGL_GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame
 *
 * Notes:
 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
 */
union cvmx_agl_gmx_tx_col_attempt {
	uint64_t u64; /* Whole-register view. */
	struct cvmx_agl_gmx_tx_col_attempt_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_5_63 : 59;
	uint64_t limit : 5; /**< Collision Attempts */
#else
	uint64_t limit : 5;
	uint64_t reserved_5_63 : 59;
#endif
	} s;
	struct cvmx_agl_gmx_tx_col_attempt_s cn52xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1;
	struct cvmx_agl_gmx_tx_col_attempt_s cn56xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1;
	struct cvmx_agl_gmx_tx_col_attempt_s cn61xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn63xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn63xxp1;
	struct cvmx_agl_gmx_tx_col_attempt_s cn66xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn68xx;
	struct cvmx_agl_gmx_tx_col_attempt_s cn68xxp1;
};
typedef union cvmx_agl_gmx_tx_col_attempt cvmx_agl_gmx_tx_col_attempt_t;
4359 * cvmx_agl_gmx_tx_ifg
4364 * AGL_GMX_TX_IFG = Packet TX Interframe Gap
4368 * * Programming IFG1 and IFG2.
4370 * For half-duplex systems that require IEEE 802.3 compatibility, IFG1 must
4371 * be in the range of 1-8, IFG2 must be in the range of 4-12, and the
4372 * IFG1+IFG2 sum must be 12.
4374 * For full-duplex systems that require IEEE 802.3 compatibility, IFG1 must
4375 * be in the range of 1-11, IFG2 must be in the range of 1-11, and the
4376 * IFG1+IFG2 sum must be 12.
4378 * For all other systems, IFG1 and IFG2 can be any value in the range of
4379 * 1-15. Allowing for a total possible IFG sum of 2-30.
4381 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
/* AGL_GMX_TX_IFG: TX interframe-gap timing. IFG1 is the first 2/3 and IFG2
 * the final 1/3 of the gap; see the programming constraints above for the
 * IEEE 802.3-compatible value ranges. Same layout on all listed chips. */
4383 union cvmx_agl_gmx_tx_ifg {
4385 struct cvmx_agl_gmx_tx_ifg_s {
4386 #ifdef __BIG_ENDIAN_BITFIELD
4387 uint64_t reserved_8_63 : 56;
4388 uint64_t ifg2 : 4; /**< 1/3 of the interframe gap timing
4389 If CRS is detected during IFG2, then the
4390 interFrameSpacing timer is not reset and a frame
4391 is transmitted once the timer expires. */
4392 uint64_t ifg1 : 4; /**< 2/3 of the interframe gap timing
4393 If CRS is detected during IFG1, then the
4394 interFrameSpacing timer is reset and a frame is
4399 uint64_t reserved_8_63 : 56;
4402 struct cvmx_agl_gmx_tx_ifg_s cn52xx;
4403 struct cvmx_agl_gmx_tx_ifg_s cn52xxp1;
4404 struct cvmx_agl_gmx_tx_ifg_s cn56xx;
4405 struct cvmx_agl_gmx_tx_ifg_s cn56xxp1;
4406 struct cvmx_agl_gmx_tx_ifg_s cn61xx;
4407 struct cvmx_agl_gmx_tx_ifg_s cn63xx;
4408 struct cvmx_agl_gmx_tx_ifg_s cn63xxp1;
4409 struct cvmx_agl_gmx_tx_ifg_s cn66xx;
4410 struct cvmx_agl_gmx_tx_ifg_s cn68xx;
4411 struct cvmx_agl_gmx_tx_ifg_s cn68xxp1;
4413 typedef union cvmx_agl_gmx_tx_ifg cvmx_agl_gmx_tx_ifg_t;
4416 * cvmx_agl_gmx_tx_int_en
4418 * AGL_GMX_TX_INT_EN = Interrupt Enable
4422 * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
4423 * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
4424 * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
/* AGL_GMX_TX_INT_EN: TX interrupt-enable bits. Two-bit fields in the _s
 * layout are per-port (bit 0 = port 0, bit 1 = port 1); the cn52xx variant
 * drops PTP_LOST and the cn56xx variant is single-port (1-bit fields). */
4426 union cvmx_agl_gmx_tx_int_en {
4428 struct cvmx_agl_gmx_tx_int_en_s {
4429 #ifdef __BIG_ENDIAN_BITFIELD
4430 uint64_t reserved_22_63 : 42;
4431 uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
4432 sent due to XSCOL */
4433 uint64_t reserved_18_19 : 2;
4434 uint64_t late_col : 2; /**< TX Late Collision */
4435 uint64_t reserved_14_15 : 2;
4436 uint64_t xsdef : 2; /**< TX Excessive deferral (halfdup mode only) */
4437 uint64_t reserved_10_11 : 2;
4438 uint64_t xscol : 2; /**< TX Excessive collisions (halfdup mode only) */
4439 uint64_t reserved_4_7 : 4;
4440 uint64_t undflw : 2; /**< TX Underflow */
4441 uint64_t reserved_1_1 : 1;
4442 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4444 uint64_t pko_nxa : 1;
4445 uint64_t reserved_1_1 : 1;
4446 uint64_t undflw : 2;
4447 uint64_t reserved_4_7 : 4;
4449 uint64_t reserved_10_11 : 2;
4451 uint64_t reserved_14_15 : 2;
4452 uint64_t late_col : 2;
4453 uint64_t reserved_18_19 : 2;
4454 uint64_t ptp_lost : 2;
4455 uint64_t reserved_22_63 : 42;
4458 struct cvmx_agl_gmx_tx_int_en_cn52xx {
4459 #ifdef __BIG_ENDIAN_BITFIELD
4460 uint64_t reserved_18_63 : 46;
4461 uint64_t late_col : 2; /**< TX Late Collision */
4462 uint64_t reserved_14_15 : 2;
4463 uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
4464 uint64_t reserved_10_11 : 2;
4465 uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
4466 uint64_t reserved_4_7 : 4;
4467 uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
4468 uint64_t reserved_1_1 : 1;
4469 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4471 uint64_t pko_nxa : 1;
4472 uint64_t reserved_1_1 : 1;
4473 uint64_t undflw : 2;
4474 uint64_t reserved_4_7 : 4;
4476 uint64_t reserved_10_11 : 2;
4478 uint64_t reserved_14_15 : 2;
4479 uint64_t late_col : 2;
4480 uint64_t reserved_18_63 : 46;
4483 struct cvmx_agl_gmx_tx_int_en_cn52xx cn52xxp1;
4484 struct cvmx_agl_gmx_tx_int_en_cn56xx {
4485 #ifdef __BIG_ENDIAN_BITFIELD
4486 uint64_t reserved_17_63 : 47;
4487 uint64_t late_col : 1; /**< TX Late Collision */
4488 uint64_t reserved_13_15 : 3;
4489 uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
4490 uint64_t reserved_9_11 : 3;
4491 uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
4492 uint64_t reserved_3_7 : 5;
4493 uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
4494 uint64_t reserved_1_1 : 1;
4495 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4497 uint64_t pko_nxa : 1;
4498 uint64_t reserved_1_1 : 1;
4499 uint64_t undflw : 1;
4500 uint64_t reserved_3_7 : 5;
4502 uint64_t reserved_9_11 : 3;
4504 uint64_t reserved_13_15 : 3;
4505 uint64_t late_col : 1;
4506 uint64_t reserved_17_63 : 47;
4509 struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1;
4510 struct cvmx_agl_gmx_tx_int_en_s cn61xx;
4511 struct cvmx_agl_gmx_tx_int_en_s cn63xx;
4512 struct cvmx_agl_gmx_tx_int_en_s cn63xxp1;
4513 struct cvmx_agl_gmx_tx_int_en_s cn66xx;
4514 struct cvmx_agl_gmx_tx_int_en_s cn68xx;
4515 struct cvmx_agl_gmx_tx_int_en_s cn68xxp1;
4517 typedef union cvmx_agl_gmx_tx_int_en cvmx_agl_gmx_tx_int_en_t;
4520 * cvmx_agl_gmx_tx_int_reg
4522 * AGL_GMX_TX_INT_REG = Interrupt Register
4526 * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
4527 * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
4528 * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
/* AGL_GMX_TX_INT_REG: TX interrupt status bits; field-for-field companion of
 * AGL_GMX_TX_INT_EN. Two-bit fields in the _s layout are per-port; the
 * cn52xx variant drops PTP_LOST and the cn56xx variant is single-port. */
4530 union cvmx_agl_gmx_tx_int_reg {
4532 struct cvmx_agl_gmx_tx_int_reg_s {
4533 #ifdef __BIG_ENDIAN_BITFIELD
4534 uint64_t reserved_22_63 : 42;
4535 uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
4536 sent due to XSCOL */
4537 uint64_t reserved_18_19 : 2;
4538 uint64_t late_col : 2; /**< TX Late Collision */
4539 uint64_t reserved_14_15 : 2;
4540 uint64_t xsdef : 2; /**< TX Excessive deferral (halfdup mode only) */
4541 uint64_t reserved_10_11 : 2;
4542 uint64_t xscol : 2; /**< TX Excessive collisions (halfdup mode only) */
4543 uint64_t reserved_4_7 : 4;
4544 uint64_t undflw : 2; /**< TX Underflow */
4545 uint64_t reserved_1_1 : 1;
4546 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4548 uint64_t pko_nxa : 1;
4549 uint64_t reserved_1_1 : 1;
4550 uint64_t undflw : 2;
4551 uint64_t reserved_4_7 : 4;
4553 uint64_t reserved_10_11 : 2;
4555 uint64_t reserved_14_15 : 2;
4556 uint64_t late_col : 2;
4557 uint64_t reserved_18_19 : 2;
4558 uint64_t ptp_lost : 2;
4559 uint64_t reserved_22_63 : 42;
4562 struct cvmx_agl_gmx_tx_int_reg_cn52xx {
4563 #ifdef __BIG_ENDIAN_BITFIELD
4564 uint64_t reserved_18_63 : 46;
4565 uint64_t late_col : 2; /**< TX Late Collision */
4566 uint64_t reserved_14_15 : 2;
4567 uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
4568 uint64_t reserved_10_11 : 2;
4569 uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
4570 uint64_t reserved_4_7 : 4;
4571 uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
4572 uint64_t reserved_1_1 : 1;
4573 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4575 uint64_t pko_nxa : 1;
4576 uint64_t reserved_1_1 : 1;
4577 uint64_t undflw : 2;
4578 uint64_t reserved_4_7 : 4;
4580 uint64_t reserved_10_11 : 2;
4582 uint64_t reserved_14_15 : 2;
4583 uint64_t late_col : 2;
4584 uint64_t reserved_18_63 : 46;
4587 struct cvmx_agl_gmx_tx_int_reg_cn52xx cn52xxp1;
4588 struct cvmx_agl_gmx_tx_int_reg_cn56xx {
4589 #ifdef __BIG_ENDIAN_BITFIELD
4590 uint64_t reserved_17_63 : 47;
4591 uint64_t late_col : 1; /**< TX Late Collision */
4592 uint64_t reserved_13_15 : 3;
4593 uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
4594 uint64_t reserved_9_11 : 3;
4595 uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
4596 uint64_t reserved_3_7 : 5;
4597 uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
4598 uint64_t reserved_1_1 : 1;
4599 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4601 uint64_t pko_nxa : 1;
4602 uint64_t reserved_1_1 : 1;
4603 uint64_t undflw : 1;
4604 uint64_t reserved_3_7 : 5;
4606 uint64_t reserved_9_11 : 3;
4608 uint64_t reserved_13_15 : 3;
4609 uint64_t late_col : 1;
4610 uint64_t reserved_17_63 : 47;
4613 struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1;
4614 struct cvmx_agl_gmx_tx_int_reg_s cn61xx;
4615 struct cvmx_agl_gmx_tx_int_reg_s cn63xx;
4616 struct cvmx_agl_gmx_tx_int_reg_s cn63xxp1;
4617 struct cvmx_agl_gmx_tx_int_reg_s cn66xx;
4618 struct cvmx_agl_gmx_tx_int_reg_s cn68xx;
4619 struct cvmx_agl_gmx_tx_int_reg_s cn68xxp1;
4621 typedef union cvmx_agl_gmx_tx_int_reg cvmx_agl_gmx_tx_int_reg_t;
4624 * cvmx_agl_gmx_tx_jam
4626 * AGL_GMX_TX_JAM = Packet TX Jam Pattern
4630 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
/* AGL_GMX_TX_JAM: 8-bit TX jam pattern. Same layout on all listed chips. */
4633 union cvmx_agl_gmx_tx_jam {
4635 struct cvmx_agl_gmx_tx_jam_s {
4636 #ifdef __BIG_ENDIAN_BITFIELD
4637 uint64_t reserved_8_63 : 56;
4638 uint64_t jam : 8; /**< Jam pattern */
4641 uint64_t reserved_8_63 : 56;
4644 struct cvmx_agl_gmx_tx_jam_s cn52xx;
4645 struct cvmx_agl_gmx_tx_jam_s cn52xxp1;
4646 struct cvmx_agl_gmx_tx_jam_s cn56xx;
4647 struct cvmx_agl_gmx_tx_jam_s cn56xxp1;
4648 struct cvmx_agl_gmx_tx_jam_s cn61xx;
4649 struct cvmx_agl_gmx_tx_jam_s cn63xx;
4650 struct cvmx_agl_gmx_tx_jam_s cn63xxp1;
4651 struct cvmx_agl_gmx_tx_jam_s cn66xx;
4652 struct cvmx_agl_gmx_tx_jam_s cn68xx;
4653 struct cvmx_agl_gmx_tx_jam_s cn68xxp1;
4655 typedef union cvmx_agl_gmx_tx_jam cvmx_agl_gmx_tx_jam_t;
4658 * cvmx_agl_gmx_tx_lfsr
4660 * AGL_GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
4664 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
/* AGL_GMX_TX_LFSR: 16-bit LFSR state feeding the truncated binary
 * exponential backoff computation. Same layout on all listed chips. */
4667 union cvmx_agl_gmx_tx_lfsr {
4669 struct cvmx_agl_gmx_tx_lfsr_s {
4670 #ifdef __BIG_ENDIAN_BITFIELD
4671 uint64_t reserved_16_63 : 48;
4672 uint64_t lfsr : 16; /**< The current state of the LFSR used to feed random
4673 numbers to compute truncated binary exponential
4677 uint64_t reserved_16_63 : 48;
4680 struct cvmx_agl_gmx_tx_lfsr_s cn52xx;
4681 struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1;
4682 struct cvmx_agl_gmx_tx_lfsr_s cn56xx;
4683 struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1;
4684 struct cvmx_agl_gmx_tx_lfsr_s cn61xx;
4685 struct cvmx_agl_gmx_tx_lfsr_s cn63xx;
4686 struct cvmx_agl_gmx_tx_lfsr_s cn63xxp1;
4687 struct cvmx_agl_gmx_tx_lfsr_s cn66xx;
4688 struct cvmx_agl_gmx_tx_lfsr_s cn68xx;
4689 struct cvmx_agl_gmx_tx_lfsr_s cn68xxp1;
4691 typedef union cvmx_agl_gmx_tx_lfsr cvmx_agl_gmx_tx_lfsr_t;
4694 * cvmx_agl_gmx_tx_ovr_bp
4696 * AGL_GMX_TX_OVR_BP = Packet TX Override BackPressure
4700 * IGN_FULL[0], BP[0], EN[0] will be reset when MIX0_CTL[RESET] is set to 1.
4701 * IGN_FULL[1], BP[1], EN[1] will be reset when MIX1_CTL[RESET] is set to 1.
/* AGL_GMX_TX_OVR_BP: per-port TX back-pressure override (EN enables the
 * override, BP supplies the status, IGN_FULL ignores RX FIFO full). The
 * cn56xx variant is the single-port (1-bit field) version. */
4703 union cvmx_agl_gmx_tx_ovr_bp {
4705 struct cvmx_agl_gmx_tx_ovr_bp_s {
4706 #ifdef __BIG_ENDIAN_BITFIELD
4707 uint64_t reserved_10_63 : 54;
4708 uint64_t en : 2; /**< Per port Enable back pressure override */
4709 uint64_t reserved_6_7 : 2;
4710 uint64_t bp : 2; /**< Port BackPressure status to use
4712 1=Port should be back pressured */
4713 uint64_t reserved_2_3 : 2;
4714 uint64_t ign_full : 2; /**< Ignore the RX FIFO full when computing BP */
4716 uint64_t ign_full : 2;
4717 uint64_t reserved_2_3 : 2;
4719 uint64_t reserved_6_7 : 2;
4721 uint64_t reserved_10_63 : 54;
4724 struct cvmx_agl_gmx_tx_ovr_bp_s cn52xx;
4725 struct cvmx_agl_gmx_tx_ovr_bp_s cn52xxp1;
4726 struct cvmx_agl_gmx_tx_ovr_bp_cn56xx {
4727 #ifdef __BIG_ENDIAN_BITFIELD
4728 uint64_t reserved_9_63 : 55;
4729 uint64_t en : 1; /**< Per port Enable back pressure override */
4730 uint64_t reserved_5_7 : 3;
4731 uint64_t bp : 1; /**< Port BackPressure status to use
4733 1=Port should be back pressured */
4734 uint64_t reserved_1_3 : 3;
4735 uint64_t ign_full : 1; /**< Ignore the RX FIFO full when computing BP */
4737 uint64_t ign_full : 1;
4738 uint64_t reserved_1_3 : 3;
4740 uint64_t reserved_5_7 : 3;
4742 uint64_t reserved_9_63 : 55;
4745 struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1;
4746 struct cvmx_agl_gmx_tx_ovr_bp_s cn61xx;
4747 struct cvmx_agl_gmx_tx_ovr_bp_s cn63xx;
4748 struct cvmx_agl_gmx_tx_ovr_bp_s cn63xxp1;
4749 struct cvmx_agl_gmx_tx_ovr_bp_s cn66xx;
4750 struct cvmx_agl_gmx_tx_ovr_bp_s cn68xx;
4751 struct cvmx_agl_gmx_tx_ovr_bp_s cn68xxp1;
4753 typedef union cvmx_agl_gmx_tx_ovr_bp cvmx_agl_gmx_tx_ovr_bp_t;
4756 * cvmx_agl_gmx_tx_pause_pkt_dmac
4758 * AGL_GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field
4762 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
/* AGL_GMX_TX_PAUSE_PKT_DMAC: 48-bit destination MAC address used in
 * outbound pause packets. Same layout on all listed chips. */
4765 union cvmx_agl_gmx_tx_pause_pkt_dmac {
4767 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s {
4768 #ifdef __BIG_ENDIAN_BITFIELD
4769 uint64_t reserved_48_63 : 16;
4770 uint64_t dmac : 48; /**< The DMAC field placed in outbnd pause pkts */
4773 uint64_t reserved_48_63 : 16;
4776 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xx;
4777 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1;
4778 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx;
4779 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1;
4780 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn61xx;
4781 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xx;
4782 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xxp1;
4783 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn66xx;
4784 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn68xx;
4785 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn68xxp1;
4787 typedef union cvmx_agl_gmx_tx_pause_pkt_dmac cvmx_agl_gmx_tx_pause_pkt_dmac_t;
4790 * cvmx_agl_gmx_tx_pause_pkt_type
4792 * AGL_GMX_TX_PAUSE_PKT_TYPE = Packet TX Pause Packet TYPE field
4796 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
/* AGL_GMX_TX_PAUSE_PKT_TYPE: 16-bit Ethertype (TYPE) field used in
 * outbound pause packets. Same layout on all listed chips. */
4799 union cvmx_agl_gmx_tx_pause_pkt_type {
4801 struct cvmx_agl_gmx_tx_pause_pkt_type_s {
4802 #ifdef __BIG_ENDIAN_BITFIELD
4803 uint64_t reserved_16_63 : 48;
4804 uint64_t type : 16; /**< The TYPE field placed in outbnd pause pkts */
4807 uint64_t reserved_16_63 : 48;
4810 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xx;
4811 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1;
4812 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx;
4813 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1;
4814 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn61xx;
4815 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xx;
4816 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xxp1;
4817 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn66xx;
4818 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn68xx;
4819 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn68xxp1;
4821 typedef union cvmx_agl_gmx_tx_pause_pkt_type cvmx_agl_gmx_tx_pause_pkt_type_t;
4826 * AGL_PRT_CTL = AGL Port Control
4830 * The RGMII timing specification requires that devices transmit clock and
4831 * data synchronously. The specification requires external sources (namely
4832 * the PC board trace routes) to introduce the appropriate 1.5 to 2.0 ns of
4835 * To eliminate the need for the PC board delays, the MIX RGMII interface
4836 * has optional onboard DLL's for both transmit and receive. For correct
4837 * operation, at most one of the transmitter, board, or receiver involved
4838 * in an RGMII link should introduce delay. By default/reset,
4839 * the MIX RGMII receivers delay the received clock, and the MIX
4840 * RGMII transmitters do not delay the transmitted clock. Whether this
4841 * default works as-is with a given link partner depends on the behavior
4842 * of the link partner and the PC board.
4844 * These are the possible modes of MIX RGMII receive operation:
4845 * o AGL_PRTx_CTL[CLKRX_BYP] = 0 (reset value) - The OCTEON MIX RGMII
4846 * receive interface introduces clock delay using its internal DLL.
4847 * This mode is appropriate if neither the remote
4848 * transmitter nor the PC board delays the clock.
4849 * o AGL_PRTx_CTL[CLKRX_BYP] = 1, [CLKRX_SET] = 0x0 - The OCTEON MIX
4850 * RGMII receive interface introduces no clock delay. This mode
4851 * is appropriate if either the remote transmitter or the PC board
4854 * These are the possible modes of MIX RGMII transmit operation:
4855 * o AGL_PRTx_CTL[CLKTX_BYP] = 1, [CLKTX_SET] = 0x0 (reset value) -
4856 * The OCTEON MIX RGMII transmit interface introduces no clock
4857 * delay. This mode is appropriate if either the remote receiver
4858 * or the PC board delays the clock.
4859 * o AGL_PRTx_CTL[CLKTX_BYP] = 0 - The OCTEON MIX RGMII transmit
4860 * interface introduces clock delay using its internal DLL.
4861 * This mode is appropriate if neither the remote receiver
4862 * nor the PC board delays the clock.
4864 * AGL_PRT0_CTL will be reset when MIX0_CTL[RESET] is set to 1.
4865 * AGL_PRT1_CTL will be reset when MIX1_CTL[RESET] is set to 1.
/* AGL_PRTx_CTL: per-port AGL control — pad drive strength (CMP_*/DRV_*),
 * RGMII RX/TX clock delay and DLL bypass (CLK*_SET/CLK*_BYP), and basic
 * port controls (DLLRST, COMP, ENABLE, CLKRST, MODE). See the RGMII delay
 * discussion above for how CLKRX_BYP/CLKTX_BYP are intended to be used. */
4867 union cvmx_agl_prtx_ctl {
4869 struct cvmx_agl_prtx_ctl_s {
4870 #ifdef __BIG_ENDIAN_BITFIELD
4871 uint64_t drv_byp : 1; /**< Bypass the compensation controller and use
4872 DRV_NCTL and DRV_PCTL */
4873 uint64_t reserved_62_62 : 1;
4874 uint64_t cmp_pctl : 6; /**< PCTL drive strength from the compensation ctl */
4875 uint64_t reserved_54_55 : 2;
4876 uint64_t cmp_nctl : 6; /**< NCTL drive strength from the compensation ctl */
4877 uint64_t reserved_46_47 : 2;
4878 uint64_t drv_pctl : 6; /**< PCTL drive strength to use in bypass mode
4879 Reset value of 19 is for 50 ohm termination */
4880 uint64_t reserved_38_39 : 2;
4881 uint64_t drv_nctl : 6; /**< NCTL drive strength to use in bypass mode
4882 Reset value of 15 is for 50 ohm termination */
4883 uint64_t reserved_29_31 : 3;
4884 uint64_t clk_set : 5; /**< The clock delay as determined by the DLL */
4885 uint64_t clkrx_byp : 1; /**< Bypass the RX clock delay setting
4886 Skews RXC from RXD,RXCTL in RGMII mode
4887 By default, HW internally shifts the RXC clock
4888 to sample RXD,RXCTL assuming clock and data are
4889 sourced synchronously from the link partner.
4890 In MII mode, the CLKRX_BYP is forced to 1. */
4891 uint64_t reserved_21_22 : 2;
4892 uint64_t clkrx_set : 5; /**< RX clock delay setting to use in bypass mode
4893 Skews RXC from RXD in RGMII mode */
4894 uint64_t clktx_byp : 1; /**< Bypass the TX clock delay setting
4895 Skews TXC from TXD,TXCTL in RGMII mode
4896 By default, clock and data are sourced
4898 In MII mode, the CLKRX_BYP is forced to 1. */
/* NOTE(review): the CLKTX_BYP description above names CLKRX_BYP; this looks
 * like a copy/paste typo for CLKTX_BYP — confirm against the HRM. */
4899 uint64_t reserved_13_14 : 2;
4900 uint64_t clktx_set : 5; /**< TX clock delay setting to use in bypass mode
4901 Skews TXC from TXD in RGMII mode */
4902 uint64_t reserved_5_7 : 3;
4903 uint64_t dllrst : 1; /**< DLL Reset */
4904 uint64_t comp : 1; /**< Compensation Enable */
4905 uint64_t enable : 1; /**< Port Enable */
4906 uint64_t clkrst : 1; /**< Clock Tree Reset */
4907 uint64_t mode : 1; /**< Port Mode
4908 MODE must be set the same for all ports in which
4909 AGL_PRTx_CTL[ENABLE] is set.
4914 uint64_t clkrst : 1;
4915 uint64_t enable : 1;
4917 uint64_t dllrst : 1;
4918 uint64_t reserved_5_7 : 3;
4919 uint64_t clktx_set : 5;
4920 uint64_t reserved_13_14 : 2;
4921 uint64_t clktx_byp : 1;
4922 uint64_t clkrx_set : 5;
4923 uint64_t reserved_21_22 : 2;
4924 uint64_t clkrx_byp : 1;
4925 uint64_t clk_set : 5;
4926 uint64_t reserved_29_31 : 3;
4927 uint64_t drv_nctl : 6;
4928 uint64_t reserved_38_39 : 2;
4929 uint64_t drv_pctl : 6;
4930 uint64_t reserved_46_47 : 2;
4931 uint64_t cmp_nctl : 6;
4932 uint64_t reserved_54_55 : 2;
4933 uint64_t cmp_pctl : 6;
4934 uint64_t reserved_62_62 : 1;
4935 uint64_t drv_byp : 1;
4938 struct cvmx_agl_prtx_ctl_s cn61xx;
4939 struct cvmx_agl_prtx_ctl_s cn63xx;
4940 struct cvmx_agl_prtx_ctl_s cn63xxp1;
4941 struct cvmx_agl_prtx_ctl_s cn66xx;
4942 struct cvmx_agl_prtx_ctl_s cn68xx;
4943 struct cvmx_agl_prtx_ctl_s cn68xxp1;
4945 typedef union cvmx_agl_prtx_ctl cvmx_agl_prtx_ctl_t;