1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
/**
 * Configuration and status register (CSR) type definitions for
 * Octeon agl.
 *
 * This file is auto generated. Do not edit.
 */
52 #ifndef __CVMX_AGL_TYPEDEFS_H__
53 #define __CVMX_AGL_TYPEDEFS_H__
/* Whole-block CSR accessors.  With CVMX_ENABLE_CSR_ADDRESS_CHECKING set, each
 * accessor warns when the register does not exist on the running chip model;
 * otherwise the address is a plain compile-time constant. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_BAD_REG CVMX_AGL_GMX_BAD_REG_FUNC()
static inline uint64_t CVMX_AGL_GMX_BAD_REG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_BAD_REG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E0000518ull);
}
#else
#define CVMX_AGL_GMX_BAD_REG (CVMX_ADD_IO_SEG(0x00011800E0000518ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_BIST CVMX_AGL_GMX_BIST_FUNC()
static inline uint64_t CVMX_AGL_GMX_BIST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_BIST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E0000400ull);
}
#else
#define CVMX_AGL_GMX_BIST (CVMX_ADD_IO_SEG(0x00011800E0000400ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_DRV_CTL CVMX_AGL_GMX_DRV_CTL_FUNC()
static inline uint64_t CVMX_AGL_GMX_DRV_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
		cvmx_warn("CVMX_AGL_GMX_DRV_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00007F0ull);
}
#else
#define CVMX_AGL_GMX_DRV_CTL (CVMX_ADD_IO_SEG(0x00011800E00007F0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_INF_MODE CVMX_AGL_GMX_INF_MODE_FUNC()
static inline uint64_t CVMX_AGL_GMX_INF_MODE_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)))
		cvmx_warn("CVMX_AGL_GMX_INF_MODE not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00007F8ull);
}
#else
#define CVMX_AGL_GMX_INF_MODE (CVMX_ADD_IO_SEG(0x00011800E00007F8ull))
#endif
/* Per-port CSR accessors (offset = AGL port index).  Valid offsets:
 * CN52XX/CN63XX 0..1, CN56XX 0 only; ports are spaced 2048 bytes apart. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_PRTX_CFG(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_PRTX_CFG(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_PRTX_CFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000010ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM0(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM0(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000180ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM1(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM1(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000188ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM2(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM2(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000190ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM3(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM3(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000198ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM4(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM4(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A0ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM5(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM5(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) (CVMX_ADD_IO_SEG(0x00011800E00001A8ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CAM_EN(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CAM_EN(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000108ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_ADR_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_ADR_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000100ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_DECISION(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_DECISION(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_DECISION(offset) (CVMX_ADD_IO_SEG(0x00011800E0000040ull) + ((offset) & 1) * 2048)
#endif
/* Per-port RX framing CSR accessors; same port-validity rules as above. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CHK(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CHK(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000020ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_FRM_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000018ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MAX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MAX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000030ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_FRM_MIN(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_FRM_MIN(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000028ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_IFG(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_IFG(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_IFG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000058ull) + ((offset) & 1) * 2048)
#endif
/* Per-port RX interrupt/timing CSR accessors.  RX_INBND exists only on
 * CN63XX (the surviving model-check line shows only that model). */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_INT_EN(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_INT_EN(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_INT_EN(offset) (CVMX_ADD_IO_SEG(0x00011800E0000008ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_INT_REG(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_INT_REG(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_INT_REG(offset) (CVMX_ADD_IO_SEG(0x00011800E0000000ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_JABBER(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_JABBER(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_JABBER(offset) (CVMX_ADD_IO_SEG(0x00011800E0000038ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000068ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_RX_INBND(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_RX_INBND(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_RX_INBND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000060ull) + ((offset) & 1) * 2048)
#endif
/* Per-port RX statistics CSR accessors; same port-validity rules as above. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000050ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000088ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000098ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A8ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B8ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) (CVMX_ADD_IO_SEG(0x00011800E0000080ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) (CVMX_ADD_IO_SEG(0x00011800E00000C0ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000090ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) (CVMX_ADD_IO_SEG(0x00011800E00000A0ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) (CVMX_ADD_IO_SEG(0x00011800E00000B0ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RXX_UDD_SKP(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RXX_UDD_SKP(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) (CVMX_ADD_IO_SEG(0x00011800E0000048ull) + ((offset) & 1) * 2048)
#endif
/* RX backpressure CSR accessors (note: BP registers are spaced 8 bytes
 * apart, not 2048) plus whole-block RX status registers. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RX_BP_DROPX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RX_BP_DROPX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_AGL_GMX_RX_BP_DROPX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000420ull) + ((offset) & 1) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RX_BP_OFFX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RX_BP_OFFX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_AGL_GMX_RX_BP_OFFX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000460ull) + ((offset) & 1) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_RX_BP_ONX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_RX_BP_ONX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_AGL_GMX_RX_BP_ONX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000440ull) + ((offset) & 1) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_RX_PRT_INFO CVMX_AGL_GMX_RX_PRT_INFO_FUNC()
static inline uint64_t CVMX_AGL_GMX_RX_PRT_INFO_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_RX_PRT_INFO not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00004E8ull);
}
#else
#define CVMX_AGL_GMX_RX_PRT_INFO (CVMX_ADD_IO_SEG(0x00011800E00004E8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_RX_TX_STATUS CVMX_AGL_GMX_RX_TX_STATUS_FUNC()
static inline uint64_t CVMX_AGL_GMX_RX_TX_STATUS_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_RX_TX_STATUS not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00007E8ull);
}
#else
#define CVMX_AGL_GMX_RX_TX_STATUS (CVMX_ADD_IO_SEG(0x00011800E00007E8ull))
#endif
/* Source-MAC and per-port TX control/pause CSR accessors.  TXX_CLK exists
 * only on CN63XX (the surviving model-check line shows only that model). */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_SMACX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_SMACX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_SMACX(offset) (CVMX_ADD_IO_SEG(0x00011800E0000230ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_STAT_BP CVMX_AGL_GMX_STAT_BP_FUNC()
static inline uint64_t CVMX_AGL_GMX_STAT_BP_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_STAT_BP not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E0000520ull);
}
#else
#define CVMX_AGL_GMX_STAT_BP (CVMX_ADD_IO_SEG(0x00011800E0000520ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_APPEND(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_APPEND(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_APPEND(offset) (CVMX_ADD_IO_SEG(0x00011800E0000218ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_CLK(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_CLK(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_CLK(offset) (CVMX_ADD_IO_SEG(0x00011800E0000208ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_CTL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000270ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_MIN_PKT(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_MIN_PKT(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_MIN_PKT(offset) (CVMX_ADD_IO_SEG(0x00011800E0000240ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000248ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) (CVMX_ADD_IO_SEG(0x00011800E0000238ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_TOGO(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_TOGO(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000258ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_PAUSE_ZERO(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_PAUSE_ZERO(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) (CVMX_ADD_IO_SEG(0x00011800E0000260ull) + ((offset) & 1) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_SOFT_PAUSE(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_SOFT_PAUSE(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) (CVMX_ADD_IO_SEG(0x00011800E0000250ull) + ((offset) & 1) * 2048)
#endif
700 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
701 static inline uint64_t CVMX_AGL_GMX_TXX_STAT0(unsigned long offset)
704 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
705 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
706 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
707 cvmx_warn("CVMX_AGL_GMX_TXX_STAT0(%lu) is invalid on this chip\n", offset);
708 return CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048;
711 #define CVMX_AGL_GMX_TXX_STAT0(offset) (CVMX_ADD_IO_SEG(0x00011800E0000280ull) + ((offset) & 1) * 2048)
713 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
714 static inline uint64_t CVMX_AGL_GMX_TXX_STAT1(unsigned long offset)
717 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
718 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
719 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
720 cvmx_warn("CVMX_AGL_GMX_TXX_STAT1(%lu) is invalid on this chip\n", offset);
721 return CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048;
724 #define CVMX_AGL_GMX_TXX_STAT1(offset) (CVMX_ADD_IO_SEG(0x00011800E0000288ull) + ((offset) & 1) * 2048)
726 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
727 static inline uint64_t CVMX_AGL_GMX_TXX_STAT2(unsigned long offset)
730 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
731 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
732 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
733 cvmx_warn("CVMX_AGL_GMX_TXX_STAT2(%lu) is invalid on this chip\n", offset);
734 return CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048;
737 #define CVMX_AGL_GMX_TXX_STAT2(offset) (CVMX_ADD_IO_SEG(0x00011800E0000290ull) + ((offset) & 1) * 2048)
739 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
740 static inline uint64_t CVMX_AGL_GMX_TXX_STAT3(unsigned long offset)
743 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
744 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
745 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
746 cvmx_warn("CVMX_AGL_GMX_TXX_STAT3(%lu) is invalid on this chip\n", offset);
747 return CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048;
750 #define CVMX_AGL_GMX_TXX_STAT3(offset) (CVMX_ADD_IO_SEG(0x00011800E0000298ull) + ((offset) & 1) * 2048)
/* CSR address of AGL_GMX_TX(offset)_STAT4; per-chip offset range is validated
 * when CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_STAT4(unsigned long offset)
{
	if (!((OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_STAT4(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_STAT4(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A0ull) + ((offset) & 1) * 2048)
#endif
/* CSR address of AGL_GMX_TX(offset)_STAT5; per-chip offset range is validated
 * when CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_STAT5(unsigned long offset)
{
	if (!((OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_STAT5(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_STAT5(offset) (CVMX_ADD_IO_SEG(0x00011800E00002A8ull) + ((offset) & 1) * 2048)
#endif
/* CSR address of AGL_GMX_TX(offset)_STAT6; per-chip offset range is validated
 * when CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_STAT6(unsigned long offset)
{
	if (!((OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_STAT6(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_STAT6(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B0ull) + ((offset) & 1) * 2048)
#endif
/* CSR address of AGL_GMX_TX(offset)_STAT7; per-chip offset range is validated
 * when CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_STAT7(unsigned long offset)
{
	if (!((OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_STAT7(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_STAT7(offset) (CVMX_ADD_IO_SEG(0x00011800E00002B8ull) + ((offset) & 1) * 2048)
#endif
/* CSR address of AGL_GMX_TX(offset)_STAT8; per-chip offset range is validated
 * when CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_STAT8(unsigned long offset)
{
	if (!((OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_STAT8(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_STAT8(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C0ull) + ((offset) & 1) * 2048)
#endif
/* CSR address of AGL_GMX_TX(offset)_STAT9; per-chip offset range is validated
 * when CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_STAT9(unsigned long offset)
{
	if (!((OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_STAT9(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_STAT9(offset) (CVMX_ADD_IO_SEG(0x00011800E00002C8ull) + ((offset) & 1) * 2048)
#endif
/* CSR address of AGL_GMX_TX(offset)_STATS_CTL; per-chip offset range is
 * validated when CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_STATS_CTL(unsigned long offset)
{
	if (!((OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_STATS_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_STATS_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0000268ull) + ((offset) & 1) * 2048)
#endif
/* CSR address of AGL_GMX_TX(offset)_THRESH; per-chip offset range is validated
 * when CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_GMX_TXX_THRESH(unsigned long offset)
{
	if (!((OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_GMX_TXX_THRESH(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048;
}
#else
#define CVMX_AGL_GMX_TXX_THRESH(offset) (CVMX_ADD_IO_SEG(0x00011800E0000210ull) + ((offset) & 1) * 2048)
#endif
/* CSR address of AGL_GMX_TX_BP; chip support is checked when
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_TX_BP CVMX_AGL_GMX_TX_BP_FUNC()
static inline uint64_t CVMX_AGL_GMX_TX_BP_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_TX_BP not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00004D0ull);
}
#else
#define CVMX_AGL_GMX_TX_BP (CVMX_ADD_IO_SEG(0x00011800E00004D0ull))
#endif
/* CSR address of AGL_GMX_TX_COL_ATTEMPT; chip support is checked when
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_TX_COL_ATTEMPT CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC()
static inline uint64_t CVMX_AGL_GMX_TX_COL_ATTEMPT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_TX_COL_ATTEMPT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E0000498ull);
}
#else
#define CVMX_AGL_GMX_TX_COL_ATTEMPT (CVMX_ADD_IO_SEG(0x00011800E0000498ull))
#endif
/* CSR address of AGL_GMX_TX_IFG; chip support is checked when
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_TX_IFG CVMX_AGL_GMX_TX_IFG_FUNC()
static inline uint64_t CVMX_AGL_GMX_TX_IFG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_TX_IFG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E0000488ull);
}
#else
#define CVMX_AGL_GMX_TX_IFG (CVMX_ADD_IO_SEG(0x00011800E0000488ull))
#endif
/* CSR address of AGL_GMX_TX_INT_EN; chip support is checked when
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_TX_INT_EN CVMX_AGL_GMX_TX_INT_EN_FUNC()
static inline uint64_t CVMX_AGL_GMX_TX_INT_EN_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_TX_INT_EN not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E0000508ull);
}
#else
#define CVMX_AGL_GMX_TX_INT_EN (CVMX_ADD_IO_SEG(0x00011800E0000508ull))
#endif
/* CSR address of AGL_GMX_TX_INT_REG; chip support is checked when
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_TX_INT_REG CVMX_AGL_GMX_TX_INT_REG_FUNC()
static inline uint64_t CVMX_AGL_GMX_TX_INT_REG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_TX_INT_REG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E0000500ull);
}
#else
#define CVMX_AGL_GMX_TX_INT_REG (CVMX_ADD_IO_SEG(0x00011800E0000500ull))
#endif
/* CSR address of AGL_GMX_TX_JAM; chip support is checked when
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_TX_JAM CVMX_AGL_GMX_TX_JAM_FUNC()
static inline uint64_t CVMX_AGL_GMX_TX_JAM_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_TX_JAM not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E0000490ull);
}
#else
#define CVMX_AGL_GMX_TX_JAM (CVMX_ADD_IO_SEG(0x00011800E0000490ull))
#endif
/* CSR address of AGL_GMX_TX_LFSR; chip support is checked when
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_TX_LFSR CVMX_AGL_GMX_TX_LFSR_FUNC()
static inline uint64_t CVMX_AGL_GMX_TX_LFSR_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_TX_LFSR not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00004F8ull);
}
#else
#define CVMX_AGL_GMX_TX_LFSR (CVMX_ADD_IO_SEG(0x00011800E00004F8ull))
#endif
/* CSR address of AGL_GMX_TX_OVR_BP; chip support is checked when
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_TX_OVR_BP CVMX_AGL_GMX_TX_OVR_BP_FUNC()
static inline uint64_t CVMX_AGL_GMX_TX_OVR_BP_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_TX_OVR_BP not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00004C8ull);
}
#else
#define CVMX_AGL_GMX_TX_OVR_BP (CVMX_ADD_IO_SEG(0x00011800E00004C8ull))
#endif
/* CSR address of AGL_GMX_TX_PAUSE_PKT_DMAC; chip support is checked when
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC()
static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00004A0ull);
}
#else
#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC (CVMX_ADD_IO_SEG(0x00011800E00004A0ull))
#endif
/* CSR address of AGL_GMX_TX_PAUSE_PKT_TYPE; chip support is checked when
 * CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC()
static inline uint64_t CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN63XX)))
		cvmx_warn("CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800E00004A8ull);
}
#else
#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE (CVMX_ADD_IO_SEG(0x00011800E00004A8ull))
#endif
/* CSR address of AGL_PRT(offset)_CTL; only valid on CN63XX (offset 0..1),
 * checked when CVMX_ENABLE_CSR_ADDRESS_CHECKING is enabled. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_AGL_PRTX_CTL(unsigned long offset)
{
	if (!((OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_AGL_PRTX_CTL(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_AGL_PRTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011800E0002000ull) + ((offset) & 1) * 8)
#endif
979 * cvmx_agl_gmx_bad_reg
981 * AGL_GMX_BAD_REG = A collection of things that have gone very, very wrong
985 * OUT_OVR[0], LOSTSTAT[0], OVRFLW, TXPOP, TXPSH will be reset when MIX0_CTL[RESET] is set to 1.
986 * OUT_OVR[1], LOSTSTAT[1], OVRFLW1, TXPOP1, TXPSH1 will be reset when MIX1_CTL[RESET] is set to 1.
987 * STATOVR will be reset when both MIX0/1_CTL[RESET] are set to 1.
/*
 * AGL_GMX_BAD_REG - collection of fatal AGL GMX error indications.
 * OUT_OVR[0], LOSTSTAT[0], OVRFLW, TXPOP, TXPSH reset when MIX0_CTL[RESET]=1;
 * the *1 fields reset when MIX1_CTL[RESET]=1; STATOVR resets only when both
 * MIX0/1_CTL[RESET] are set.
 */
union cvmx_agl_gmx_bad_reg {
	uint64_t u64;
	struct cvmx_agl_gmx_bad_reg_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_38_63 : 26;
	uint64_t txpsh1 : 1;   /**< TX FIFO overflow (MII1) */
	uint64_t txpop1 : 1;   /**< TX FIFO underflow (MII1) */
	uint64_t ovrflw1 : 1;  /**< RX FIFO overflow (MII1) */
	uint64_t txpsh : 1;    /**< TX FIFO overflow (MII0) */
	uint64_t txpop : 1;    /**< TX FIFO underflow (MII0) */
	uint64_t ovrflw : 1;   /**< RX FIFO overflow (MII0) */
	uint64_t reserved_27_31 : 5;
	uint64_t statovr : 1;  /**< TX statistics overflow */
	uint64_t reserved_24_25 : 2;
	uint64_t loststat : 2; /**< TX statistics data was over-written; one bit
				    per port (MII/RGMII); TX stats are corrupted */
	uint64_t reserved_4_21 : 18;
	uint64_t out_ovr : 2;  /**< Outbound data FIFO overflow */
	uint64_t reserved_0_1 : 2;
#else
	uint64_t reserved_0_1 : 2;
	uint64_t out_ovr : 2;
	uint64_t reserved_4_21 : 18;
	uint64_t loststat : 2;
	uint64_t reserved_24_25 : 2;
	uint64_t statovr : 1;
	uint64_t reserved_27_31 : 5;
	uint64_t ovrflw : 1;
	uint64_t txpop : 1;
	uint64_t txpsh : 1;
	uint64_t ovrflw1 : 1;
	uint64_t txpop1 : 1;
	uint64_t txpsh1 : 1;
	uint64_t reserved_38_63 : 26;
#endif
	} s;
	/* CN52XX: single LOSTSTAT bit (one MII stats block). */
	struct cvmx_agl_gmx_bad_reg_cn52xx {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_38_63 : 26;
	uint64_t txpsh1 : 1;   /**< TX FIFO overflow (MII1) */
	uint64_t txpop1 : 1;   /**< TX FIFO underflow (MII1) */
	uint64_t ovrflw1 : 1;  /**< RX FIFO overflow (MII1) */
	uint64_t txpsh : 1;    /**< TX FIFO overflow (MII0) */
	uint64_t txpop : 1;    /**< TX FIFO underflow (MII0) */
	uint64_t ovrflw : 1;   /**< RX FIFO overflow (MII0) */
	uint64_t reserved_27_31 : 5;
	uint64_t statovr : 1;  /**< TX statistics overflow */
	uint64_t reserved_23_25 : 3;
	uint64_t loststat : 1; /**< TX statistics data was over-written;
				    TX stats are corrupted */
	uint64_t reserved_4_21 : 18;
	uint64_t out_ovr : 2;  /**< Outbound data FIFO overflow */
	uint64_t reserved_0_1 : 2;
#else
	uint64_t reserved_0_1 : 2;
	uint64_t out_ovr : 2;
	uint64_t reserved_4_21 : 18;
	uint64_t loststat : 1;
	uint64_t reserved_23_25 : 3;
	uint64_t statovr : 1;
	uint64_t reserved_27_31 : 5;
	uint64_t ovrflw : 1;
	uint64_t txpop : 1;
	uint64_t txpsh : 1;
	uint64_t ovrflw1 : 1;
	uint64_t txpop1 : 1;
	uint64_t txpsh1 : 1;
	uint64_t reserved_38_63 : 26;
#endif
	} cn52xx;
	struct cvmx_agl_gmx_bad_reg_cn52xx cn52xxp1;
	/* CN56XX: single port, single OUT_OVR/LOSTSTAT bits, no MII1 fields. */
	struct cvmx_agl_gmx_bad_reg_cn56xx {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_35_63 : 29;
	uint64_t txpsh : 1;    /**< TX FIFO overflow */
	uint64_t txpop : 1;    /**< TX FIFO underflow */
	uint64_t ovrflw : 1;   /**< RX FIFO overflow */
	uint64_t reserved_27_31 : 5;
	uint64_t statovr : 1;  /**< TX statistics overflow */
	uint64_t reserved_23_25 : 3;
	uint64_t loststat : 1; /**< TX statistics data was over-written;
				    TX stats are corrupted */
	uint64_t reserved_3_21 : 19;
	uint64_t out_ovr : 1;  /**< Outbound data FIFO overflow */
	uint64_t reserved_0_1 : 2;
#else
	uint64_t reserved_0_1 : 2;
	uint64_t out_ovr : 1;
	uint64_t reserved_3_21 : 19;
	uint64_t loststat : 1;
	uint64_t reserved_23_25 : 3;
	uint64_t statovr : 1;
	uint64_t reserved_27_31 : 5;
	uint64_t ovrflw : 1;
	uint64_t txpop : 1;
	uint64_t txpsh : 1;
	uint64_t reserved_35_63 : 29;
#endif
	} cn56xx;
	struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1;
	struct cvmx_agl_gmx_bad_reg_s cn63xx;
	struct cvmx_agl_gmx_bad_reg_s cn63xxp1;
};
typedef union cvmx_agl_gmx_bad_reg cvmx_agl_gmx_bad_reg_t;
1102 * AGL_GMX_BIST = GMX BIST Results
1106 * Not reset when MIX*_CTL[RESET] is set to 1.
/*
 * AGL_GMX_BIST - GMX BIST results.
 * Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_bist {
	uint64_t u64;
	struct cvmx_agl_gmx_bist_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_25_63 : 39;
	uint64_t status : 25; /**< BIST results; HW sets a bit for each memory
				   that fails (gmx inbound/outbound FIFO banks,
				   gmi CSR and outbound stat memories). */
#else
	uint64_t status : 25;
	uint64_t reserved_25_63 : 39;
#endif
	} s;
	/* CN52XX/CN56XX: only 10 BIST result bits. */
	struct cvmx_agl_gmx_bist_cn52xx {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_10_63 : 54;
	uint64_t status : 10; /**< BIST results; HW sets a bit for each memory
				   that fails (inb/outb FIFO, gmi CSR and
				   outbound stat memories). */
#else
	uint64_t status : 10;
	uint64_t reserved_10_63 : 54;
#endif
	} cn52xx;
	struct cvmx_agl_gmx_bist_cn52xx cn52xxp1;
	struct cvmx_agl_gmx_bist_cn52xx cn56xx;
	struct cvmx_agl_gmx_bist_cn52xx cn56xxp1;
	struct cvmx_agl_gmx_bist_s cn63xx;
	struct cvmx_agl_gmx_bist_s cn63xxp1;
};
typedef union cvmx_agl_gmx_bist cvmx_agl_gmx_bist_t;
1178 * cvmx_agl_gmx_drv_ctl
1180 * AGL_GMX_DRV_CTL = GMX Drive Control
1184 * NCTL, PCTL, BYP_EN will be reset when MIX0_CTL[RESET] is set to 1.
1185 * NCTL1, PCTL1, BYP_EN1 will be reset when MIX1_CTL[RESET] is set to 1.
/*
 * AGL_GMX_DRV_CTL - GMX drive control.
 * NCTL, PCTL, BYP_EN reset when MIX0_CTL[RESET]=1;
 * NCTL1, PCTL1, BYP_EN1 reset when MIX1_CTL[RESET]=1.
 */
union cvmx_agl_gmx_drv_ctl {
	uint64_t u64;
	struct cvmx_agl_gmx_drv_ctl_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_49_63 : 15;
	uint64_t byp_en1 : 1; /**< Compensation controller bypass enable (MII1) */
	uint64_t reserved_45_47 : 3;
	uint64_t pctl1 : 5;   /**< AGL PCTL (MII1) */
	uint64_t reserved_37_39 : 3;
	uint64_t nctl1 : 5;   /**< AGL NCTL (MII1) */
	uint64_t reserved_17_31 : 15;
	uint64_t byp_en : 1;  /**< Compensation controller bypass enable */
	uint64_t reserved_13_15 : 3;
	uint64_t pctl : 5;    /**< AGL PCTL */
	uint64_t reserved_5_7 : 3;
	uint64_t nctl : 5;    /**< AGL NCTL */
#else
	uint64_t nctl : 5;
	uint64_t reserved_5_7 : 3;
	uint64_t pctl : 5;
	uint64_t reserved_13_15 : 3;
	uint64_t byp_en : 1;
	uint64_t reserved_17_31 : 15;
	uint64_t nctl1 : 5;
	uint64_t reserved_37_39 : 3;
	uint64_t pctl1 : 5;
	uint64_t reserved_45_47 : 3;
	uint64_t byp_en1 : 1;
	uint64_t reserved_49_63 : 15;
#endif
	} s;
	struct cvmx_agl_gmx_drv_ctl_s cn52xx;
	struct cvmx_agl_gmx_drv_ctl_s cn52xxp1;
	/* CN56XX: single MII interface, no *1 fields. */
	struct cvmx_agl_gmx_drv_ctl_cn56xx {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_17_63 : 47;
	uint64_t byp_en : 1;  /**< Compensation controller bypass enable */
	uint64_t reserved_13_15 : 3;
	uint64_t pctl : 5;    /**< AGL PCTL */
	uint64_t reserved_5_7 : 3;
	uint64_t nctl : 5;    /**< AGL NCTL */
#else
	uint64_t nctl : 5;
	uint64_t reserved_5_7 : 3;
	uint64_t pctl : 5;
	uint64_t reserved_13_15 : 3;
	uint64_t byp_en : 1;
	uint64_t reserved_17_63 : 47;
#endif
	} cn56xx;
	struct cvmx_agl_gmx_drv_ctl_cn56xx cn56xxp1;
};
typedef union cvmx_agl_gmx_drv_ctl cvmx_agl_gmx_drv_ctl_t;
1245 * cvmx_agl_gmx_inf_mode
1247 * AGL_GMX_INF_MODE = Interface Mode
1251 * Not reset when MIX*_CTL[RESET] is set to 1.
/*
 * AGL_GMX_INF_MODE - interface mode.
 * Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_inf_mode {
	uint64_t u64;
	struct cvmx_agl_gmx_inf_mode_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_2_63 : 62;
	uint64_t en : 1; /**< Interface enable */
	uint64_t reserved_0_0 : 1;
#else
	uint64_t reserved_0_0 : 1;
	uint64_t en : 1;
	uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_agl_gmx_inf_mode_s cn52xx;
	struct cvmx_agl_gmx_inf_mode_s cn52xxp1;
	struct cvmx_agl_gmx_inf_mode_s cn56xx;
	struct cvmx_agl_gmx_inf_mode_s cn56xxp1;
};
typedef union cvmx_agl_gmx_inf_mode cvmx_agl_gmx_inf_mode_t;
1277 * cvmx_agl_gmx_prt#_cfg
1279 * AGL_GMX_PRT_CFG = Port description
1283 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/*
 * AGL_GMX_PRT(idx)_CFG - per-port configuration.
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_prtx_cfg {
	uint64_t u64;
	struct cvmx_agl_gmx_prtx_cfg_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_14_63 : 50;
	uint64_t tx_idle : 1;   /**< TX machine is idle */
	uint64_t rx_idle : 1;   /**< RX machine is idle */
	uint64_t reserved_9_11 : 3;
	uint64_t speed_msb : 1; /**< Link speed MSB [SPEED_MSB:SPEED]:
				     10=10Mbs, 00=100Mbs, 01=1000Mbs */
	uint64_t reserved_7_7 : 1;
	uint64_t burst : 1;     /**< Half-duplex burst enable; only valid for
				     1000Mbs half-duplex operation */
	uint64_t tx_en : 1;     /**< Port TX enable; when clear, all packet
				     cycles appear as inter-frame cycles */
	uint64_t rx_en : 1;     /**< Port RX enable; when clear, all packet
				     cycles appear as inter-frame cycles */
	uint64_t slottime : 1;  /**< Half-duplex slot time: 0=512 bit times
				     (10/100Mbs), 1=4096 bit times (1000Mbs) */
	uint64_t duplex : 1;    /**< 0 = half duplex
				     (collisions/extensions/bursts) */
	uint64_t speed : 1;     /**< Link speed LSB [SPEED_MSB:SPEED] */
	uint64_t en : 1;        /**< Link enable; when clear, packets are not
				     received or transmitted (including PAUSE
				     and JAM); an in-flight packet completes
				     before the bus idles */
#else
	uint64_t en : 1;
	uint64_t speed : 1;
	uint64_t duplex : 1;
	uint64_t slottime : 1;
	uint64_t rx_en : 1;
	uint64_t tx_en : 1;
	uint64_t burst : 1;
	uint64_t reserved_7_7 : 1;
	uint64_t speed_msb : 1;
	uint64_t reserved_9_11 : 3;
	uint64_t rx_idle : 1;
	uint64_t tx_idle : 1;
	uint64_t reserved_14_63 : 50;
#endif
	} s;
	/* CN52XX/CN56XX: MII only; single-bit SPEED (0=10/100Mbs). */
	struct cvmx_agl_gmx_prtx_cfg_cn52xx {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_6_63 : 58;
	uint64_t tx_en : 1;    /**< Port TX enable */
	uint64_t rx_en : 1;    /**< Port RX enable */
	uint64_t slottime : 1; /**< Half-duplex slot time: 0=512 bit times */
	uint64_t duplex : 1;   /**< 0 = half duplex */
	uint64_t speed : 1;    /**< Link speed: 0 = 10/100Mbs operation */
	uint64_t en : 1;       /**< Link enable */
#else
	uint64_t en : 1;
	uint64_t speed : 1;
	uint64_t duplex : 1;
	uint64_t slottime : 1;
	uint64_t rx_en : 1;
	uint64_t tx_en : 1;
	uint64_t reserved_6_63 : 58;
#endif
	} cn52xx;
	struct cvmx_agl_gmx_prtx_cfg_cn52xx cn52xxp1;
	struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xx;
	struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xxp1;
	struct cvmx_agl_gmx_prtx_cfg_s cn63xx;
	struct cvmx_agl_gmx_prtx_cfg_s cn63xxp1;
};
typedef union cvmx_agl_gmx_prtx_cfg cvmx_agl_gmx_prtx_cfg_t;
1397 * cvmx_agl_gmx_rx#_adr_cam0
1399 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1403 * Not reset when MIX*_CTL[RESET] is set to 1.
/*
 * AGL_GMX_RX(idx)_ADR_CAM0 - DMAC address filter CAM word 0.
 * Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_adr_cam0 {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_adr_cam0_s {
		/* Single full-width field: layout is endian-independent. */
		uint64_t adr : 64; /**< DMAC address to match on; each entry
					contributes 8 bits to one of 8 matchers.
					Writes do not change the CSR while
					AGL_GMX_PRT_CFG[EN] is enabled. Matches
					unicast or multicast DMAC. */
	} s;
	struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xx;
	struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx;
	struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xx;
	struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_adr_cam0 cvmx_agl_gmx_rxx_adr_cam0_t;
1432 * cvmx_agl_gmx_rx#_adr_cam1
1434 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1438 * Not reset when MIX*_CTL[RESET] is set to 1.
/*
 * AGL_GMX_RX(idx)_ADR_CAM1 - DMAC address filter CAM word 1.
 * Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_adr_cam1 {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_adr_cam1_s {
		/* Single full-width field: layout is endian-independent. */
		uint64_t adr : 64; /**< DMAC address to match on; each entry
					contributes 8 bits to one of 8 matchers.
					Writes do not change the CSR while
					AGL_GMX_PRT_CFG[EN] is enabled. Matches
					unicast or multicast DMAC. */
	} s;
	struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xx;
	struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx;
	struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xx;
	struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_adr_cam1 cvmx_agl_gmx_rxx_adr_cam1_t;
1467 * cvmx_agl_gmx_rx#_adr_cam2
1469 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1473 * Not reset when MIX*_CTL[RESET] is set to 1.
/*
 * AGL_GMX_RX(idx)_ADR_CAM2 - DMAC address filter CAM word 2.
 * Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_adr_cam2 {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_adr_cam2_s {
		/* Single full-width field: layout is endian-independent. */
		uint64_t adr : 64; /**< DMAC address to match on; each entry
					contributes 8 bits to one of 8 matchers.
					Writes do not change the CSR while
					AGL_GMX_PRT_CFG[EN] is enabled. Matches
					unicast or multicast DMAC. */
	} s;
	struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xx;
	struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx;
	struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xx;
	struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_adr_cam2 cvmx_agl_gmx_rxx_adr_cam2_t;
1502 * cvmx_agl_gmx_rx#_adr_cam3
1504 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1508 * Not reset when MIX*_CTL[RESET] is set to 1.
/*
 * AGL_GMX_RX(idx)_ADR_CAM3 - DMAC address filter CAM word 3.
 * Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_adr_cam3 {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_adr_cam3_s {
		/* Single full-width field: layout is endian-independent. */
		uint64_t adr : 64; /**< DMAC address to match on; each entry
					contributes 8 bits to one of 8 matchers.
					Writes do not change the CSR while
					AGL_GMX_PRT_CFG[EN] is enabled. Matches
					unicast or multicast DMAC. */
	} s;
	struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xx;
	struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx;
	struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xx;
	struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_adr_cam3 cvmx_agl_gmx_rxx_adr_cam3_t;
1537 * cvmx_agl_gmx_rx#_adr_cam4
1539 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1543 * Not reset when MIX*_CTL[RESET] is set to 1.
/*
 * AGL_GMX_RX(idx)_ADR_CAM4 - DMAC address filter CAM word 4.
 * Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_adr_cam4 {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_adr_cam4_s {
		/* Single full-width field: layout is endian-independent. */
		uint64_t adr : 64; /**< DMAC address to match on; each entry
					contributes 8 bits to one of 8 matchers.
					Writes do not change the CSR while
					AGL_GMX_PRT_CFG[EN] is enabled. Matches
					unicast or multicast DMAC. */
	} s;
	struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xx;
	struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx;
	struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xx;
	struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_adr_cam4 cvmx_agl_gmx_rxx_adr_cam4_t;
1572 * cvmx_agl_gmx_rx#_adr_cam5
1574 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
1578 * Not reset when MIX*_CTL[RESET] is set to 1.
/*
 * AGL_GMX_RX(idx)_ADR_CAM5 - DMAC address filter CAM word 5.
 * Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_adr_cam5 {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_adr_cam5_s {
		/* Single full-width field: layout is endian-independent. */
		uint64_t adr : 64; /**< DMAC address to match on; each entry
					contributes 8 bits to one of 8 matchers.
					Writes do not change the CSR while
					AGL_GMX_PRT_CFG[EN] is enabled. Matches
					unicast or multicast DMAC. */
	} s;
	struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xx;
	struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx;
	struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xx;
	struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_adr_cam5 cvmx_agl_gmx_rxx_adr_cam5_t;
1607 * cvmx_agl_gmx_rx#_adr_cam_en
1609 * AGL_GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
1613 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/*
 * AGL_GMX_RX(idx)_ADR_CAM_EN - address filter CAM entry enables.
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_adr_cam_en {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_adr_cam_en_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_8_63 : 56;
	uint64_t en : 8; /**< CAM entry enables */
#else
	uint64_t en : 8;
	uint64_t reserved_8_63 : 56;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xx;
	struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx;
	struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xx;
	struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_adr_cam_en cvmx_agl_gmx_rxx_adr_cam_en_t;
1639 * cvmx_agl_gmx_rx#_adr_ctl
1641 * AGL_GMX_RX_ADR_CTL = Address Filtering Control
1646 * Here is some pseudo code that represents the address filter behavior.
1649 * bool dmac_addr_filter(uint8 prt, uint48 dmac) [
1650 * ASSERT(prt >= 0 && prt <= 3);
1651 * if (is_bcst(dmac)) // broadcast accept
1652 * return (AGL_GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
1653 * if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 1) // multicast reject
1655 * if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 2) // multicast accept
1660 * for (i=0; i<8; i++) [
1661 * if (AGL_GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0)
1663 * uint48 unswizzled_mac_adr = 0x0;
1664 * for (j=5; j>=0; j--) [
1665 * unswizzled_mac_adr = (unswizzled_mac_adr << 8) | AGL_GMX_RX[prt]_ADR_CAM[j][ADR<i*8+7:i*8>];
1667 * if (unswizzled_mac_adr == dmac) [
1674 * return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
1676 * return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
1680 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/*
 * AGL_GMX_RX(idx)_ADR_CTL - address filtering control.
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_adr_ctl {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_adr_ctl_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_4_63 : 60;
	uint64_t cam_mode : 1; /**< 0 = reject packet on DMAC CAM match,
				    1 = accept packet on DMAC CAM match */
	uint64_t mcst : 2;     /**< Multicast mode: 0 = use address filter CAM,
				    1 = force reject all multicast,
				    2 = force accept all multicast */
	uint64_t bcst : 1;     /**< Accept all broadcast packets */
#else
	uint64_t bcst : 1;
	uint64_t mcst : 2;
	uint64_t cam_mode : 1;
	uint64_t reserved_4_63 : 60;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xx;
	struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx;
	struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xx;
	struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_adr_ctl cvmx_agl_gmx_rxx_adr_ctl_t;
1715 * cvmx_agl_gmx_rx#_decision
1717 * AGL_GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
1721 * As each byte in a packet is received by GMX, the L2 byte count is compared
1722 * against the AGL_GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes
1723 * from the beginning of the L2 header (DMAC). In normal operation, the L2
1724 * header begins after the PREAMBLE+SFD (AGL_GMX_RX_FRM_CTL[PRE_CHK]=1) and any
1725 * optional UDD skip data (AGL_GMX_RX_UDD_SKP[LEN]).
1727 * When AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
1728 * packet and would require UDD skip length to account for them.
1731 * Port Mode <=AGL_GMX_RX_DECISION bytes (default=24) >AGL_GMX_RX_DECISION bytes (default=24)
1733 * MII/Full Duplex accept packet apply filters
1734 * no filtering is applied accept packet based on DMAC and PAUSE packet filters
1736 * MII/Half Duplex drop packet apply filters
1737 * packet is unconditionally dropped accept packet based on DMAC
1739 * where l2_size = MAX(0, total_packet_size - AGL_GMX_RX_UDD_SKP[LEN] - ((AGL_GMX_RX_FRM_CTL[PRE_CHK]==1)*8)
1741 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/*
 * AGL_GMX_RX(idx)_DECISION - L2 byte count at which a packet is accepted
 * or filtered. Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_decision {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_decision_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63 : 59;
	uint64_t cnt : 5; /**< Byte count deciding when to accept or filter */
#else
	uint64_t cnt : 5;
	uint64_t reserved_5_63 : 59;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_decision_s cn52xx;
	struct cvmx_agl_gmx_rxx_decision_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_decision_s cn56xx;
	struct cvmx_agl_gmx_rxx_decision_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_decision_s cn63xx;
	struct cvmx_agl_gmx_rxx_decision_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_decision cvmx_agl_gmx_rxx_decision_t;
1767 * cvmx_agl_gmx_rx#_frm_chk
1769 * AGL_GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
1773 * If AGL_GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
1775 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/* AGL_GMX_RX_FRM_CHK: per-error-type mask selecting which frame errors set
   the frame's ERR bit. cn63xx (_s layout) has 10 flag bits; the cn52xx/cn56xx
   layout reserves the NIBERR and CAREXT bit positions. */
1777 union cvmx_agl_gmx_rxx_frm_chk
1780 struct cvmx_agl_gmx_rxx_frm_chk_s
1782 #if __BYTE_ORDER == __BIG_ENDIAN
1783 uint64_t reserved_10_63 : 54;
1784 uint64_t niberr : 1; /**< Nibble error */
1785 uint64_t skperr : 1; /**< Skipper error */
1786 uint64_t rcverr : 1; /**< Frame was received with packet data reception error */
1787 uint64_t lenerr : 1; /**< Frame was received with length error */
1788 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
1789 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
1790 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
1791 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
1792 uint64_t carext : 1; /**< Carrier extend error */
1793 uint64_t minerr : 1; /**< Frame was received with length < min_length */
1795 uint64_t minerr : 1;
1796 uint64_t carext : 1;
1797 uint64_t maxerr : 1;
1798 uint64_t jabber : 1;
1799 uint64_t fcserr : 1;
1800 uint64_t alnerr : 1;
1801 uint64_t lenerr : 1;
1802 uint64_t rcverr : 1;
1803 uint64_t skperr : 1;
1804 uint64_t niberr : 1;
1805 uint64_t reserved_10_63 : 54;
1808 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx
1810 #if __BYTE_ORDER == __BIG_ENDIAN
1811 uint64_t reserved_9_63 : 55;
1812 uint64_t skperr : 1; /**< Skipper error */
1813 uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
1814 uint64_t lenerr : 1; /**< Frame was received with length error */
1815 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
1816 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
1817 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
1818 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
1819 uint64_t reserved_1_1 : 1;
1820 uint64_t minerr : 1; /**< Frame was received with length < min_length */
1822 uint64_t minerr : 1;
1823 uint64_t reserved_1_1 : 1;
1824 uint64_t maxerr : 1;
1825 uint64_t jabber : 1;
1826 uint64_t fcserr : 1;
1827 uint64_t alnerr : 1;
1828 uint64_t lenerr : 1;
1829 uint64_t rcverr : 1;
1830 uint64_t skperr : 1;
1831 uint64_t reserved_9_63 : 55;
1834 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn52xxp1;
1835 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xx;
1836 struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xxp1;
1837 struct cvmx_agl_gmx_rxx_frm_chk_s cn63xx;
1838 struct cvmx_agl_gmx_rxx_frm_chk_s cn63xxp1;
1840 typedef union cvmx_agl_gmx_rxx_frm_chk cvmx_agl_gmx_rxx_frm_chk_t;
1843 * cvmx_agl_gmx_rx#_frm_ctl
1845 * AGL_GMX_RX_FRM_CTL = Frame Control
1850 * When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
1851 * determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
1852 * core as part of the packet.
1854 * In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
1855 * size when checking against the MIN and MAX bounds. Furthermore, the bytes
1856 * are skipped when locating the start of the L2 header for DMAC and Control
1857 * frame recognition.
1860 * These bits control how the HW handles incoming PAUSE packets. Here are
1861 * the most common modes of operation:
1862 * CTL_BCK=1,CTL_DRP=1 - HW does it all
1863 * CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames
1864 * CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored
1866 * These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode.
1867 * Since PAUSE packets only apply to fulldup operation, any PAUSE packet
1868 * would constitute an exception which should be handled by the processing
1869 * cores. PAUSE packets should not be forwarded.
1871 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/* AGL_GMX_RX_FRM_CTL: frame control flags (PREAMBLE handling, length-check
   relaxations, PAUSE-frame handling). The cn63xx (_s) layout adds PTP_MODE
   and NULL_DIS over the cn52xx/cn56xx layout. */
1873 union cvmx_agl_gmx_rxx_frm_ctl
1876 struct cvmx_agl_gmx_rxx_frm_ctl_s
1878 #if __BYTE_ORDER == __BIG_ENDIAN
1879 uint64_t reserved_13_63 : 51;
1880 uint64_t ptp_mode : 1; /**< Timestamp mode
1881 When PTP_MODE is set, a 64-bit timestamp will be
1882 prepended to every incoming packet. The timestamp
1883 bytes are added to the packet in such a way as to
1884 not modify the packet's receive byte count. This
1885 implies that the AGL_GMX_RX_JABBER,
1886 AGL_GMX_RX_FRM_MIN, AGL_GMX_RX_FRM_MAX,
1887 AGL_GMX_RX_DECISION, AGL_GMX_RX_UDD_SKP, and the
1888 AGL_GMX_RX_STATS_* do not require any adjustment
1889 as they operate on the received packet size.
1890 If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
1891 uint64_t reserved_11_11 : 1;
1892 uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
1893 due to PARITAL packets */
1894 uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
1895 regardless of the number of previous PREAMBLE
1896 nibbles. In this mode, PRE_STRP should be set to
1897 account for the variable nature of the PREAMBLE.
1898 PRE_CHK must be set to enable this and all
1899 PREAMBLE features. */
1900 uint64_t pad_len : 1; /**< When set, disables the length check for non-min
1901 sized pkts with padding in the client data */
1902 uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
1903 uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
1904 AGL will begin the frame at the first SFD.
1905 PRE_FREE must be set if PRE_ALIGN is set.
1906 PRE_CHK must be set to enable this and all
1907 PREAMBLE features. */
1908 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
1909 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign
1910 Multicast address */
1911 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
1912 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
1913 uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
1914 0=PREAMBLE+SFD is sent to core as part of frame
1915 1=PREAMBLE+SFD is dropped
1916 PRE_STRP must be set if PRE_ALIGN is set.
1917 PRE_CHK must be set to enable this and all
1918 PREAMBLE features. */
1919 uint64_t pre_chk : 1; /**< This port is configured to send a valid 802.3
1920 PREAMBLE to begin every frame. AGL checks that a
1921 valid PREAMBLE is received (based on PRE_FREE).
1922 When a problem does occur within the PREAMBLE
1923 sequence, the frame is marked as bad and not sent
1924 into the core. The AGL_GMX_RX_INT_REG[PCTERR]
1925 interrupt is also raised. */
1927 uint64_t pre_chk : 1;
1928 uint64_t pre_strp : 1;
1929 uint64_t ctl_drp : 1;
1930 uint64_t ctl_bck : 1;
1931 uint64_t ctl_mcst : 1;
1932 uint64_t ctl_smac : 1;
1933 uint64_t pre_free : 1;
1934 uint64_t vlan_len : 1;
1935 uint64_t pad_len : 1;
1936 uint64_t pre_align : 1;
1937 uint64_t null_dis : 1;
1938 uint64_t reserved_11_11 : 1;
1939 uint64_t ptp_mode : 1;
1940 uint64_t reserved_13_63 : 51;
1943 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx
1945 #if __BYTE_ORDER == __BIG_ENDIAN
1946 uint64_t reserved_10_63 : 54;
1947 uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
1948 regardless of the number of previous PREAMBLE
1949 nibbles. In this mode, PREAMBLE can be consumed
1950 by the HW so when PRE_ALIGN is set, PRE_FREE,
1951 PRE_STRP must be set for correct operation.
1952 PRE_CHK must be set to enable this and all
1953 PREAMBLE features. */
1954 uint64_t pad_len : 1; /**< When set, disables the length check for non-min
1955 sized pkts with padding in the client data */
1956 uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
1957 uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
1958 0 - 254 cycles of PREAMBLE followed by SFD
1959 PRE_FREE must be set if PRE_ALIGN is set.
1960 PRE_CHK must be set to enable this and all
1961 PREAMBLE features. */
1962 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
1963 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assign
1964 Multicast address */
1965 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
1966 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
1967 uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
1968 0=PREAMBLE+SFD is sent to core as part of frame
1969 1=PREAMBLE+SFD is dropped
1970 PRE_STRP must be set if PRE_ALIGN is set.
1971 PRE_CHK must be set to enable this and all
1972 PREAMBLE features. */
1973 uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
1974 to begin every frame. GMX checks that the
1975 PREAMBLE is sent correctly */
1977 uint64_t pre_chk : 1;
1978 uint64_t pre_strp : 1;
1979 uint64_t ctl_drp : 1;
1980 uint64_t ctl_bck : 1;
1981 uint64_t ctl_mcst : 1;
1982 uint64_t ctl_smac : 1;
1983 uint64_t pre_free : 1;
1984 uint64_t vlan_len : 1;
1985 uint64_t pad_len : 1;
1986 uint64_t pre_align : 1;
1987 uint64_t reserved_10_63 : 54;
1990 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn52xxp1;
1991 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xx;
1992 struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xxp1;
1993 struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xx;
1994 struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xxp1;
1996 typedef union cvmx_agl_gmx_rxx_frm_ctl cvmx_agl_gmx_rxx_frm_ctl_t;
1999 * cvmx_agl_gmx_rx#_frm_max
2001 * AGL_GMX_RX_FRM_MAX = Frame Max length
2005 * When changing the LEN field, be sure that LEN does not exceed
2006 * AGL_GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that
2007 * are within the maximum length parameter to be rejected because they exceed
2008 * the AGL_GMX_RX_JABBER[CNT] limit.
2012 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/* AGL_GMX_RX_FRM_MAX: 16-bit max-frame-length bound checked when
   AGL_GMX_RXn_FRM_CHK[MAXERR] is enabled; same layout on all models. */
2014 union cvmx_agl_gmx_rxx_frm_max
2017 struct cvmx_agl_gmx_rxx_frm_max_s
2019 #if __BYTE_ORDER == __BIG_ENDIAN
2020 uint64_t reserved_16_63 : 48;
2021 uint64_t len : 16; /**< Byte count for Max-sized frame check
2022 AGL_GMX_RXn_FRM_CHK[MAXERR] enables the check
2024 If enabled, failing packets set the MAXERR
2025 interrupt and the MIX opcode is set to OVER_FCS
2026 (0x3, if packet has bad FCS) or OVER_ERR (0x4, if
2027 packet has good FCS).
2028 LEN <= AGL_GMX_RX_JABBER[CNT] */
2031 uint64_t reserved_16_63 : 48;
2034 struct cvmx_agl_gmx_rxx_frm_max_s cn52xx;
2035 struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1;
2036 struct cvmx_agl_gmx_rxx_frm_max_s cn56xx;
2037 struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1;
2038 struct cvmx_agl_gmx_rxx_frm_max_s cn63xx;
2039 struct cvmx_agl_gmx_rxx_frm_max_s cn63xxp1;
2041 typedef union cvmx_agl_gmx_rxx_frm_max cvmx_agl_gmx_rxx_frm_max_t;
2044 * cvmx_agl_gmx_rx#_frm_min
2046 * AGL_GMX_RX_FRM_MIN = Frame Min length
2050 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/* AGL_GMX_RX_FRM_MIN: 16-bit min-frame-length bound checked when
   AGL_GMX_RXn_FRM_CHK[MINERR] is enabled; same layout on all models. */
2053 union cvmx_agl_gmx_rxx_frm_min
2056 struct cvmx_agl_gmx_rxx_frm_min_s
2058 #if __BYTE_ORDER == __BIG_ENDIAN
2059 uint64_t reserved_16_63 : 48;
2060 uint64_t len : 16; /**< Byte count for Min-sized frame check
2061 AGL_GMX_RXn_FRM_CHK[MINERR] enables the check
2063 If enabled, failing packets set the MINERR
2064 interrupt and the MIX opcode is set to UNDER_FCS
2065 (0x6, if packet has bad FCS) or UNDER_ERR (0x8,
2066 if packet has good FCS). */
2069 uint64_t reserved_16_63 : 48;
2072 struct cvmx_agl_gmx_rxx_frm_min_s cn52xx;
2073 struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1;
2074 struct cvmx_agl_gmx_rxx_frm_min_s cn56xx;
2075 struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1;
2076 struct cvmx_agl_gmx_rxx_frm_min_s cn63xx;
2077 struct cvmx_agl_gmx_rxx_frm_min_s cn63xxp1;
2079 typedef union cvmx_agl_gmx_rxx_frm_min cvmx_agl_gmx_rxx_frm_min_t;
2082 * cvmx_agl_gmx_rx#_ifg
2084 * AGL_GMX_RX_IFG = RX Min IFG
2088 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/* AGL_GMX_RX_IFG: 4-bit minimum inter-frame-gap threshold (in units of
   IFG*8 bits) used to flag IFGERR; same layout on all models. */
2091 union cvmx_agl_gmx_rxx_ifg
2094 struct cvmx_agl_gmx_rxx_ifg_s
2096 #if __BYTE_ORDER == __BIG_ENDIAN
2097 uint64_t reserved_4_63 : 60;
2098 uint64_t ifg : 4; /**< Min IFG (in IFG*8 bits) between packets used to
2099 determine IFGERR. Normally IFG is 96 bits.
2100 Note in some operating modes, IFG cycles can be
2101 inserted or removed in order to achieve clock rate
2102 adaptation. For these reasons, the default value
2103 is slightly conservative and does not check up to
2104 the full 96 bits of IFG. */
2107 uint64_t reserved_4_63 : 60;
2110 struct cvmx_agl_gmx_rxx_ifg_s cn52xx;
2111 struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1;
2112 struct cvmx_agl_gmx_rxx_ifg_s cn56xx;
2113 struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1;
2114 struct cvmx_agl_gmx_rxx_ifg_s cn63xx;
2115 struct cvmx_agl_gmx_rxx_ifg_s cn63xxp1;
2117 typedef union cvmx_agl_gmx_rxx_ifg cvmx_agl_gmx_rxx_ifg_t;
2120 * cvmx_agl_gmx_rx#_int_en
2122 * AGL_GMX_RX_INT_EN = Interrupt Enable
2126 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/* AGL_GMX_RX_INT_EN: interrupt enable mask; bit-for-bit companion of
   AGL_GMX_RX_INT_REG below. cn52xx/cn56xx reserve the PHY_*, NIBERR and
   CAREXT positions that exist on cn63xx. */
2129 union cvmx_agl_gmx_rxx_int_en
2132 struct cvmx_agl_gmx_rxx_int_en_s
2134 #if __BYTE_ORDER == __BIG_ENDIAN
2135 uint64_t reserved_20_63 : 44;
2136 uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
2137 uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex | NS */
2138 uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed | NS */
2139 uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus | NS */
2140 uint64_t ifgerr : 1; /**< Interframe Gap Violation */
2141 uint64_t coldet : 1; /**< Collision Detection */
2142 uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
2143 uint64_t rsverr : 1; /**< Packet reserved opcodes */
2144 uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
2145 uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
2146 uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) | NS */
2147 uint64_t skperr : 1; /**< Skipper error */
2148 uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
2149 uint64_t lenerr : 1; /**< Frame was received with length error */
2150 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
2151 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2152 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2153 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
2154 uint64_t carext : 1; /**< Carrier extend error */
2155 uint64_t minerr : 1; /**< Frame was received with length < min_length */
2157 uint64_t minerr : 1;
2158 uint64_t carext : 1;
2159 uint64_t maxerr : 1;
2160 uint64_t jabber : 1;
2161 uint64_t fcserr : 1;
2162 uint64_t alnerr : 1;
2163 uint64_t lenerr : 1;
2164 uint64_t rcverr : 1;
2165 uint64_t skperr : 1;
2166 uint64_t niberr : 1;
2167 uint64_t ovrerr : 1;
2168 uint64_t pcterr : 1;
2169 uint64_t rsverr : 1;
2170 uint64_t falerr : 1;
2171 uint64_t coldet : 1;
2172 uint64_t ifgerr : 1;
2173 uint64_t phy_link : 1;
2174 uint64_t phy_spd : 1;
2175 uint64_t phy_dupx : 1;
2176 uint64_t pause_drp : 1;
2177 uint64_t reserved_20_63 : 44;
2180 struct cvmx_agl_gmx_rxx_int_en_cn52xx
2182 #if __BYTE_ORDER == __BIG_ENDIAN
2183 uint64_t reserved_20_63 : 44;
2184 uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
2185 uint64_t reserved_16_18 : 3;
2186 uint64_t ifgerr : 1; /**< Interframe Gap Violation */
2187 uint64_t coldet : 1; /**< Collision Detection */
2188 uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
2189 uint64_t rsverr : 1; /**< MII reserved opcodes */
2190 uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
2191 uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */
2192 uint64_t reserved_9_9 : 1;
2193 uint64_t skperr : 1; /**< Skipper error */
2194 uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
2195 uint64_t lenerr : 1; /**< Frame was received with length error */
2196 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
2197 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2198 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2199 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
2200 uint64_t reserved_1_1 : 1;
2201 uint64_t minerr : 1; /**< Frame was received with length < min_length */
2203 uint64_t minerr : 1;
2204 uint64_t reserved_1_1 : 1;
2205 uint64_t maxerr : 1;
2206 uint64_t jabber : 1;
2207 uint64_t fcserr : 1;
2208 uint64_t alnerr : 1;
2209 uint64_t lenerr : 1;
2210 uint64_t rcverr : 1;
2211 uint64_t skperr : 1;
2212 uint64_t reserved_9_9 : 1;
2213 uint64_t ovrerr : 1;
2214 uint64_t pcterr : 1;
2215 uint64_t rsverr : 1;
2216 uint64_t falerr : 1;
2217 uint64_t coldet : 1;
2218 uint64_t ifgerr : 1;
2219 uint64_t reserved_16_18 : 3;
2220 uint64_t pause_drp : 1;
2221 uint64_t reserved_20_63 : 44;
2224 struct cvmx_agl_gmx_rxx_int_en_cn52xx cn52xxp1;
2225 struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xx;
2226 struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xxp1;
2227 struct cvmx_agl_gmx_rxx_int_en_s cn63xx;
2228 struct cvmx_agl_gmx_rxx_int_en_s cn63xxp1;
2230 typedef union cvmx_agl_gmx_rxx_int_en cvmx_agl_gmx_rxx_int_en_t;
2233 * cvmx_agl_gmx_rx#_int_reg
2235 * AGL_GMX_RX_INT_REG = Interrupt Register
2239 * (1) exceptions will only be raised to the control processor if the
2240 * corresponding bit in the AGL_GMX_RX_INT_EN register is set.
2242 * (2) exception conditions 10:0 can also set the rcv/opcode in the received
2243 * packet's workQ entry. The AGL_GMX_RX_FRM_CHK register provides a bit mask
2244 * for configuring which conditions set the error.
2246 * (3) in half duplex operation, the expectation is that collisions will appear
2249 * (4) JABBER - An RX Jabber error indicates that a packet was received which
2250 * is longer than the maximum allowed packet as defined by the
2251 * system. GMX will truncate the packet at the JABBER count.
2252 * Failure to do so could lead to system instability.
2254 * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
2255 * AGL_GMX_RX_FRM_MAX. For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
2256 * > AGL_GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
2258 * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < AGL_GMX_RX_FRM_MIN.
2260 * (8) ALNERR - Indicates that the packet received was not an integer number of
2261 * bytes. If FCS checking is enabled, ALNERR will only assert if
2262 * the FCS is bad. If FCS checking is disabled, ALNERR will
2263 * assert in all non-integer frame cases.
2265 * (9) Collisions - Collisions can only occur in half-duplex mode. A collision
2266 * is assumed by the receiver when the received
2267 * frame < AGL_GMX_RX_FRM_MIN - this is normally a MINERR
2269 * (A) LENERR - Length errors occur when the received packet does not match the
2270 * length field. LENERR is only checked for packets between 64
2271 * and 1500 bytes. For untagged frames, the length must exact
2272 * match. For tagged frames the length or length+4 must match.
2274 * (B) PCTERR - checks that the frame begins with a valid PREAMBLE sequence.
2275 * Does not check the number of PREAMBLE cycles.
2277 * (C) OVRERR - Not to be included in the HRM
2279 * OVRERR is an architectural assertion check internal to GMX to
2280 * make sure no assumption was violated. In a correctly operating
2281 * system, this interrupt can never fire.
2283 * GMX has an internal arbiter which selects which of 4 ports to
2284 * buffer in the main RX FIFO. If we normally buffer 8 bytes,
2285 * then each port will typically push a tick every 8 cycles - if
2286 * the packet interface is going as fast as possible. If there
2287 * are four ports, they push every two cycles. So that's the
2288 * assumption. That the inbound module will always be able to
2289 * consume the tick before another is produced. If that doesn't
2290 * happen - that's when OVRERR will assert.
2292 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/* AGL_GMX_RX_INT_REG: interrupt status register; bit positions mirror
   AGL_GMX_RX_INT_EN above (an interrupt reaches the core only when the
   corresponding enable bit is set — see note (1) above). */
2294 union cvmx_agl_gmx_rxx_int_reg
2297 struct cvmx_agl_gmx_rxx_int_reg_s
2299 #if __BYTE_ORDER == __BIG_ENDIAN
2300 uint64_t reserved_20_63 : 44;
2301 uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
2302 uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex | NS */
2303 uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed | NS */
2304 uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus | NS */
2305 uint64_t ifgerr : 1; /**< Interframe Gap Violation
2306 Does not necessarily indicate a failure */
2307 uint64_t coldet : 1; /**< Collision Detection */
2308 uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
2309 uint64_t rsverr : 1; /**< Packet reserved opcodes */
2310 uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
2311 uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
2312 This interrupt should never assert */
2313 uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) | NS */
2314 uint64_t skperr : 1; /**< Skipper error */
2315 uint64_t rcverr : 1; /**< Frame was received with Packet Data reception error */
2316 uint64_t lenerr : 1; /**< Frame was received with length error */
2317 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
2318 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2319 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2320 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
2321 uint64_t carext : 1; /**< Carrier extend error */
2322 uint64_t minerr : 1; /**< Frame was received with length < min_length */
2324 uint64_t minerr : 1;
2325 uint64_t carext : 1;
2326 uint64_t maxerr : 1;
2327 uint64_t jabber : 1;
2328 uint64_t fcserr : 1;
2329 uint64_t alnerr : 1;
2330 uint64_t lenerr : 1;
2331 uint64_t rcverr : 1;
2332 uint64_t skperr : 1;
2333 uint64_t niberr : 1;
2334 uint64_t ovrerr : 1;
2335 uint64_t pcterr : 1;
2336 uint64_t rsverr : 1;
2337 uint64_t falerr : 1;
2338 uint64_t coldet : 1;
2339 uint64_t ifgerr : 1;
2340 uint64_t phy_link : 1;
2341 uint64_t phy_spd : 1;
2342 uint64_t phy_dupx : 1;
2343 uint64_t pause_drp : 1;
2344 uint64_t reserved_20_63 : 44;
2347 struct cvmx_agl_gmx_rxx_int_reg_cn52xx
2349 #if __BYTE_ORDER == __BIG_ENDIAN
2350 uint64_t reserved_20_63 : 44;
2351 uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
2352 uint64_t reserved_16_18 : 3;
2353 uint64_t ifgerr : 1; /**< Interframe Gap Violation
2354 Does not necessarily indicate a failure */
2355 uint64_t coldet : 1; /**< Collision Detection */
2356 uint64_t falerr : 1; /**< False carrier error or extend error after slottime */
2357 uint64_t rsverr : 1; /**< MII reserved opcodes */
2358 uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
2359 uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
2360 This interrupt should never assert */
2361 uint64_t reserved_9_9 : 1;
2362 uint64_t skperr : 1; /**< Skipper error */
2363 uint64_t rcverr : 1; /**< Frame was received with MII Data reception error */
2364 uint64_t lenerr : 1; /**< Frame was received with length error */
2365 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
2366 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2367 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2368 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
2369 uint64_t reserved_1_1 : 1;
2370 uint64_t minerr : 1; /**< Frame was received with length < min_length */
2372 uint64_t minerr : 1;
2373 uint64_t reserved_1_1 : 1;
2374 uint64_t maxerr : 1;
2375 uint64_t jabber : 1;
2376 uint64_t fcserr : 1;
2377 uint64_t alnerr : 1;
2378 uint64_t lenerr : 1;
2379 uint64_t rcverr : 1;
2380 uint64_t skperr : 1;
2381 uint64_t reserved_9_9 : 1;
2382 uint64_t ovrerr : 1;
2383 uint64_t pcterr : 1;
2384 uint64_t rsverr : 1;
2385 uint64_t falerr : 1;
2386 uint64_t coldet : 1;
2387 uint64_t ifgerr : 1;
2388 uint64_t reserved_16_18 : 3;
2389 uint64_t pause_drp : 1;
2390 uint64_t reserved_20_63 : 44;
2393 struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn52xxp1;
2394 struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xx;
2395 struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xxp1;
2396 struct cvmx_agl_gmx_rxx_int_reg_s cn63xx;
2397 struct cvmx_agl_gmx_rxx_int_reg_s cn63xxp1;
2399 typedef union cvmx_agl_gmx_rxx_int_reg cvmx_agl_gmx_rxx_int_reg_t;
2402 * cvmx_agl_gmx_rx#_jabber
2404 * AGL_GMX_RX_JABBER = The max size packet after which GMX will truncate
2408 * CNT must be 8-byte aligned such that CNT[2:0] == 0
2410 * The packet that will be sent to the packet input logic will have an
2411 * additional 8 bytes if AGL_GMX_RX_FRM_CTL[PRE_CHK] is set and
2412 * AGL_GMX_RX_FRM_CTL[PRE_STRP] is clear. The max packet that will be sent is
2415 * max_sized_packet = AGL_GMX_RX_JABBER[CNT]+((AGL_GMX_RX_FRM_CTL[PRE_CHK] & !AGL_GMX_RX_FRM_CTL[PRE_STRP])*8)
2417 * Be sure the CNT field value is at least as large as the
2418 * AGL_GMX_RX_FRM_MAX[LEN] value. Failure to meet this constraint will cause
2419 * packets that are within the AGL_GMX_RX_FRM_MAX[LEN] length to be rejected
2420 * because they exceed the CNT limit.
2422 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/* AGL_GMX_RX_JABBER: 16-bit truncation threshold; CNT must be >=
   AGL_GMX_RX_FRM_MAX[LEN] (see constraint above); same layout on all
   models. */
2424 union cvmx_agl_gmx_rxx_jabber
2427 struct cvmx_agl_gmx_rxx_jabber_s
2429 #if __BYTE_ORDER == __BIG_ENDIAN
2430 uint64_t reserved_16_63 : 48;
2431 uint64_t cnt : 16; /**< Byte count for jabber check
2432 Failing packets set the JABBER interrupt and are
2433 optionally sent with opcode==JABBER
2434 GMX will truncate the packet to CNT bytes
2435 CNT >= AGL_GMX_RX_FRM_MAX[LEN] */
2438 uint64_t reserved_16_63 : 48;
2441 struct cvmx_agl_gmx_rxx_jabber_s cn52xx;
2442 struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1;
2443 struct cvmx_agl_gmx_rxx_jabber_s cn56xx;
2444 struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1;
2445 struct cvmx_agl_gmx_rxx_jabber_s cn63xx;
2446 struct cvmx_agl_gmx_rxx_jabber_s cn63xxp1;
2448 typedef union cvmx_agl_gmx_rxx_jabber cvmx_agl_gmx_rxx_jabber_t;
2451 * cvmx_agl_gmx_rx#_pause_drop_time
2453 * AGL_GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
2457 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/* AGL_GMX_RX_PAUSE_DROP_TIME: records the 16-bit TIME field of a PAUSE
   packet dropped because the GMX RX FIFO was full; same layout on all
   models. */
2460 union cvmx_agl_gmx_rxx_pause_drop_time
2463 struct cvmx_agl_gmx_rxx_pause_drop_time_s
2465 #if __BYTE_ORDER == __BIG_ENDIAN
2466 uint64_t reserved_16_63 : 48;
2467 uint64_t status : 16; /**< Time extracted from the dropped PAUSE packet */
2469 uint64_t status : 16;
2470 uint64_t reserved_16_63 : 48;
2473 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xx;
2474 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
2475 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
2476 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
2477 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xx;
2478 struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xxp1;
2480 typedef union cvmx_agl_gmx_rxx_pause_drop_time cvmx_agl_gmx_rxx_pause_drop_time_t;
2483 * cvmx_agl_gmx_rx#_rx_inbnd
2485 * AGL_GMX_RX_INBND = RGMII InBand Link Status
2489 * These fields are only valid if the attached PHY is operating in RGMII mode
2490 * and supports the optional in-band status (see section 3.4.1 of the RGMII
2491 * specification, version 1.3 for more information).
2493 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/* AGL_GMX_RX_INBND: RGMII in-band link status (duplex/speed/status);
   only cn63xx model aliases are present — fields are valid only for an
   RGMII PHY with in-band status, per the comment above. */
2495 union cvmx_agl_gmx_rxx_rx_inbnd
2498 struct cvmx_agl_gmx_rxx_rx_inbnd_s
2500 #if __BYTE_ORDER == __BIG_ENDIAN
2501 uint64_t reserved_4_63 : 60;
2502 uint64_t duplex : 1; /**< RGMII Inbound LinkDuplex | NS
2505 uint64_t speed : 2; /**< RGMII Inbound LinkSpeed | NS
2510 uint64_t status : 1; /**< RGMII Inbound LinkStatus | NS
2514 uint64_t status : 1;
2516 uint64_t duplex : 1;
2517 uint64_t reserved_4_63 : 60;
2520 struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xx;
2521 struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xxp1;
2523 typedef union cvmx_agl_gmx_rxx_rx_inbnd cvmx_agl_gmx_rxx_rx_inbnd_t;
2526 * cvmx_agl_gmx_rx#_stats_ctl
2528 * AGL_GMX_RX_STATS_CTL = RX Stats Control register
2532 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
/* AGL_GMX_RX_STATS_CTL: single RD_CLR flag controlling clear-on-read
   behavior of the RX stats counters below; same layout on all models. */
2535 union cvmx_agl_gmx_rxx_stats_ctl
2538 struct cvmx_agl_gmx_rxx_stats_ctl_s
2540 #if __BYTE_ORDER == __BIG_ENDIAN
2541 uint64_t reserved_1_63 : 63;
2542 uint64_t rd_clr : 1; /**< RX Stats registers will clear on reads */
2544 uint64_t rd_clr : 1;
2545 uint64_t reserved_1_63 : 63;
2548 struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xx;
2549 struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1;
2550 struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx;
2551 struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1;
2552 struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xx;
2553 struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xxp1;
2555 typedef union cvmx_agl_gmx_rxx_stats_ctl cvmx_agl_gmx_rxx_stats_ctl_t;
2558 * cvmx_agl_gmx_rx#_stats_octs
2561 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2562 * - Counters will wrap
2563 * - Not reset when MIX*_CTL[RESET] is set to 1.
/* AGL_GMX_RX_STATS_OCTS: 48-bit wrapping octet counter for good packets;
   cleared per AGL_GMX_RX_STATS_CTL[RD_CLR] (see note above). */
2565 union cvmx_agl_gmx_rxx_stats_octs
2568 struct cvmx_agl_gmx_rxx_stats_octs_s
2570 #if __BYTE_ORDER == __BIG_ENDIAN
2571 uint64_t reserved_48_63 : 16;
2572 uint64_t cnt : 48; /**< Octet count of received good packets */
2575 uint64_t reserved_48_63 : 16;
2578 struct cvmx_agl_gmx_rxx_stats_octs_s cn52xx;
2579 struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1;
2580 struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx;
2581 struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1;
2582 struct cvmx_agl_gmx_rxx_stats_octs_s cn63xx;
2583 struct cvmx_agl_gmx_rxx_stats_octs_s cn63xxp1;
2585 typedef union cvmx_agl_gmx_rxx_stats_octs cvmx_agl_gmx_rxx_stats_octs_t;
2588 * cvmx_agl_gmx_rx#_stats_octs_ctl
2591 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2592 * - Counters will wrap
2593 * - Not reset when MIX*_CTL[RESET] is set to 1.
/* AGL_GMX_RX_STATS_OCTS_CTL: 48-bit wrapping octet counter for received
   pause packets; cleared per AGL_GMX_RX_STATS_CTL[RD_CLR] (see note above). */
2595 union cvmx_agl_gmx_rxx_stats_octs_ctl
2598 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s
2600 #if __BYTE_ORDER == __BIG_ENDIAN
2601 uint64_t reserved_48_63 : 16;
2602 uint64_t cnt : 48; /**< Octet count of received pause packets */
2605 uint64_t reserved_48_63 : 16;
2608 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xx;
2609 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
2610 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
2611 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
2612 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xx;
2613 struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xxp1;
2615 typedef union cvmx_agl_gmx_rxx_stats_octs_ctl cvmx_agl_gmx_rxx_stats_octs_ctl_t;
2618 * cvmx_agl_gmx_rx#_stats_octs_dmac
2621 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2622 * - Counters will wrap
2623 * - Not reset when MIX*_CTL[RESET] is set to 1.
/* AGL_GMX_RX_STATS_OCTS_DMAC: 48-bit wrapping octet counter for packets
   filtered by DMAC; cleared per AGL_GMX_RX_STATS_CTL[RD_CLR] (see note
   above). */
2625 union cvmx_agl_gmx_rxx_stats_octs_dmac
2628 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s
2630 #if __BYTE_ORDER == __BIG_ENDIAN
2631 uint64_t reserved_48_63 : 16;
2632 uint64_t cnt : 48; /**< Octet count of filtered dmac packets */
2635 uint64_t reserved_48_63 : 16;
2638 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xx;
2639 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
2640 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
2641 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
2642 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xx;
2643 struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xxp1;
2645 typedef union cvmx_agl_gmx_rxx_stats_octs_dmac cvmx_agl_gmx_rxx_stats_octs_dmac_t;
2648 * cvmx_agl_gmx_rx#_stats_octs_drp
2651 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
2652 * - Counters will wrap
2653 * - Not reset when MIX*_CTL[RESET] is set to 1.
/* AGL_GMX_RX_STATS_OCTS_DRP: 48-bit wrapping octet counter for dropped
   packets; cleared per AGL_GMX_RX_STATS_CTL[RD_CLR] (see note above). */
2655 union cvmx_agl_gmx_rxx_stats_octs_drp
2658 struct cvmx_agl_gmx_rxx_stats_octs_drp_s
2660 #if __BYTE_ORDER == __BIG_ENDIAN
2661 uint64_t reserved_48_63 : 16;
2662 uint64_t cnt : 48; /**< Octet count of dropped packets */
2665 uint64_t reserved_48_63 : 16;
2668 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xx;
2669 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
2670 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
2671 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
2672 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xx;
2673 struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xxp1;
2675 typedef union cvmx_agl_gmx_rxx_stats_octs_drp cvmx_agl_gmx_rxx_stats_octs_drp_t;
/**
 * cvmx_agl_gmx_rx#_stats_pkts
 *
 * AGL_GMX_RX_STATS_PKTS
 *
 * Count of good received packets - packets that are not recognized as PAUSE
 * packets, dropped due the DMAC filter, dropped due FIFO full status, or
 * have any other OPCODE (FCS, Length, etc).
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_pkts {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_pkts_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_32_63 : 32;
		uint64_t cnt : 32; /**< Count of received good packets */
#else
		uint64_t cnt : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_pkts cvmx_agl_gmx_rxx_stats_pkts_t;
/**
 * cvmx_agl_gmx_rx#_stats_pkts_bad
 *
 * AGL_GMX_RX_STATS_PKTS_BAD
 *
 * Count of all packets received with some error that were not dropped
 * either due to the dmac filter or lack of room in the receive FIFO.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_pkts_bad {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_32_63 : 32;
		uint64_t cnt : 32; /**< Count of bad packets */
#else
		uint64_t cnt : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_pkts_bad cvmx_agl_gmx_rxx_stats_pkts_bad_t;
/**
 * cvmx_agl_gmx_rx#_stats_pkts_ctl
 *
 * AGL_GMX_RX_STATS_PKTS_CTL
 *
 * Count of all packets received that were recognized as Flow Control or
 * PAUSE packets. PAUSE packets with any kind of error are counted in
 * AGL_GMX_RX_STATS_PKTS_BAD. Pause packets can be optionally dropped or
 * forwarded based on the AGL_GMX_RX_FRM_CTL[CTL_DRP] bit. This count
 * increments regardless of whether the packet is dropped. Pause packets
 * will never be counted in AGL_GMX_RX_STATS_PKTS. Packets dropped due the dmac
 * filter will be counted in AGL_GMX_RX_STATS_PKTS_DMAC and not here.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_pkts_ctl {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_32_63 : 32;
		uint64_t cnt : 32; /**< Count of received pause packets */
#else
		uint64_t cnt : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_pkts_ctl cvmx_agl_gmx_rxx_stats_pkts_ctl_t;
/**
 * cvmx_agl_gmx_rx#_stats_pkts_dmac
 *
 * AGL_GMX_RX_STATS_PKTS_DMAC
 *
 * Count of all packets received that were dropped by the dmac filter.
 * Packets that match the DMAC will be dropped and counted here regardless
 * of if they were bad packets. These packets will never be counted in
 * AGL_GMX_RX_STATS_PKTS.
 *
 * Some packets that were not able to satisify the DECISION_CNT may not
 * actually be dropped by Octeon, but they will be counted here as if they
 * were dropped.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_pkts_dmac {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_32_63 : 32;
		uint64_t cnt : 32; /**< Count of filtered dmac packets */
#else
		uint64_t cnt : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_pkts_dmac cvmx_agl_gmx_rxx_stats_pkts_dmac_t;
/**
 * cvmx_agl_gmx_rx#_stats_pkts_drp
 *
 * AGL_GMX_RX_STATS_PKTS_DRP
 *
 * Count of all packets received that were dropped due to a full receive
 * FIFO. This counts good and bad packets received - all packets dropped by
 * the FIFO. It does not count packets dropped by the dmac or pause packet
 * filters.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_stats_pkts_drp {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_32_63 : 32;
		uint64_t cnt : 32; /**< Count of dropped packets */
#else
		uint64_t cnt : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xx;
	struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_stats_pkts_drp cvmx_agl_gmx_rxx_stats_pkts_drp_t;
/**
 * cvmx_agl_gmx_rx#_udd_skp
 *
 * AGL_GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
 *
 * Notes:
 * (1) The skip bytes are part of the packet and will be sent down the NCB
 * packet interface and will be handled by PKI.
 *
 * (2) The system can determine if the UDD bytes are included in the FCS check
 * by using the FCSSEL field - if the FCS check is enabled.
 *
 * (3) Assume that the preamble/sfd is always at the start of the frame - even
 * before UDD bytes. In most cases, there will be no preamble in these
 * cases since it will be MII to MII communication without a PHY involved.
 *
 * (4) We can still do address filtering and control packet filtering is the
 * user desires.
 *
 * (5) UDD_SKP must be 0 in half-duplex operation unless
 * AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear. If AGL_GMX_RX_FRM_CTL[PRE_CHK] is set,
 * then UDD_SKP will normally be 8.
 *
 * (6) In all cases, the UDD bytes will be sent down the packet interface as
 * part of the packet. The UDD bytes are never stripped from the actual
 * packet.
 *
 * (7) If LEN != 0, then AGL_GMX_RX_FRM_CHK[LENERR] will be disabled and AGL_GMX_RX_INT_REG[LENERR] will be zero
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rxx_udd_skp {
	uint64_t u64;
	struct cvmx_agl_gmx_rxx_udd_skp_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_9_63 : 55;
		uint64_t fcssel : 1; /**< Include the skip bytes in the FCS calculation
		                          0 = all skip bytes are included in FCS
		                          1 = the skip bytes are not included in FCS */
		uint64_t reserved_7_7 : 1;
		uint64_t len : 7; /**< Amount of User-defined data before the start of
		                       the L2 data. Zero means L2 comes first. */
#else
		uint64_t len : 7;
		uint64_t reserved_7_7 : 1;
		uint64_t fcssel : 1;
		uint64_t reserved_9_63 : 55;
#endif
	} s;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn52xx;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn63xx;
	struct cvmx_agl_gmx_rxx_udd_skp_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rxx_udd_skp cvmx_agl_gmx_rxx_udd_skp_t;
/**
 * cvmx_agl_gmx_rx_bp_drop#
 *
 * AGL_GMX_RX_BP_DROP = FIFO mark for packet drop
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rx_bp_dropx {
	uint64_t u64;
	struct cvmx_agl_gmx_rx_bp_dropx_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_6_63 : 58;
		uint64_t mark : 6; /**< Number of 8B ticks to reserve in the RX FIFO.
		                        When the FIFO exceeds this count, packets will
		                        be dropped and not buffered.
		                        MARK should typically be programmed to 2.
		                        Failure to program correctly can lead to system
		                        instability. */
#else
		uint64_t mark : 6;
		uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn52xx;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn63xx;
	struct cvmx_agl_gmx_rx_bp_dropx_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rx_bp_dropx cvmx_agl_gmx_rx_bp_dropx_t;
/**
 * cvmx_agl_gmx_rx_bp_off#
 *
 * AGL_GMX_RX_BP_OFF = Lowater mark for packet drop
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rx_bp_offx {
	uint64_t u64;
	struct cvmx_agl_gmx_rx_bp_offx_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_6_63 : 58;
		uint64_t mark : 6; /**< Water mark (8B ticks) to deassert backpressure */
#else
		uint64_t mark : 6;
		uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_agl_gmx_rx_bp_offx_s cn52xx;
	struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1;
	struct cvmx_agl_gmx_rx_bp_offx_s cn56xx;
	struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1;
	struct cvmx_agl_gmx_rx_bp_offx_s cn63xx;
	struct cvmx_agl_gmx_rx_bp_offx_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rx_bp_offx cvmx_agl_gmx_rx_bp_offx_t;
/**
 * cvmx_agl_gmx_rx_bp_on#
 *
 * AGL_GMX_RX_BP_ON = Hiwater mark for port/interface backpressure
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rx_bp_onx {
	uint64_t u64;
	struct cvmx_agl_gmx_rx_bp_onx_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_9_63 : 55;
		uint64_t mark : 9; /**< Hiwater mark (8B ticks) for backpressure. */
#else
		uint64_t mark : 9;
		uint64_t reserved_9_63 : 55;
#endif
	} s;
	struct cvmx_agl_gmx_rx_bp_onx_s cn52xx;
	struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1;
	struct cvmx_agl_gmx_rx_bp_onx_s cn56xx;
	struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1;
	struct cvmx_agl_gmx_rx_bp_onx_s cn63xx;
	struct cvmx_agl_gmx_rx_bp_onx_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rx_bp_onx cvmx_agl_gmx_rx_bp_onx_t;
/**
 * cvmx_agl_gmx_rx_prt_info
 *
 * AGL_GMX_RX_PRT_INFO = state information for the ports
 *
 * Notes:
 * COMMIT[0], DROP[0] will be reset when MIX0_CTL[RESET] is set to 1.
 * COMMIT[1], DROP[1] will be reset when MIX1_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rx_prt_info {
	uint64_t u64;
	struct cvmx_agl_gmx_rx_prt_info_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_18_63 : 46;
		uint64_t drop : 2; /**< Port indication that data was dropped */
		uint64_t reserved_2_15 : 14;
		uint64_t commit : 2; /**< Port indication that SOP was accepted */
#else
		uint64_t commit : 2;
		uint64_t reserved_2_15 : 14;
		uint64_t drop : 2;
		uint64_t reserved_18_63 : 46;
#endif
	} s;
	struct cvmx_agl_gmx_rx_prt_info_s cn52xx;
	struct cvmx_agl_gmx_rx_prt_info_s cn52xxp1;
	struct cvmx_agl_gmx_rx_prt_info_cn56xx {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_17_63 : 47;
		uint64_t drop : 1; /**< Port indication that data was dropped */
		uint64_t reserved_1_15 : 15;
		uint64_t commit : 1; /**< Port indication that SOP was accepted */
#else
		uint64_t commit : 1;
		uint64_t reserved_1_15 : 15;
		uint64_t drop : 1;
		uint64_t reserved_17_63 : 47;
#endif
	} cn56xx;
	struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1;
	struct cvmx_agl_gmx_rx_prt_info_s cn63xx;
	struct cvmx_agl_gmx_rx_prt_info_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rx_prt_info cvmx_agl_gmx_rx_prt_info_t;
/**
 * cvmx_agl_gmx_rx_tx_status
 *
 * AGL_GMX_RX_TX_STATUS = GMX RX/TX Status
 *
 * Notes:
 * RX[0], TX[0] will be reset when MIX0_CTL[RESET] is set to 1.
 * RX[1], TX[1] will be reset when MIX1_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_rx_tx_status {
	uint64_t u64;
	struct cvmx_agl_gmx_rx_tx_status_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_6_63 : 58;
		uint64_t tx : 2; /**< Transmit data since last read */
		uint64_t reserved_2_3 : 2;
		uint64_t rx : 2; /**< Receive data since last read */
#else
		uint64_t rx : 2;
		uint64_t reserved_2_3 : 2;
		uint64_t tx : 2;
		uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_agl_gmx_rx_tx_status_s cn52xx;
	struct cvmx_agl_gmx_rx_tx_status_s cn52xxp1;
	struct cvmx_agl_gmx_rx_tx_status_cn56xx {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_5_63 : 59;
		uint64_t tx : 1; /**< Transmit data since last read */
		uint64_t reserved_1_3 : 3;
		uint64_t rx : 1; /**< Receive data since last read */
#else
		uint64_t rx : 1;
		uint64_t reserved_1_3 : 3;
		uint64_t tx : 1;
		uint64_t reserved_5_63 : 59;
#endif
	} cn56xx;
	struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1;
	struct cvmx_agl_gmx_rx_tx_status_s cn63xx;
	struct cvmx_agl_gmx_rx_tx_status_s cn63xxp1;
};
typedef union cvmx_agl_gmx_rx_tx_status cvmx_agl_gmx_rx_tx_status_t;
/**
 * cvmx_agl_gmx_smac#
 *
 * AGL_GMX_SMAC = Packet SMAC
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_smacx {
	uint64_t u64;
	struct cvmx_agl_gmx_smacx_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_48_63 : 16;
		uint64_t smac : 48; /**< The SMAC field is used for generating and
		                         accepting Control Pause packets */
#else
		uint64_t smac : 48;
		uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_agl_gmx_smacx_s cn52xx;
	struct cvmx_agl_gmx_smacx_s cn52xxp1;
	struct cvmx_agl_gmx_smacx_s cn56xx;
	struct cvmx_agl_gmx_smacx_s cn56xxp1;
	struct cvmx_agl_gmx_smacx_s cn63xx;
	struct cvmx_agl_gmx_smacx_s cn63xxp1;
};
typedef union cvmx_agl_gmx_smacx cvmx_agl_gmx_smacx_t;
/**
 * cvmx_agl_gmx_stat_bp
 *
 * AGL_GMX_STAT_BP = Number of cycles that the TX/Stats block has help up operation
 *
 * Notes:
 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
 */
union cvmx_agl_gmx_stat_bp {
	uint64_t u64;
	struct cvmx_agl_gmx_stat_bp_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_17_63 : 47;
		uint64_t bp : 1; /**< Current BP state */
		uint64_t cnt : 16; /**< Number of cycles that BP has been asserted
		                        Saturating counter */
#else
		uint64_t cnt : 16;
		uint64_t bp : 1;
		uint64_t reserved_17_63 : 47;
#endif
	} s;
	struct cvmx_agl_gmx_stat_bp_s cn52xx;
	struct cvmx_agl_gmx_stat_bp_s cn52xxp1;
	struct cvmx_agl_gmx_stat_bp_s cn56xx;
	struct cvmx_agl_gmx_stat_bp_s cn56xxp1;
	struct cvmx_agl_gmx_stat_bp_s cn63xx;
	struct cvmx_agl_gmx_stat_bp_s cn63xxp1;
};
typedef union cvmx_agl_gmx_stat_bp cvmx_agl_gmx_stat_bp_t;
/**
 * cvmx_agl_gmx_tx#_append
 *
 * AGL_GMX_TX_APPEND = Packet TX Append Control
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_append {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_append_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_4_63 : 60;
		uint64_t force_fcs : 1; /**< Append the Ethernet FCS on each pause packet
		                             when FCS is clear. Pause packets are normally
		                             padded to 60 bytes. If
		                             AGL_GMX_TX_MIN_PKT[MIN_SIZE] exceeds 59, then
		                             FORCE_FCS will not be used. */
		uint64_t fcs : 1; /**< Append the Ethernet FCS on each packet */
		uint64_t pad : 1; /**< Append PAD bytes such that min sized */
		uint64_t preamble : 1; /**< Prepend the Ethernet preamble on each transfer */
#else
		uint64_t preamble : 1;
		uint64_t pad : 1;
		uint64_t fcs : 1;
		uint64_t force_fcs : 1;
		uint64_t reserved_4_63 : 60;
#endif
	} s;
	struct cvmx_agl_gmx_txx_append_s cn52xx;
	struct cvmx_agl_gmx_txx_append_s cn52xxp1;
	struct cvmx_agl_gmx_txx_append_s cn56xx;
	struct cvmx_agl_gmx_txx_append_s cn56xxp1;
	struct cvmx_agl_gmx_txx_append_s cn63xx;
	struct cvmx_agl_gmx_txx_append_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_append cvmx_agl_gmx_txx_append_t;
/**
 * cvmx_agl_gmx_tx#_clk
 *
 * AGL_GMX_TX_CLK = RGMII TX Clock Generation Register
 *
 * Notes:
 * Normal Programming Values:
 * (1) RGMII, 1000Mbs   (AGL_GMX_PRT_CFG[SPEED]==1), CLK_CNT == 1
 * (2) RGMII, 10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 50/5
 * (3) MII,   10/100Mbs (AGL_GMX_PRT_CFG[SPEED]==0), CLK_CNT == 1
 *
 * Given a 125MHz PLL reference clock...
 * CLK_CNT ==  1 ==> 125.0MHz TXC clock period (8ns* 1)
 * CLK_CNT ==  5 ==>  25.0MHz TXC clock period (8ns* 5)
 * CLK_CNT == 50 ==>   2.5MHz TXC clock period (8ns*50)
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_clk {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_clk_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_6_63 : 58;
		uint64_t clk_cnt : 6; /**< Controls the RGMII TXC frequency
		                           TXC(period) = rgm_ref_clk(period)*CLK_CNT */
#else
		uint64_t clk_cnt : 6;
		uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_agl_gmx_txx_clk_s cn63xx;
	struct cvmx_agl_gmx_txx_clk_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_clk cvmx_agl_gmx_txx_clk_t;
/**
 * cvmx_agl_gmx_tx#_ctl
 *
 * AGL_GMX_TX_CTL = TX Control register
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_ctl {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_ctl_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_2_63 : 62;
		uint64_t xsdef_en : 1; /**< Enables the excessive deferral check for stats
		                            and interrupts */
		uint64_t xscol_en : 1; /**< Enables the excessive collision check for stats
		                            and interrupts */
#else
		uint64_t xscol_en : 1;
		uint64_t xsdef_en : 1;
		uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_agl_gmx_txx_ctl_s cn52xx;
	struct cvmx_agl_gmx_txx_ctl_s cn52xxp1;
	struct cvmx_agl_gmx_txx_ctl_s cn56xx;
	struct cvmx_agl_gmx_txx_ctl_s cn56xxp1;
	struct cvmx_agl_gmx_txx_ctl_s cn63xx;
	struct cvmx_agl_gmx_txx_ctl_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_ctl cvmx_agl_gmx_txx_ctl_t;
/**
 * cvmx_agl_gmx_tx#_min_pkt
 *
 * AGL_GMX_TX_MIN_PKT = Packet TX Min Size Packet (PAD upto min size)
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_min_pkt {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_min_pkt_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_8_63 : 56;
		uint64_t min_size : 8; /**< Min frame in bytes before the FCS is applied
		                            Padding is only appened when
		                            AGL_GMX_TX_APPEND[PAD] for the coresponding packet
		                            port is set. Packets will be padded to
		                            MIN_SIZE+1 The reset value will pad to 60 bytes. */
#else
		uint64_t min_size : 8;
		uint64_t reserved_8_63 : 56;
#endif
	} s;
	struct cvmx_agl_gmx_txx_min_pkt_s cn52xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1;
	struct cvmx_agl_gmx_txx_min_pkt_s cn56xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1;
	struct cvmx_agl_gmx_txx_min_pkt_s cn63xx;
	struct cvmx_agl_gmx_txx_min_pkt_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_min_pkt cvmx_agl_gmx_txx_min_pkt_t;
/**
 * cvmx_agl_gmx_tx#_pause_pkt_interval
 *
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets will be sent
 *
 * Notes:
 * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
 * designer. It is suggested that TIME be much greater than INTERVAL and
 * AGL_GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octane is ready for additional
 * data.
 *
 * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisify the
 * following rule:
 *
 * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
 *
 * where largest_pkt_size is that largest packet that the system can send
 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
 * of the PAUSE packet (normally 64B).
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_pause_pkt_interval {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_16_63 : 48;
		uint64_t interval : 16; /**< Arbitrate for a pause packet every (INTERVAL*512)
		                             bit-times.
		                             Normally, 0 < INTERVAL < AGL_GMX_TX_PAUSE_PKT_TIME
		                             INTERVAL=0, will only send a single PAUSE packet
		                             for each backpressure event */
#else
		uint64_t interval : 16;
		uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_pkt_interval cvmx_agl_gmx_txx_pause_pkt_interval_t;
/**
 * cvmx_agl_gmx_tx#_pause_pkt_time
 *
 * AGL_GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field
 *
 * Notes:
 * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
 * designer. It is suggested that TIME be much greater than INTERVAL and
 * AGL_GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octane is ready for additional
 * data.
 *
 * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisify the
 * following rule:
 *
 * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
 *
 * where largest_pkt_size is that largest packet that the system can send
 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
 * of the PAUSE packet (normally 64B).
 *
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_pause_pkt_time {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_16_63 : 48;
		uint64_t time : 16; /**< The pause_time field placed is outbnd pause pkts
		                         pause_time is in 512 bit-times
		                         Normally, TIME > AGL_GMX_TX_PAUSE_PKT_INTERVAL */
#else
		uint64_t time : 16;
		uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_pkt_time cvmx_agl_gmx_txx_pause_pkt_time_t;
/**
 * cvmx_agl_gmx_tx#_pause_togo
 *
 * AGL_GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_pause_togo {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_pause_togo_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_16_63 : 48;
		uint64_t time : 16; /**< Amount of time remaining to backpressure */
#else
		uint64_t time : 16;
		uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_togo_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_togo_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_togo_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_togo_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_togo cvmx_agl_gmx_txx_pause_togo_t;
/**
 * cvmx_agl_gmx_tx#_pause_zero
 *
 * AGL_GMX_TX_PAUSE_ZERO = Packet TX Amount of time remaining to backpressure
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_pause_zero {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_pause_zero_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_1_63 : 63;
		uint64_t send : 1; /**< When backpressure condition clear, send PAUSE
		                        packet with pause_time of zero to enable the
		                        channel */
#else
		uint64_t send : 1;
		uint64_t reserved_1_63 : 63;
#endif
	} s;
	struct cvmx_agl_gmx_txx_pause_zero_s cn52xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1;
	struct cvmx_agl_gmx_txx_pause_zero_s cn56xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1;
	struct cvmx_agl_gmx_txx_pause_zero_s cn63xx;
	struct cvmx_agl_gmx_txx_pause_zero_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_pause_zero cvmx_agl_gmx_txx_pause_zero_t;
/**
 * cvmx_agl_gmx_tx#_soft_pause
 *
 * AGL_GMX_TX_SOFT_PAUSE = Packet TX Software Pause
 *
 * Notes:
 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_soft_pause {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_soft_pause_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_16_63 : 48;
		uint64_t time : 16; /**< Back off the TX bus for (TIME*512) bit-times
		                         for full-duplex operation only */
#else
		uint64_t time : 16;
		uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_agl_gmx_txx_soft_pause_s cn52xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1;
	struct cvmx_agl_gmx_txx_soft_pause_s cn56xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1;
	struct cvmx_agl_gmx_txx_soft_pause_s cn63xx;
	struct cvmx_agl_gmx_txx_soft_pause_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_soft_pause cvmx_agl_gmx_txx_soft_pause_t;
/**
 * cvmx_agl_gmx_tx#_stat0
 *
 * AGL_GMX_TX_STAT0 = AGL_GMX_TX_STATS_XSDEF / AGL_GMX_TX_STATS_XSCOL
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat0 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat0_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t xsdef : 32; /**< Number of packets dropped (never successfully
		                          sent) due to excessive deferal */
		uint64_t xscol : 32; /**< Number of packets dropped (never successfully
		                          sent) due to excessive collision. Defined by
		                          AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
#else
		uint64_t xscol : 32;
		uint64_t xsdef : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat0_s cn52xx;
	struct cvmx_agl_gmx_txx_stat0_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat0_s cn56xx;
	struct cvmx_agl_gmx_txx_stat0_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat0_s cn63xx;
	struct cvmx_agl_gmx_txx_stat0_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_stat0 cvmx_agl_gmx_txx_stat0_t;
/**
 * cvmx_agl_gmx_tx#_stat1
 *
 * AGL_GMX_TX_STAT1 = AGL_GMX_TX_STATS_SCOL / AGL_GMX_TX_STATS_MCOL
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat1 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat1_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t scol : 32; /**< Number of packets sent with a single collision */
		uint64_t mcol : 32; /**< Number of packets sent with multiple collisions
		                         but < AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */
#else
		uint64_t mcol : 32;
		uint64_t scol : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat1_s cn52xx;
	struct cvmx_agl_gmx_txx_stat1_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat1_s cn56xx;
	struct cvmx_agl_gmx_txx_stat1_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat1_s cn63xx;
	struct cvmx_agl_gmx_txx_stat1_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_stat1 cvmx_agl_gmx_txx_stat1_t;
/**
 * cvmx_agl_gmx_tx#_stat2
 *
 * AGL_GMX_TX_STAT2 = AGL_GMX_TX_STATS_OCTS
 *
 * Notes:
 * - Octect counts are the sum of all data transmitted on the wire including
 * packet data, pad bytes, fcs bytes, pause bytes, and jam bytes. The octect
 * counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat2 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat2_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_48_63 : 16;
		uint64_t octs : 48; /**< Number of total octets sent on the interface.
		                         Does not count octets from frames that were
		                         truncated due to collisions in halfdup mode. */
#else
		uint64_t octs : 48;
		uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat2_s cn52xx;
	struct cvmx_agl_gmx_txx_stat2_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat2_s cn56xx;
	struct cvmx_agl_gmx_txx_stat2_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat2_s cn63xx;
	struct cvmx_agl_gmx_txx_stat2_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_stat2 cvmx_agl_gmx_txx_stat2_t;
/**
 * cvmx_agl_gmx_tx#_stat3
 *
 * AGL_GMX_TX_STAT3 = AGL_GMX_TX_STATS_PKTS
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat3 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat3_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_32_63 : 32;
		uint64_t pkts : 32; /**< Number of total frames sent on the interface.
		                         Does not count frames that were truncated due to
		                         collisions in halfdup mode. */
#else
		uint64_t pkts : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat3_s cn52xx;
	struct cvmx_agl_gmx_txx_stat3_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat3_s cn56xx;
	struct cvmx_agl_gmx_txx_stat3_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat3_s cn63xx;
	struct cvmx_agl_gmx_txx_stat3_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_stat3 cvmx_agl_gmx_txx_stat3_t;
/**
 * cvmx_agl_gmx_tx#_stat4
 *
 * AGL_GMX_TX_STAT4 = AGL_GMX_TX_STATS_HIST1 (64) / AGL_GMX_TX_STATS_HIST0 (<64)
 *
 * Notes:
 * - Packet length is the sum of all data transmitted on the wire for the given
 * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 * bytes. The octect counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat4 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat4_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t hist1 : 32; /**< Number of packets sent with an octet count of 64. */
		uint64_t hist0 : 32; /**< Number of packets sent with an octet count
		                          of < 64. */
#else
		uint64_t hist0 : 32;
		uint64_t hist1 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat4_s cn52xx;
	struct cvmx_agl_gmx_txx_stat4_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat4_s cn56xx;
	struct cvmx_agl_gmx_txx_stat4_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat4_s cn63xx;
	struct cvmx_agl_gmx_txx_stat4_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_stat4 cvmx_agl_gmx_txx_stat4_t;
/**
 * cvmx_agl_gmx_tx#_stat5
 *
 * AGL_GMX_TX_STAT5 = AGL_GMX_TX_STATS_HIST3 (128- 255) / AGL_GMX_TX_STATS_HIST2 (65- 127)
 *
 * Notes:
 * - Packet length is the sum of all data transmitted on the wire for the given
 * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 * bytes. The octect counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat5 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat5_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t hist3 : 32; /**< Number of packets sent with an octet count of
		                          128 - 255. */
		uint64_t hist2 : 32; /**< Number of packets sent with an octet count of
		                          65 - 127. */
#else
		uint64_t hist2 : 32;
		uint64_t hist3 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat5_s cn52xx;
	struct cvmx_agl_gmx_txx_stat5_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat5_s cn56xx;
	struct cvmx_agl_gmx_txx_stat5_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat5_s cn63xx;
	struct cvmx_agl_gmx_txx_stat5_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_stat5 cvmx_agl_gmx_txx_stat5_t;
/**
 * cvmx_agl_gmx_tx#_stat6
 *
 * AGL_GMX_TX_STAT6 = AGL_GMX_TX_STATS_HIST5 (512-1023) / AGL_GMX_TX_STATS_HIST4 (256-511)
 *
 * Notes:
 * - Packet length is the sum of all data transmitted on the wire for the given
 * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 * bytes. The octect counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
union cvmx_agl_gmx_txx_stat6 {
	uint64_t u64;
	struct cvmx_agl_gmx_txx_stat6_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t hist5 : 32; /**< Number of packets sent with an octet count of
		                          512 - 1023. */
		uint64_t hist4 : 32; /**< Number of packets sent with an octet count of
		                          256 - 511. */
#else
		uint64_t hist4 : 32;
		uint64_t hist5 : 32;
#endif
	} s;
	struct cvmx_agl_gmx_txx_stat6_s cn52xx;
	struct cvmx_agl_gmx_txx_stat6_s cn52xxp1;
	struct cvmx_agl_gmx_txx_stat6_s cn56xx;
	struct cvmx_agl_gmx_txx_stat6_s cn56xxp1;
	struct cvmx_agl_gmx_txx_stat6_s cn63xx;
	struct cvmx_agl_gmx_txx_stat6_s cn63xxp1;
};
typedef union cvmx_agl_gmx_txx_stat6 cvmx_agl_gmx_txx_stat6_t;
3810 * cvmx_agl_gmx_tx#_stat7
3812 * AGL_GMX_TX_STAT7 = AGL_GMX_TX_STATS_HIST7 (1024-1518) / AGL_GMX_TX_STATS_HIST6 (>1518)
3816 * - Packet length is the sum of all data transmitted on the wire for the given
3817 * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
3818 * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
3819 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3820 * - Counters will wrap
3821 * - Not reset when MIX*_CTL[RESET] is set to 1.
3823 union cvmx_agl_gmx_txx_stat7
3826 struct cvmx_agl_gmx_txx_stat7_s
3828 #if __BYTE_ORDER == __BIG_ENDIAN
3829 uint64_t hist7 : 32; /**< Number of packets sent with an octet count
3831 uint64_t hist6 : 32; /**< Number of packets sent with an octet count of
3834 uint64_t hist6 : 32;
3835 uint64_t hist7 : 32;
3838 struct cvmx_agl_gmx_txx_stat7_s cn52xx;
3839 struct cvmx_agl_gmx_txx_stat7_s cn52xxp1;
3840 struct cvmx_agl_gmx_txx_stat7_s cn56xx;
3841 struct cvmx_agl_gmx_txx_stat7_s cn56xxp1;
3842 struct cvmx_agl_gmx_txx_stat7_s cn63xx;
3843 struct cvmx_agl_gmx_txx_stat7_s cn63xxp1;
3845 typedef union cvmx_agl_gmx_txx_stat7 cvmx_agl_gmx_txx_stat7_t;
3848 * cvmx_agl_gmx_tx#_stat8
3850 * AGL_GMX_TX_STAT8 = AGL_GMX_TX_STATS_MCST / AGL_GMX_TX_STATS_BCST
3854 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3855 * - Counters will wrap
3856 * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the
3857 * packet. GMX assumes that the DMAC lies in the first 6 bytes of the packet
3858 * as per the 802.3 frame definition. If the system requires additional data
3859 * before the L2 header, then the MCST and BCST counters may not reflect
3860 * reality and should be ignored by software.
3861 * - Not reset when MIX*_CTL[RESET] is set to 1.
3863 union cvmx_agl_gmx_txx_stat8
3866 struct cvmx_agl_gmx_txx_stat8_s
3868 #if __BYTE_ORDER == __BIG_ENDIAN
3869 uint64_t mcst : 32; /**< Number of packets sent to multicast DMAC.
3870 Does not include BCST packets. */
3871 uint64_t bcst : 32; /**< Number of packets sent to broadcast DMAC.
3872 Does not include MCST packets. */
3878 struct cvmx_agl_gmx_txx_stat8_s cn52xx;
3879 struct cvmx_agl_gmx_txx_stat8_s cn52xxp1;
3880 struct cvmx_agl_gmx_txx_stat8_s cn56xx;
3881 struct cvmx_agl_gmx_txx_stat8_s cn56xxp1;
3882 struct cvmx_agl_gmx_txx_stat8_s cn63xx;
3883 struct cvmx_agl_gmx_txx_stat8_s cn63xxp1;
3885 typedef union cvmx_agl_gmx_txx_stat8 cvmx_agl_gmx_txx_stat8_t;
3888 * cvmx_agl_gmx_tx#_stat9
3890 * AGL_GMX_TX_STAT9 = AGL_GMX_TX_STATS_UNDFLW / AGL_GMX_TX_STATS_CTL
3894 * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set
3895 * - Counters will wrap
3896 * - Not reset when MIX*_CTL[RESET] is set to 1.
3898 union cvmx_agl_gmx_txx_stat9
3901 struct cvmx_agl_gmx_txx_stat9_s
3903 #if __BYTE_ORDER == __BIG_ENDIAN
3904 uint64_t undflw : 32; /**< Number of underflow packets */
3905 uint64_t ctl : 32; /**< Number of Control packets (PAUSE flow control)
3906 generated by GMX. It does not include control
3907 packets forwarded or generated by the PP's. */
3910 uint64_t undflw : 32;
3913 struct cvmx_agl_gmx_txx_stat9_s cn52xx;
3914 struct cvmx_agl_gmx_txx_stat9_s cn52xxp1;
3915 struct cvmx_agl_gmx_txx_stat9_s cn56xx;
3916 struct cvmx_agl_gmx_txx_stat9_s cn56xxp1;
3917 struct cvmx_agl_gmx_txx_stat9_s cn63xx;
3918 struct cvmx_agl_gmx_txx_stat9_s cn63xxp1;
3920 typedef union cvmx_agl_gmx_txx_stat9 cvmx_agl_gmx_txx_stat9_t;
3923 * cvmx_agl_gmx_tx#_stats_ctl
3925 * AGL_GMX_TX_STATS_CTL = TX Stats Control register
3929 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3932 union cvmx_agl_gmx_txx_stats_ctl
3935 struct cvmx_agl_gmx_txx_stats_ctl_s
3937 #if __BYTE_ORDER == __BIG_ENDIAN
3938 uint64_t reserved_1_63 : 63;
3939 uint64_t rd_clr : 1; /**< Stats registers will clear on reads */
3941 uint64_t rd_clr : 1;
3942 uint64_t reserved_1_63 : 63;
3945 struct cvmx_agl_gmx_txx_stats_ctl_s cn52xx;
3946 struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1;
3947 struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx;
3948 struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1;
3949 struct cvmx_agl_gmx_txx_stats_ctl_s cn63xx;
3950 struct cvmx_agl_gmx_txx_stats_ctl_s cn63xxp1;
3952 typedef union cvmx_agl_gmx_txx_stats_ctl cvmx_agl_gmx_txx_stats_ctl_t;
3955 * cvmx_agl_gmx_tx#_thresh
3957 * AGL_GMX_TX_THRESH = Packet TX Threshold
3961 * Additionally reset when MIX<prt>_CTL[RESET] is set to 1.
3964 union cvmx_agl_gmx_txx_thresh
3967 struct cvmx_agl_gmx_txx_thresh_s
3969 #if __BYTE_ORDER == __BIG_ENDIAN
3970 uint64_t reserved_6_63 : 58;
3971 uint64_t cnt : 6; /**< Number of 16B ticks to accumulate in the TX FIFO
3972 before sending on the packet interface
3973 This register should be large enough to prevent
3974 underflow on the packet interface and must never
3975 be set below 4. This register cannot exceed the
3976 the TX FIFO depth which is 128, 8B entries. */
3979 uint64_t reserved_6_63 : 58;
3982 struct cvmx_agl_gmx_txx_thresh_s cn52xx;
3983 struct cvmx_agl_gmx_txx_thresh_s cn52xxp1;
3984 struct cvmx_agl_gmx_txx_thresh_s cn56xx;
3985 struct cvmx_agl_gmx_txx_thresh_s cn56xxp1;
3986 struct cvmx_agl_gmx_txx_thresh_s cn63xx;
3987 struct cvmx_agl_gmx_txx_thresh_s cn63xxp1;
3989 typedef union cvmx_agl_gmx_txx_thresh cvmx_agl_gmx_txx_thresh_t;
3992 * cvmx_agl_gmx_tx_bp
3994 * AGL_GMX_TX_BP = Packet TX BackPressure Register
3998 * BP[0] will be reset when MIX0_CTL[RESET] is set to 1.
3999 * BP[1] will be reset when MIX1_CTL[RESET] is set to 1.
4001 union cvmx_agl_gmx_tx_bp
4004 struct cvmx_agl_gmx_tx_bp_s
4006 #if __BYTE_ORDER == __BIG_ENDIAN
4007 uint64_t reserved_2_63 : 62;
4008 uint64_t bp : 2; /**< Port BackPressure status
4010 1=Port should be back pressured */
4013 uint64_t reserved_2_63 : 62;
4016 struct cvmx_agl_gmx_tx_bp_s cn52xx;
4017 struct cvmx_agl_gmx_tx_bp_s cn52xxp1;
4018 struct cvmx_agl_gmx_tx_bp_cn56xx
4020 #if __BYTE_ORDER == __BIG_ENDIAN
4021 uint64_t reserved_1_63 : 63;
4022 uint64_t bp : 1; /**< Port BackPressure status
4024 1=Port should be back pressured */
4027 uint64_t reserved_1_63 : 63;
4030 struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1;
4031 struct cvmx_agl_gmx_tx_bp_s cn63xx;
4032 struct cvmx_agl_gmx_tx_bp_s cn63xxp1;
4034 typedef union cvmx_agl_gmx_tx_bp cvmx_agl_gmx_tx_bp_t;
4037 * cvmx_agl_gmx_tx_col_attempt
4039 * AGL_GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame
4043 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4046 union cvmx_agl_gmx_tx_col_attempt
4049 struct cvmx_agl_gmx_tx_col_attempt_s
4051 #if __BYTE_ORDER == __BIG_ENDIAN
4052 uint64_t reserved_5_63 : 59;
4053 uint64_t limit : 5; /**< Collision Attempts */
4056 uint64_t reserved_5_63 : 59;
4059 struct cvmx_agl_gmx_tx_col_attempt_s cn52xx;
4060 struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1;
4061 struct cvmx_agl_gmx_tx_col_attempt_s cn56xx;
4062 struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1;
4063 struct cvmx_agl_gmx_tx_col_attempt_s cn63xx;
4064 struct cvmx_agl_gmx_tx_col_attempt_s cn63xxp1;
4066 typedef union cvmx_agl_gmx_tx_col_attempt cvmx_agl_gmx_tx_col_attempt_t;
4069 * cvmx_agl_gmx_tx_ifg
4074 * AGL_GMX_TX_IFG = Packet TX Interframe Gap
4078 * * Programming IFG1 and IFG2.
4080 * For half-duplex systems that require IEEE 802.3 compatibility, IFG1 must
4081 * be in the range of 1-8, IFG2 must be in the range of 4-12, and the
4082 * IFG1+IFG2 sum must be 12.
4084 * For full-duplex systems that require IEEE 802.3 compatibility, IFG1 must
4085 * be in the range of 1-11, IFG2 must be in the range of 1-11, and the
4086 * IFG1+IFG2 sum must be 12.
4088 * For all other systems, IFG1 and IFG2 can be any value in the range of
4089 * 1-15. Allowing for a total possible IFG sum of 2-30.
4091 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4093 union cvmx_agl_gmx_tx_ifg
4096 struct cvmx_agl_gmx_tx_ifg_s
4098 #if __BYTE_ORDER == __BIG_ENDIAN
4099 uint64_t reserved_8_63 : 56;
4100 uint64_t ifg2 : 4; /**< 1/3 of the interframe gap timing
4101 If CRS is detected during IFG2, then the
4102 interFrameSpacing timer is not reset and a frame
4103 is transmited once the timer expires. */
4104 uint64_t ifg1 : 4; /**< 2/3 of the interframe gap timing
4105 If CRS is detected during IFG1, then the
4106 interFrameSpacing timer is reset and a frame is
4111 uint64_t reserved_8_63 : 56;
4114 struct cvmx_agl_gmx_tx_ifg_s cn52xx;
4115 struct cvmx_agl_gmx_tx_ifg_s cn52xxp1;
4116 struct cvmx_agl_gmx_tx_ifg_s cn56xx;
4117 struct cvmx_agl_gmx_tx_ifg_s cn56xxp1;
4118 struct cvmx_agl_gmx_tx_ifg_s cn63xx;
4119 struct cvmx_agl_gmx_tx_ifg_s cn63xxp1;
4121 typedef union cvmx_agl_gmx_tx_ifg cvmx_agl_gmx_tx_ifg_t;
4124 * cvmx_agl_gmx_tx_int_en
4126 * AGL_GMX_TX_INT_EN = Interrupt Enable
4130 * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
4131 * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
4132 * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
4134 union cvmx_agl_gmx_tx_int_en
4137 struct cvmx_agl_gmx_tx_int_en_s
4139 #if __BYTE_ORDER == __BIG_ENDIAN
4140 uint64_t reserved_22_63 : 42;
4141 uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
4142 sent due to XSCOL */
4143 uint64_t reserved_18_19 : 2;
4144 uint64_t late_col : 2; /**< TX Late Collision */
4145 uint64_t reserved_14_15 : 2;
4146 uint64_t xsdef : 2; /**< TX Excessive deferral (halfdup mode only) */
4147 uint64_t reserved_10_11 : 2;
4148 uint64_t xscol : 2; /**< TX Excessive collisions (halfdup mode only) */
4149 uint64_t reserved_4_7 : 4;
4150 uint64_t undflw : 2; /**< TX Underflow */
4151 uint64_t reserved_1_1 : 1;
4152 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4154 uint64_t pko_nxa : 1;
4155 uint64_t reserved_1_1 : 1;
4156 uint64_t undflw : 2;
4157 uint64_t reserved_4_7 : 4;
4159 uint64_t reserved_10_11 : 2;
4161 uint64_t reserved_14_15 : 2;
4162 uint64_t late_col : 2;
4163 uint64_t reserved_18_19 : 2;
4164 uint64_t ptp_lost : 2;
4165 uint64_t reserved_22_63 : 42;
4168 struct cvmx_agl_gmx_tx_int_en_cn52xx
4170 #if __BYTE_ORDER == __BIG_ENDIAN
4171 uint64_t reserved_18_63 : 46;
4172 uint64_t late_col : 2; /**< TX Late Collision */
4173 uint64_t reserved_14_15 : 2;
4174 uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
4175 uint64_t reserved_10_11 : 2;
4176 uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
4177 uint64_t reserved_4_7 : 4;
4178 uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
4179 uint64_t reserved_1_1 : 1;
4180 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4182 uint64_t pko_nxa : 1;
4183 uint64_t reserved_1_1 : 1;
4184 uint64_t undflw : 2;
4185 uint64_t reserved_4_7 : 4;
4187 uint64_t reserved_10_11 : 2;
4189 uint64_t reserved_14_15 : 2;
4190 uint64_t late_col : 2;
4191 uint64_t reserved_18_63 : 46;
4194 struct cvmx_agl_gmx_tx_int_en_cn52xx cn52xxp1;
4195 struct cvmx_agl_gmx_tx_int_en_cn56xx
4197 #if __BYTE_ORDER == __BIG_ENDIAN
4198 uint64_t reserved_17_63 : 47;
4199 uint64_t late_col : 1; /**< TX Late Collision */
4200 uint64_t reserved_13_15 : 3;
4201 uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
4202 uint64_t reserved_9_11 : 3;
4203 uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
4204 uint64_t reserved_3_7 : 5;
4205 uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
4206 uint64_t reserved_1_1 : 1;
4207 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4209 uint64_t pko_nxa : 1;
4210 uint64_t reserved_1_1 : 1;
4211 uint64_t undflw : 1;
4212 uint64_t reserved_3_7 : 5;
4214 uint64_t reserved_9_11 : 3;
4216 uint64_t reserved_13_15 : 3;
4217 uint64_t late_col : 1;
4218 uint64_t reserved_17_63 : 47;
4221 struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1;
4222 struct cvmx_agl_gmx_tx_int_en_s cn63xx;
4223 struct cvmx_agl_gmx_tx_int_en_s cn63xxp1;
4225 typedef union cvmx_agl_gmx_tx_int_en cvmx_agl_gmx_tx_int_en_t;
4228 * cvmx_agl_gmx_tx_int_reg
4230 * AGL_GMX_TX_INT_REG = Interrupt Register
4234 * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0], PTP_LOST[0] will be reset when MIX0_CTL[RESET] is set to 1.
4235 * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1], PTP_LOST[1] will be reset when MIX1_CTL[RESET] is set to 1.
4236 * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
4238 union cvmx_agl_gmx_tx_int_reg
4241 struct cvmx_agl_gmx_tx_int_reg_s
4243 #if __BYTE_ORDER == __BIG_ENDIAN
4244 uint64_t reserved_22_63 : 42;
4245 uint64_t ptp_lost : 2; /**< A packet with a PTP request was not able to be
4246 sent due to XSCOL */
4247 uint64_t reserved_18_19 : 2;
4248 uint64_t late_col : 2; /**< TX Late Collision */
4249 uint64_t reserved_14_15 : 2;
4250 uint64_t xsdef : 2; /**< TX Excessive deferral (halfdup mode only) */
4251 uint64_t reserved_10_11 : 2;
4252 uint64_t xscol : 2; /**< TX Excessive collisions (halfdup mode only) */
4253 uint64_t reserved_4_7 : 4;
4254 uint64_t undflw : 2; /**< TX Underflow */
4255 uint64_t reserved_1_1 : 1;
4256 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4258 uint64_t pko_nxa : 1;
4259 uint64_t reserved_1_1 : 1;
4260 uint64_t undflw : 2;
4261 uint64_t reserved_4_7 : 4;
4263 uint64_t reserved_10_11 : 2;
4265 uint64_t reserved_14_15 : 2;
4266 uint64_t late_col : 2;
4267 uint64_t reserved_18_19 : 2;
4268 uint64_t ptp_lost : 2;
4269 uint64_t reserved_22_63 : 42;
4272 struct cvmx_agl_gmx_tx_int_reg_cn52xx
4274 #if __BYTE_ORDER == __BIG_ENDIAN
4275 uint64_t reserved_18_63 : 46;
4276 uint64_t late_col : 2; /**< TX Late Collision */
4277 uint64_t reserved_14_15 : 2;
4278 uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */
4279 uint64_t reserved_10_11 : 2;
4280 uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */
4281 uint64_t reserved_4_7 : 4;
4282 uint64_t undflw : 2; /**< TX Underflow (MII mode only) */
4283 uint64_t reserved_1_1 : 1;
4284 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4286 uint64_t pko_nxa : 1;
4287 uint64_t reserved_1_1 : 1;
4288 uint64_t undflw : 2;
4289 uint64_t reserved_4_7 : 4;
4291 uint64_t reserved_10_11 : 2;
4293 uint64_t reserved_14_15 : 2;
4294 uint64_t late_col : 2;
4295 uint64_t reserved_18_63 : 46;
4298 struct cvmx_agl_gmx_tx_int_reg_cn52xx cn52xxp1;
4299 struct cvmx_agl_gmx_tx_int_reg_cn56xx
4301 #if __BYTE_ORDER == __BIG_ENDIAN
4302 uint64_t reserved_17_63 : 47;
4303 uint64_t late_col : 1; /**< TX Late Collision */
4304 uint64_t reserved_13_15 : 3;
4305 uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */
4306 uint64_t reserved_9_11 : 3;
4307 uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */
4308 uint64_t reserved_3_7 : 5;
4309 uint64_t undflw : 1; /**< TX Underflow (MII mode only) */
4310 uint64_t reserved_1_1 : 1;
4311 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
4313 uint64_t pko_nxa : 1;
4314 uint64_t reserved_1_1 : 1;
4315 uint64_t undflw : 1;
4316 uint64_t reserved_3_7 : 5;
4318 uint64_t reserved_9_11 : 3;
4320 uint64_t reserved_13_15 : 3;
4321 uint64_t late_col : 1;
4322 uint64_t reserved_17_63 : 47;
4325 struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1;
4326 struct cvmx_agl_gmx_tx_int_reg_s cn63xx;
4327 struct cvmx_agl_gmx_tx_int_reg_s cn63xxp1;
4329 typedef union cvmx_agl_gmx_tx_int_reg cvmx_agl_gmx_tx_int_reg_t;
4332 * cvmx_agl_gmx_tx_jam
4334 * AGL_GMX_TX_JAM = Packet TX Jam Pattern
4338 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4341 union cvmx_agl_gmx_tx_jam
4344 struct cvmx_agl_gmx_tx_jam_s
4346 #if __BYTE_ORDER == __BIG_ENDIAN
4347 uint64_t reserved_8_63 : 56;
4348 uint64_t jam : 8; /**< Jam pattern */
4351 uint64_t reserved_8_63 : 56;
4354 struct cvmx_agl_gmx_tx_jam_s cn52xx;
4355 struct cvmx_agl_gmx_tx_jam_s cn52xxp1;
4356 struct cvmx_agl_gmx_tx_jam_s cn56xx;
4357 struct cvmx_agl_gmx_tx_jam_s cn56xxp1;
4358 struct cvmx_agl_gmx_tx_jam_s cn63xx;
4359 struct cvmx_agl_gmx_tx_jam_s cn63xxp1;
4361 typedef union cvmx_agl_gmx_tx_jam cvmx_agl_gmx_tx_jam_t;
4364 * cvmx_agl_gmx_tx_lfsr
4366 * AGL_GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
4370 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4373 union cvmx_agl_gmx_tx_lfsr
4376 struct cvmx_agl_gmx_tx_lfsr_s
4378 #if __BYTE_ORDER == __BIG_ENDIAN
4379 uint64_t reserved_16_63 : 48;
4380 uint64_t lfsr : 16; /**< The current state of the LFSR used to feed random
4381 numbers to compute truncated binary exponential
4385 uint64_t reserved_16_63 : 48;
4388 struct cvmx_agl_gmx_tx_lfsr_s cn52xx;
4389 struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1;
4390 struct cvmx_agl_gmx_tx_lfsr_s cn56xx;
4391 struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1;
4392 struct cvmx_agl_gmx_tx_lfsr_s cn63xx;
4393 struct cvmx_agl_gmx_tx_lfsr_s cn63xxp1;
4395 typedef union cvmx_agl_gmx_tx_lfsr cvmx_agl_gmx_tx_lfsr_t;
4398 * cvmx_agl_gmx_tx_ovr_bp
4400 * AGL_GMX_TX_OVR_BP = Packet TX Override BackPressure
4404 * IGN_FULL[0], BP[0], EN[0] will be reset when MIX0_CTL[RESET] is set to 1.
4405 * IGN_FULL[1], BP[1], EN[1] will be reset when MIX1_CTL[RESET] is set to 1.
4407 union cvmx_agl_gmx_tx_ovr_bp
4410 struct cvmx_agl_gmx_tx_ovr_bp_s
4412 #if __BYTE_ORDER == __BIG_ENDIAN
4413 uint64_t reserved_10_63 : 54;
4414 uint64_t en : 2; /**< Per port Enable back pressure override */
4415 uint64_t reserved_6_7 : 2;
4416 uint64_t bp : 2; /**< Port BackPressure status to use
4418 1=Port should be back pressured */
4419 uint64_t reserved_2_3 : 2;
4420 uint64_t ign_full : 2; /**< Ignore the RX FIFO full when computing BP */
4422 uint64_t ign_full : 2;
4423 uint64_t reserved_2_3 : 2;
4425 uint64_t reserved_6_7 : 2;
4427 uint64_t reserved_10_63 : 54;
4430 struct cvmx_agl_gmx_tx_ovr_bp_s cn52xx;
4431 struct cvmx_agl_gmx_tx_ovr_bp_s cn52xxp1;
4432 struct cvmx_agl_gmx_tx_ovr_bp_cn56xx
4434 #if __BYTE_ORDER == __BIG_ENDIAN
4435 uint64_t reserved_9_63 : 55;
4436 uint64_t en : 1; /**< Per port Enable back pressure override */
4437 uint64_t reserved_5_7 : 3;
4438 uint64_t bp : 1; /**< Port BackPressure status to use
4440 1=Port should be back pressured */
4441 uint64_t reserved_1_3 : 3;
4442 uint64_t ign_full : 1; /**< Ignore the RX FIFO full when computing BP */
4444 uint64_t ign_full : 1;
4445 uint64_t reserved_1_3 : 3;
4447 uint64_t reserved_5_7 : 3;
4449 uint64_t reserved_9_63 : 55;
4452 struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1;
4453 struct cvmx_agl_gmx_tx_ovr_bp_s cn63xx;
4454 struct cvmx_agl_gmx_tx_ovr_bp_s cn63xxp1;
4456 typedef union cvmx_agl_gmx_tx_ovr_bp cvmx_agl_gmx_tx_ovr_bp_t;
4459 * cvmx_agl_gmx_tx_pause_pkt_dmac
4461 * AGL_GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field
4465 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4468 union cvmx_agl_gmx_tx_pause_pkt_dmac
4471 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s
4473 #if __BYTE_ORDER == __BIG_ENDIAN
4474 uint64_t reserved_48_63 : 16;
4475 uint64_t dmac : 48; /**< The DMAC field placed is outbnd pause pkts */
4478 uint64_t reserved_48_63 : 16;
4481 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xx;
4482 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1;
4483 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx;
4484 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1;
4485 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xx;
4486 struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xxp1;
4488 typedef union cvmx_agl_gmx_tx_pause_pkt_dmac cvmx_agl_gmx_tx_pause_pkt_dmac_t;
4491 * cvmx_agl_gmx_tx_pause_pkt_type
4493 * AGL_GMX_TX_PAUSE_PKT_TYPE = Packet TX Pause Packet TYPE field
4497 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
4500 union cvmx_agl_gmx_tx_pause_pkt_type
4503 struct cvmx_agl_gmx_tx_pause_pkt_type_s
4505 #if __BYTE_ORDER == __BIG_ENDIAN
4506 uint64_t reserved_16_63 : 48;
4507 uint64_t type : 16; /**< The TYPE field placed is outbnd pause pkts */
4510 uint64_t reserved_16_63 : 48;
4513 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xx;
4514 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1;
4515 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx;
4516 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1;
4517 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xx;
4518 struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xxp1;
4520 typedef union cvmx_agl_gmx_tx_pause_pkt_type cvmx_agl_gmx_tx_pause_pkt_type_t;
4525 * AGL_PRT_CTL = AGL Port Control
4529 * AGL_PRT0_CTL will be reset when MIX0_CTL[RESET] is set to 1.
4530 * AGL_PRT1_CTL will be reset when MIX1_CTL[RESET] is set to 1.
4532 union cvmx_agl_prtx_ctl
4535 struct cvmx_agl_prtx_ctl_s
4537 #if __BYTE_ORDER == __BIG_ENDIAN
4538 uint64_t drv_byp : 1; /**< Bypass the compensation controller and use
4539 DRV_NCTL and DRV_PCTL
4540 Note: the reset value was changed from pass1
4542 uint64_t reserved_62_62 : 1;
4543 uint64_t cmp_pctl : 6; /**< PCTL drive strength from the compensation ctl */
4544 uint64_t reserved_54_55 : 2;
4545 uint64_t cmp_nctl : 6; /**< NCTL drive strength from the compensation ctl */
4546 uint64_t reserved_46_47 : 2;
4547 uint64_t drv_pctl : 6; /**< PCTL drive strength to use in bypass mode
4548 Reset value of 19 is for 50 ohm termination */
4549 uint64_t reserved_38_39 : 2;
4550 uint64_t drv_nctl : 6; /**< NCTL drive strength to use in bypass mode
4551 Reset value of 15 is for 50 ohm termination */
4552 uint64_t reserved_29_31 : 3;
4553 uint64_t clk_set : 5; /**< The clock delay as determined by the DLL */
4554 uint64_t clkrx_byp : 1; /**< Bypass the RX clock delay setting
4555 Skews RXC from RXD,RXCTL in RGMII mode
4556 By default, HW internally shifts the RXC clock
4557 to sample RXD,RXCTL assuming clock and data and
4558 sourced synchronously from the link partner.
4559 In MII mode, the CLKRX_BYP is forced to 1. */
4560 uint64_t reserved_21_22 : 2;
4561 uint64_t clkrx_set : 5; /**< RX clock delay setting to use in bypass mode
4562 Skews RXC from RXD in RGMII mode */
4563 uint64_t clktx_byp : 1; /**< Bypass the TX clock delay setting
4564 Skews TXC from TXD,TXCTL in RGMII mode
4565 Skews RXC from RXD,RXCTL in RGMII mode
4566 By default, clock and data and sourced
4568 In MII mode, the CLKRX_BYP is forced to 1. */
4569 uint64_t reserved_13_14 : 2;
4570 uint64_t clktx_set : 5; /**< TX clock delay setting to use in bypass mode
4571 Skews TXC from TXD in RGMII mode */
4572 uint64_t reserved_5_7 : 3;
4573 uint64_t dllrst : 1; /**< DLL Reset */
4574 uint64_t comp : 1; /**< Compensation Enable */
4575 uint64_t enable : 1; /**< Port Enable
4576 Note: the reset value was changed from pass1
4578 uint64_t clkrst : 1; /**< Clock Tree Reset */
4579 uint64_t mode : 1; /**< Port Mode
4580 MODE must be set the same for all ports in which
4581 AGL_PRTx_CTL[ENABLE] is set.
4586 uint64_t clkrst : 1;
4587 uint64_t enable : 1;
4589 uint64_t dllrst : 1;
4590 uint64_t reserved_5_7 : 3;
4591 uint64_t clktx_set : 5;
4592 uint64_t reserved_13_14 : 2;
4593 uint64_t clktx_byp : 1;
4594 uint64_t clkrx_set : 5;
4595 uint64_t reserved_21_22 : 2;
4596 uint64_t clkrx_byp : 1;
4597 uint64_t clk_set : 5;
4598 uint64_t reserved_29_31 : 3;
4599 uint64_t drv_nctl : 6;
4600 uint64_t reserved_38_39 : 2;
4601 uint64_t drv_pctl : 6;
4602 uint64_t reserved_46_47 : 2;
4603 uint64_t cmp_nctl : 6;
4604 uint64_t reserved_54_55 : 2;
4605 uint64_t cmp_pctl : 6;
4606 uint64_t reserved_62_62 : 1;
4607 uint64_t drv_byp : 1;
4610 struct cvmx_agl_prtx_ctl_s cn63xx;
4611 struct cvmx_agl_prtx_ctl_s cn63xxp1;
4613 typedef union cvmx_agl_prtx_ctl cvmx_agl_prtx_ctl_t;