1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
/**
 * Configuration and status register (CSR) type definitions for
 * Octeon gmxx.
 *
 * This file is auto generated. Do not edit.
 */
52 #ifndef __CVMX_GMXX_TYPEDEFS_H__
53 #define __CVMX_GMXX_TYPEDEFS_H__
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_BAD_REG(unsigned long block_id)
{
	/* Debug builds: warn if block_id is out of range for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_BAD_REG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_BAD_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180008000518ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_BIST(unsigned long block_id)
{
	/* Debug builds: warn if block_id is out of range for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_BIST(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_BIST(block_id) (CVMX_ADD_IO_SEG(0x0001180008000400ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_CLK_EN(unsigned long block_id)
{
	/* Debug builds: warn if block_id is out of range for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_CLK_EN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_CLK_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800080007F0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_HG2_CONTROL(unsigned long block_id)
{
	/* Debug builds: warn if block_id is out of range for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_HG2_CONTROL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_HG2_CONTROL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000550ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_INF_MODE(unsigned long block_id)
{
	/* Debug builds: warn if block_id is out of range for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_INF_MODE(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_INF_MODE(block_id) (CVMX_ADD_IO_SEG(0x00011800080007F8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_NXA_ADR(unsigned long block_id)
{
	/* Debug builds: warn if block_id is out of range for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_NXA_ADR(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_NXA_ADR(block_id) (CVMX_ADD_IO_SEG(0x0001180008000510ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_PRTX_CBFC_CTL(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model.
	 * Note: offset does not participate in the address; only block_id does. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset == 0)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_PRTX_CBFC_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_PRTX_CBFC_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000580ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_PRTX_CFG(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_PRTX_CFG(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_PRTX_CFG(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000010ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM0(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_ADR_CAM0(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000180ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM1(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_ADR_CAM1(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000188ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM2(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_ADR_CAM2(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000190ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM3(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_ADR_CAM3(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000198ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM4(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_ADR_CAM4(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080001A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM5(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_ADR_CAM5(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080001A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CAM_EN(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_ADR_CAM_EN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000108ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_ADR_CTL(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_ADR_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_ADR_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000100ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_DECISION(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_DECISION(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_DECISION(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_FRM_CHK(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_FRM_CHK(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_FRM_CHK(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_FRM_CTL(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_FRM_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_FRM_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000018ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_FRM_MAX(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_RXX_FRM_MAX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_FRM_MIN(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_RXX_FRM_MIN(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_IFG(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_IFG(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_IFG(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000058ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_INT_EN(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_INT_EN(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_INT_EN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000008ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_INT_REG(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_INT_REG(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_INT_REG(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000000ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_JABBER(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_JABBER(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_JABBER(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000038ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_PAUSE_DROP_TIME(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_PAUSE_DROP_TIME(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_PAUSE_DROP_TIME(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000068ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_RX_INBND(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_RXX_RX_INBND(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_STATS_CTL(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000050ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_OCTS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000088ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_CTL(unsigned long offset, unsigned long block_id)
{
	/* Debug builds: warn if (offset, block_id) is invalid for the running model. */
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_OCTS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000098ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_RX(offset)_STATS_OCTS_DMAC; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DMAC(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DMAC(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_OCTS_DMAC(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_RX(offset)_STATS_OCTS_DRP; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DRP(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_OCTS_DRP(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_OCTS_DRP(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_RX(offset)_STATS_PKTS; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_PKTS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_RX(offset)_STATS_PKTS_BAD; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_BAD(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_BAD(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_PKTS_BAD(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_RX(offset)_STATS_PKTS_CTL; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_PKTS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000090ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_RX(offset)_STATS_PKTS_DMAC; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DMAC(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DMAC(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_PKTS_DMAC(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_RX(offset)_STATS_PKTS_DRP; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DRP(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_STATS_PKTS_DRP(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_STATS_PKTS_DRP(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080000B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_RX(offset)_UDD_SKP; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RXX_UDD_SKP(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RXX_UDD_SKP(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_RXX_UDD_SKP(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_RX_BP_DROP(offset); with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RX_BP_DROPX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RX_BP_DROPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_GMXX_RX_BP_DROPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000420ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
/* CSR address of GMX(block)_RX_BP_OFF(offset); with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RX_BP_OFFX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RX_BP_OFFX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_GMXX_RX_BP_OFFX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000460ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
/* CSR address of GMX(block)_RX_BP_ON(offset); with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RX_BP_ONX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_RX_BP_ONX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_GMXX_RX_BP_ONX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000440ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
/* CSR address of GMX(block)_RX_HG2_STATUS (HiGig2; CN52XX/CN56XX/CN63XX only);
 * with address checking enabled, warns on an invalid block_id. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RX_HG2_STATUS(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_HG2_STATUS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_HG2_STATUS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000548ull) + ((block_id) & 1) * 0x8000000ull)
#endif
/* CSR address of GMX(block)_RX_PASS_EN (CN38XX/CN58XX only); with address
 * checking enabled, warns on an invalid block_id. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RX_PASS_EN(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_GMXX_RX_PASS_EN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_PASS_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
/* CSR address of GMX(block)_RX_PASS_MAP(offset) (CN38XX/CN58XX only); with
 * address checking enabled, warns on an invalid (offset, block_id) pair. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RX_PASS_MAPX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 15)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 15)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_RX_PASS_MAPX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
/* CSR address of GMX(block)_RX_PRTS; with address checking enabled,
 * warns on an invalid block_id for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RX_PRTS(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_PRTS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_PRTS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000410ull) + ((block_id) & 1) * 0x8000000ull)
#endif
/* CSR address of GMX(block)_RX_PRT_INFO; with address checking enabled,
 * warns on an invalid block_id for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RX_PRT_INFO(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_PRT_INFO(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_PRT_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800080004E8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
/* CSR address of GMX(block)_RX_TX_STATUS (CN30XX/CN31XX/CN50XX only; single
 * block, so block_id does not affect the address); with address checking
 * enabled, warns on an invalid block_id. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RX_TX_STATUS(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_TX_STATUS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080007E8ull);
}
#else
#define CVMX_GMXX_RX_TX_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800080007E8ull))
#endif
/* CSR address of GMX(block)_RX_XAUI_BAD_COL (CN52XX/CN56XX/CN63XX only);
 * with address checking enabled, warns on an invalid block_id. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RX_XAUI_BAD_COL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_XAUI_BAD_COL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_XAUI_BAD_COL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000538ull) + ((block_id) & 1) * 0x8000000ull)
#endif
/* CSR address of GMX(block)_RX_XAUI_CTL (CN52XX/CN56XX/CN63XX only);
 * with address checking enabled, warns on an invalid block_id. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_RX_XAUI_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_RX_XAUI_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000530ull) + ((block_id) & 1) * 0x8000000ull)
#endif
/* CSR address of GMX(block)_SMAC(offset); with address checking enabled,
 * warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_SMACX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_SMACX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_SMACX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000230ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_SOFT_BIST (CN63XX only; fixed address);
 * with address checking enabled, warns on an invalid block_id. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_SOFT_BIST(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_SOFT_BIST(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080007E8ull);
}
#else
#define CVMX_GMXX_SOFT_BIST(block_id) (CVMX_ADD_IO_SEG(0x00011800080007E8ull))
#endif
/* CSR address of GMX(block)_STAT_BP; with address checking enabled,
 * warns on an invalid block_id for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_STAT_BP(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_STAT_BP(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_STAT_BP(block_id) (CVMX_ADD_IO_SEG(0x0001180008000520ull) + ((block_id) & 1) * 0x8000000ull)
#endif
/* CSR address of GMX(block)_TX(offset)_APPEND; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_TXX_APPEND(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_APPEND(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_APPEND(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000218ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_TX(offset)_BURST; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_TXX_BURST(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_BURST(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_BURST(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000228ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_TX(offset)_CBFC_XOFF (CN52XX/CN56XX/CN63XX only;
 * only offset 0 exists, and offset does not affect the address); with address
 * checking enabled, warns on an invalid (offset, block_id) pair. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_TXX_CBFC_XOFF(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset == 0)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_CBFC_XOFF(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TXX_CBFC_XOFF(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080005A0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
/* CSR address of GMX(block)_TX(offset)_CBFC_XON (CN52XX/CN56XX/CN63XX only;
 * only offset 0 exists, and offset does not affect the address); with address
 * checking enabled, warns on an invalid (offset, block_id) pair. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_TXX_CBFC_XON(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset == 0)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset == 0)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset == 0)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_CBFC_XON(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TXX_CBFC_XON(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080005C0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
/* CSR address of GMX(block)_TX(offset)_CLK (not present on CN52XX/CN56XX/
 * CN63XX); with address checking enabled, warns on an invalid pair. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_TXX_CLK(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_TXX_CLK(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_CLK(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_TX(offset)_CTL; with address checking enabled,
 * warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_TXX_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000270ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_TX(offset)_MIN_PKT; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_TXX_MIN_PKT(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_MIN_PKT(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_MIN_PKT(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000240ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_TX(offset)_PAUSE_PKT_INTERVAL; with address
 * checking enabled, warns on an invalid (offset, block_id) pair. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000248ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_TX(offset)_PAUSE_PKT_TIME; with address
 * checking enabled, warns on an invalid (offset, block_id) pair. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_PAUSE_PKT_TIME(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_PAUSE_PKT_TIME(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000238ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_TX(offset)_PAUSE_TOGO; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_TXX_PAUSE_TOGO(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_PAUSE_TOGO(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_PAUSE_TOGO(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000258ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
/* CSR address of GMX(block)_TX(offset)_PAUSE_ZERO; with address checking
 * enabled, warns when (offset, block_id) is invalid for the running model. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_GMXX_TXX_PAUSE_ZERO(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_PAUSE_ZERO(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_PAUSE_ZERO(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000260ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_SGMII_CTL; only valid on SGMII-capable models (CN52XX/CN56XX/CN63XX). */
static inline uint64_t CVMX_GMXX_TXX_SGMII_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_SGMII_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_SGMII_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000300ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_SLOT; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_SLOT(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_SLOT(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_SLOT(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000220ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_SOFT_PAUSE; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_SOFT_PAUSE(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_SOFT_PAUSE(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_SOFT_PAUSE(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000250ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_STAT0; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_STAT0(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT0(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_STAT0(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000280ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_STAT1; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_STAT1(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT1(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_STAT1(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000288ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_STAT2; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_STAT2(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT2(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_STAT2(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000290ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_STAT3; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_STAT3(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT3(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_STAT3(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000298ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_STAT4; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_STAT4(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT4(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_STAT4(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002A0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_STAT5; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_STAT5(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT5(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_STAT5(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002A8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_STAT6; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_STAT6(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT6(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_STAT6(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002B0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_STAT7; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_STAT7(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT7(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_STAT7(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002B8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_STAT8; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_STAT8(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT8(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_STAT8(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002C0ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_STAT9; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_STAT9(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STAT9(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_STAT9(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800080002C8ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_STATS_CTL; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_STATS_CTL(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_STATS_CTL(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_STATS_CTL(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000268ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX(offset)_THRESH; warns on invalid (offset, block_id) for the running chip. */
static inline uint64_t CVMX_GMXX_TXX_THRESH(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 2)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && (((offset <= 3)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 3)) && ((block_id <= 1)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 3)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TXX_THRESH(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048;
}
#else
#define CVMX_GMXX_TXX_THRESH(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000210ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_BP; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_BP(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_BP(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_BP(block_id) (CVMX_ADD_IO_SEG(0x00011800080004D0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX_TX_CLK_MSK(offset); only exists on CN30XX/CN50XX (block_id must be 0). */
static inline uint64_t CVMX_GMXX_TX_CLK_MSKX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset <= 1)) && ((block_id == 0)))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset <= 1)) && ((block_id == 0))))))
		cvmx_warn("CVMX_GMXX_TX_CLK_MSKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8;
}
#else
#define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_COL_ATTEMPT; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_COL_ATTEMPT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_COL_ATTEMPT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_COL_ATTEMPT(block_id) (CVMX_ADD_IO_SEG(0x0001180008000498ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_CORRUPT; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_CORRUPT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_CORRUPT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_CORRUPT(block_id) (CVMX_ADD_IO_SEG(0x00011800080004D8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_HG2_REG1; only valid on HiGig2-capable models (CN52XX/CN56XX/CN63XX). */
static inline uint64_t CVMX_GMXX_TX_HG2_REG1(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_HG2_REG1(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_HG2_REG1(block_id) (CVMX_ADD_IO_SEG(0x0001180008000558ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_HG2_REG2; only valid on HiGig2-capable models (CN52XX/CN56XX/CN63XX). */
static inline uint64_t CVMX_GMXX_TX_HG2_REG2(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_HG2_REG2(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_HG2_REG2(block_id) (CVMX_ADD_IO_SEG(0x0001180008000560ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_IFG; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_IFG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_IFG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_IFG(block_id) (CVMX_ADD_IO_SEG(0x0001180008000488ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_INT_EN; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_INT_EN(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_INT_EN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x0001180008000508ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_INT_REG; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_INT_REG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_INT_REG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180008000500ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_JAM; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_JAM(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_JAM(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_JAM(block_id) (CVMX_ADD_IO_SEG(0x0001180008000490ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_LFSR; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_LFSR(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_LFSR(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_LFSR(block_id) (CVMX_ADD_IO_SEG(0x00011800080004F8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_OVR_BP; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_OVR_BP(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_OVR_BP(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_OVR_BP(block_id) (CVMX_ADD_IO_SEG(0x00011800080004C8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_PAUSE_PKT_DMAC; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_DMAC(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_DMAC(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_PAUSE_PKT_DMAC(block_id) (CVMX_ADD_IO_SEG(0x00011800080004A0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_PAUSE_PKT_TYPE; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_TYPE(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_PAUSE_PKT_TYPE(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_PAUSE_PKT_TYPE(block_id) (CVMX_ADD_IO_SEG(0x00011800080004A8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_PRTS; warns on invalid block_id for the running chip. */
static inline uint64_t CVMX_GMXX_TX_PRTS(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_GMXX_TX_PRTS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_PRTS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000480ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_SPI_CTL; only valid on SPI-capable models (CN38XX/CN58XX). */
static inline uint64_t CVMX_GMXX_TX_SPI_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_GMXX_TX_SPI_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_SPI_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_SPI_DRAIN; only valid on SPI-capable models (CN38XX/CN58XX). */
static inline uint64_t CVMX_GMXX_TX_SPI_DRAIN(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_GMXX_TX_SPI_DRAIN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_SPI_DRAIN(block_id) (CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_SPI_MAX; only valid on SPI-capable models (CN38XX/CN58XX). */
static inline uint64_t CVMX_GMXX_TX_SPI_MAX(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_GMXX_TX_SPI_MAX(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_SPI_MAX(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_SPI_ROUND(offset); only exists on CN58XX (offset 0-31, block_id 0-1). */
static inline uint64_t CVMX_GMXX_TX_SPI_ROUNDX(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && (((offset <= 31)) && ((block_id <= 1))))))
		cvmx_warn("CVMX_GMXX_TX_SPI_ROUNDX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	return CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8;
}
#else
#define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of GMX(block_id)_TX_SPI_THRESH; only valid on SPI-capable models (CN38XX/CN58XX). */
static inline uint64_t CVMX_GMXX_TX_SPI_THRESH(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_GMXX_TX_SPI_THRESH(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull;
}
#else
#define CVMX_GMXX_TX_SPI_THRESH(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull)
#endif
/* GMX(block)_TX_XAUI_CTL CSR accessor: 0x0001180008000528 + (block_id & 1) *
 * 0x8000000. Valid block_id per the visible checks: 0 on CN52XX and CN63XX,
 * 0..1 on CN56XX; the inline form warns otherwise.
 * NOTE(review): excerpt is missing the function braces, "if (!(" line and
 * #else/#endif of the checking conditional from the generated original. */
1686 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1687 static inline uint64_t CVMX_GMXX_TX_XAUI_CTL(unsigned long block_id)
1690 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
1691 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
1692 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
1693 cvmx_warn("CVMX_GMXX_TX_XAUI_CTL(%lu) is invalid on this chip\n", block_id);
1694 return CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 1) * 0x8000000ull;
/* Non-checking macro variant with the same address computation. */
1697 #define CVMX_GMXX_TX_XAUI_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000528ull) + ((block_id) & 1) * 0x8000000ull)
/* GMX(block)_XAUI_EXT_LOOPBACK CSR accessor: 0x0001180008000540 +
 * (block_id & 1) * 0x8000000. Valid block_id per the visible checks: 0 on
 * CN52XX and CN63XX, 0..1 on CN56XX; the inline form warns otherwise.
 * NOTE(review): excerpt is missing the function braces, "if (!(" line and
 * #else/#endif of the checking conditional from the generated original. */
1699 #if CVMX_ENABLE_CSR_ADDRESS_CHECKING
1700 static inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id)
1703 (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
1704 (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
1705 (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
1706 cvmx_warn("CVMX_GMXX_XAUI_EXT_LOOPBACK(%lu) is invalid on this chip\n", block_id);
1707 return CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 1) * 0x8000000ull;
/* Non-checking macro variant with the same address computation. */
1710 #define CVMX_GMXX_XAUI_EXT_LOOPBACK(block_id) (CVMX_ADD_IO_SEG(0x0001180008000540ull) + ((block_id) & 1) * 0x8000000ull)
1716 * GMX_BAD_REG = A collection of things that have gone very, very wrong
1720 * In XAUI mode, only the lsb (corresponding to port0) of INB_NXA, LOSTSTAT, OUT_OVR, are used.
1723 union cvmx_gmxx_bad_reg
1726 struct cvmx_gmxx_bad_reg_s
1728 #if __BYTE_ORDER == __BIG_ENDIAN
1729 uint64_t reserved_31_63 : 33;
1730 uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */
1731 uint64_t statovr : 1; /**< TX Statistics overflow
1732 The common FIFO to SGMII and XAUI had an overflow
1733 TX Stats are corrupted */
1734 uint64_t loststat : 4; /**< TX Statistics data was over-written
1735 In SGMII, one bit per port
1736 In XAUI, only port0 is used
1737 TX Stats are corrupted */
1738 uint64_t reserved_18_21 : 4;
1739 uint64_t out_ovr : 16; /**< Outbound data FIFO overflow (per port) */
1740 uint64_t ncb_ovr : 1; /**< Outbound NCB FIFO Overflow */
1741 uint64_t out_col : 1; /**< Outbound collision occured between PKO and NCB */
1743 uint64_t out_col : 1;
1744 uint64_t ncb_ovr : 1;
1745 uint64_t out_ovr : 16;
1746 uint64_t reserved_18_21 : 4;
1747 uint64_t loststat : 4;
1748 uint64_t statovr : 1;
1749 uint64_t inb_nxa : 4;
1750 uint64_t reserved_31_63 : 33;
1753 struct cvmx_gmxx_bad_reg_cn30xx
1755 #if __BYTE_ORDER == __BIG_ENDIAN
1756 uint64_t reserved_31_63 : 33;
1757 uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */
1758 uint64_t statovr : 1; /**< TX Statistics overflow */
1759 uint64_t reserved_25_25 : 1;
1760 uint64_t loststat : 3; /**< TX Statistics data was over-written (per RGM port)
1761 TX Stats are corrupted */
1762 uint64_t reserved_5_21 : 17;
1763 uint64_t out_ovr : 3; /**< Outbound data FIFO overflow (per port) */
1764 uint64_t reserved_0_1 : 2;
1766 uint64_t reserved_0_1 : 2;
1767 uint64_t out_ovr : 3;
1768 uint64_t reserved_5_21 : 17;
1769 uint64_t loststat : 3;
1770 uint64_t reserved_25_25 : 1;
1771 uint64_t statovr : 1;
1772 uint64_t inb_nxa : 4;
1773 uint64_t reserved_31_63 : 33;
1776 struct cvmx_gmxx_bad_reg_cn30xx cn31xx;
1777 struct cvmx_gmxx_bad_reg_s cn38xx;
1778 struct cvmx_gmxx_bad_reg_s cn38xxp2;
1779 struct cvmx_gmxx_bad_reg_cn30xx cn50xx;
1780 struct cvmx_gmxx_bad_reg_cn52xx
1782 #if __BYTE_ORDER == __BIG_ENDIAN
1783 uint64_t reserved_31_63 : 33;
1784 uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */
1785 uint64_t statovr : 1; /**< TX Statistics overflow
1786 The common FIFO to SGMII and XAUI had an overflow
1787 TX Stats are corrupted */
1788 uint64_t loststat : 4; /**< TX Statistics data was over-written
1789 In SGMII, one bit per port
1790 In XAUI, only port0 is used
1791 TX Stats are corrupted */
1792 uint64_t reserved_6_21 : 16;
1793 uint64_t out_ovr : 4; /**< Outbound data FIFO overflow (per port) */
1794 uint64_t reserved_0_1 : 2;
1796 uint64_t reserved_0_1 : 2;
1797 uint64_t out_ovr : 4;
1798 uint64_t reserved_6_21 : 16;
1799 uint64_t loststat : 4;
1800 uint64_t statovr : 1;
1801 uint64_t inb_nxa : 4;
1802 uint64_t reserved_31_63 : 33;
1805 struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1;
1806 struct cvmx_gmxx_bad_reg_cn52xx cn56xx;
1807 struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1;
1808 struct cvmx_gmxx_bad_reg_s cn58xx;
1809 struct cvmx_gmxx_bad_reg_s cn58xxp1;
1810 struct cvmx_gmxx_bad_reg_cn52xx cn63xx;
1811 struct cvmx_gmxx_bad_reg_cn52xx cn63xxp1;
1813 typedef union cvmx_gmxx_bad_reg cvmx_gmxx_bad_reg_t;
1818 * GMX_BIST = GMX BIST Results
1821 union cvmx_gmxx_bist
1824 struct cvmx_gmxx_bist_s
1826 #if __BYTE_ORDER == __BIG_ENDIAN
1827 uint64_t reserved_25_63 : 39;
1828 uint64_t status : 25; /**< BIST Results.
1829 HW sets a bit in BIST for for memory that fails
1830 - 0: gmx#.inb.fif_bnk0
1831 - 1: gmx#.inb.fif_bnk1
1832 - 2: gmx#.inb.fif_bnk2
1833 - 3: gmx#.inb.fif_bnk3
1834 - 4: gmx#.inb.fif_bnk_ext0
1835 - 5: gmx#.inb.fif_bnk_ext1
1836 - 6: gmx#.inb.fif_bnk_ext2
1837 - 7: gmx#.inb.fif_bnk_ext3
1838 - 8: gmx#.outb.fif.fif_bnk0
1839 - 9: gmx#.outb.fif.fif_bnk1
1840 - 10: gmx#.outb.fif.fif_bnk2
1841 - 11: gmx#.outb.fif.fif_bnk3
1842 - 12: gmx#.outb.fif.fif_bnk_ext0
1843 - 13: gmx#.outb.fif.fif_bnk_ext1
1844 - 14: gmx#.outb.fif.fif_bnk_ext2
1845 - 15: gmx#.outb.fif.fif_bnk_ext3
1846 - 16: gmx#.csr.gmi0.srf8x64m1_bist
1847 - 17: gmx#.csr.gmi1.srf8x64m1_bist
1848 - 18: gmx#.csr.gmi2.srf8x64m1_bist
1849 - 19: gmx#.csr.gmi3.srf8x64m1_bist
1850 - 20: gmx#.csr.drf20x32m2_bist
1851 - 21: gmx#.csr.drf20x48m2_bist
1852 - 22: gmx#.outb.stat.drf16x27m1_bist
1853 - 23: gmx#.outb.stat.drf40x64m1_bist
1854 - 24: xgmii.tx.drf16x38m1_async_bist */
1856 uint64_t status : 25;
1857 uint64_t reserved_25_63 : 39;
1860 struct cvmx_gmxx_bist_cn30xx
1862 #if __BYTE_ORDER == __BIG_ENDIAN
1863 uint64_t reserved_10_63 : 54;
1864 uint64_t status : 10; /**< BIST Results.
1865 HW sets a bit in BIST for for memory that fails
1866 - 0: gmx#.inb.dpr512x78m4_bist
1867 - 1: gmx#.outb.fif.dpr512x71m4_bist
1868 - 2: gmx#.csr.gmi0.srf8x64m1_bist
1869 - 3: gmx#.csr.gmi1.srf8x64m1_bist
1870 - 4: gmx#.csr.gmi2.srf8x64m1_bist
1872 - 6: gmx#.csr.drf20x80m1_bist
1873 - 7: gmx#.outb.stat.drf16x27m1_bist
1874 - 8: gmx#.outb.stat.drf40x64m1_bist
1877 uint64_t status : 10;
1878 uint64_t reserved_10_63 : 54;
1881 struct cvmx_gmxx_bist_cn30xx cn31xx;
1882 struct cvmx_gmxx_bist_cn30xx cn38xx;
1883 struct cvmx_gmxx_bist_cn30xx cn38xxp2;
1884 struct cvmx_gmxx_bist_cn50xx
1886 #if __BYTE_ORDER == __BIG_ENDIAN
1887 uint64_t reserved_12_63 : 52;
1888 uint64_t status : 12; /**< BIST Results.
1889 HW sets a bit in BIST for for memory that fails */
1891 uint64_t status : 12;
1892 uint64_t reserved_12_63 : 52;
1895 struct cvmx_gmxx_bist_cn52xx
1897 #if __BYTE_ORDER == __BIG_ENDIAN
1898 uint64_t reserved_16_63 : 48;
1899 uint64_t status : 16; /**< BIST Results.
1900 HW sets a bit in BIST for for memory that fails
1901 - 0: gmx#.inb.fif_bnk0
1902 - 1: gmx#.inb.fif_bnk1
1903 - 2: gmx#.inb.fif_bnk2
1904 - 3: gmx#.inb.fif_bnk3
1905 - 4: gmx#.outb.fif.fif_bnk0
1906 - 5: gmx#.outb.fif.fif_bnk1
1907 - 6: gmx#.outb.fif.fif_bnk2
1908 - 7: gmx#.outb.fif.fif_bnk3
1909 - 8: gmx#.csr.gmi0.srf8x64m1_bist
1910 - 9: gmx#.csr.gmi1.srf8x64m1_bist
1911 - 10: gmx#.csr.gmi2.srf8x64m1_bist
1912 - 11: gmx#.csr.gmi3.srf8x64m1_bist
1913 - 12: gmx#.csr.drf20x80m1_bist
1914 - 13: gmx#.outb.stat.drf16x27m1_bist
1915 - 14: gmx#.outb.stat.drf40x64m1_bist
1916 - 15: xgmii.tx.drf16x38m1_async_bist */
1918 uint64_t status : 16;
1919 uint64_t reserved_16_63 : 48;
1922 struct cvmx_gmxx_bist_cn52xx cn52xxp1;
1923 struct cvmx_gmxx_bist_cn52xx cn56xx;
1924 struct cvmx_gmxx_bist_cn52xx cn56xxp1;
1925 struct cvmx_gmxx_bist_cn58xx
1927 #if __BYTE_ORDER == __BIG_ENDIAN
1928 uint64_t reserved_17_63 : 47;
1929 uint64_t status : 17; /**< BIST Results.
1930 HW sets a bit in BIST for for memory that fails
1931 - 0: gmx#.inb.fif_bnk0
1932 - 1: gmx#.inb.fif_bnk1
1933 - 2: gmx#.inb.fif_bnk2
1934 - 3: gmx#.inb.fif_bnk3
1935 - 4: gmx#.outb.fif.fif_bnk0
1936 - 5: gmx#.outb.fif.fif_bnk1
1937 - 6: gmx#.outb.fif.fif_bnk2
1938 - 7: gmx#.outb.fif.fif_bnk3
1939 - 8: gmx#.csr.gmi0.srf8x64m1_bist
1940 - 9: gmx#.csr.gmi1.srf8x64m1_bist
1941 - 10: gmx#.csr.gmi2.srf8x64m1_bist
1942 - 11: gmx#.csr.gmi3.srf8x64m1_bist
1943 - 12: gmx#.csr.drf20x80m1_bist
1944 - 13: gmx#.outb.stat.drf16x27m1_bist
1945 - 14: gmx#.outb.stat.drf40x64m1_bist
1946 - 15: gmx#.outb.ncb.drf16x76m1_bist
1947 - 16: gmx#.outb.fif.srf32x16m2_bist */
1949 uint64_t status : 17;
1950 uint64_t reserved_17_63 : 47;
1953 struct cvmx_gmxx_bist_cn58xx cn58xxp1;
1954 struct cvmx_gmxx_bist_s cn63xx;
1955 struct cvmx_gmxx_bist_s cn63xxp1;
1957 typedef union cvmx_gmxx_bist cvmx_gmxx_bist_t;
/* GMX(block)_CLK_EN register layout: a single CLK_EN bit (bit 0) that forces
 * the clock enables on; bits 63:1 reserved. Per-model aliases show it exists
 * on CN52XX/CN56XX/CN63XX and their pass-1 variants.
 * NOTE(review): excerpt is missing interior lines of the generated original
 * (union/struct opening braces, the raw "uint64_t u64;" member, the #else and
 * #endif of the __BYTE_ORDER conditional, and the "} s;" / "};" closers). */
1962 * DO NOT DOCUMENT THIS REGISTER - IT IS NOT OFFICIAL
1965 union cvmx_gmxx_clk_en
1968 struct cvmx_gmxx_clk_en_s
1970 #if __BYTE_ORDER == __BIG_ENDIAN
1971 uint64_t reserved_1_63 : 63;
1972 uint64_t clk_en : 1; /**< Force the clock enables on */
1974 uint64_t clk_en : 1;
1975 uint64_t reserved_1_63 : 63;
1978 struct cvmx_gmxx_clk_en_s cn52xx;
1979 struct cvmx_gmxx_clk_en_s cn52xxp1;
1980 struct cvmx_gmxx_clk_en_s cn56xx;
1981 struct cvmx_gmxx_clk_en_s cn56xxp1;
1982 struct cvmx_gmxx_clk_en_s cn63xx;
1983 struct cvmx_gmxx_clk_en_s cn63xxp1;
1985 typedef union cvmx_gmxx_clk_en cvmx_gmxx_clk_en_t;
1988 * cvmx_gmx#_hg2_control
1991 * The HiGig2 TX and RX enable would normally be both set together for HiGig2 messaging. However
1992 * setting just the TX or RX bit will result in only the HG2 message transmit or the receive
1994 * PHYS_EN and LOGL_EN bits when 1, allow link pause or back pressure to PKO as per received
1995 * HiGig2 message. When 0, link pause and back pressure to PKO in response to received messages
1998 * GMX*_TX_XAUI_CTL[HG_EN] must be set to one(to enable HiGig) whenever either HG2TX_EN or HG2RX_EN
2001 * GMX*_RX0_UDD_SKP[LEN] must be set to 16 (to select HiGig2) whenever either HG2TX_EN or HG2RX_EN
2004 * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero
2005 * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol when
2006 * GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by GMX*_TX_XAUI_CTL[HG_EN]=1
2007 * and GMX*_RX0_UDD_SKP[LEN]=16.) The HW can only auto-generate backpressure via HiGig2 messages
2008 * (optionally, when HG2TX_EN=1) with the HiGig2 protocol.
2010 union cvmx_gmxx_hg2_control
2013 struct cvmx_gmxx_hg2_control_s
2015 #if __BYTE_ORDER == __BIG_ENDIAN
2016 uint64_t reserved_19_63 : 45;
2017 uint64_t hg2tx_en : 1; /**< Enable Transmission of HG2 phys and logl messages
2018 When set, also disables HW auto-generated (802.3
2019 and CBFC) pause frames. (OCTEON cannot generate
2020 proper 802.3 or CBFC pause frames in HiGig2 mode.) */
2021 uint64_t hg2rx_en : 1; /**< Enable extraction and processing of HG2 message
2022 packet from RX flow. Physical logical pause info
2023 is used to pause physical link, back pressure PKO
2024 HG2RX_EN must be set when HiGig2 messages are
2025 present in the receive stream. */
2026 uint64_t phys_en : 1; /**< 1 bit physical link pause enable for recevied
2027 HiGig2 physical pause message */
2028 uint64_t logl_en : 16; /**< 16 bit xof enables for recevied HiGig2 messages
2031 uint64_t logl_en : 16;
2032 uint64_t phys_en : 1;
2033 uint64_t hg2rx_en : 1;
2034 uint64_t hg2tx_en : 1;
2035 uint64_t reserved_19_63 : 45;
2038 struct cvmx_gmxx_hg2_control_s cn52xx;
2039 struct cvmx_gmxx_hg2_control_s cn52xxp1;
2040 struct cvmx_gmxx_hg2_control_s cn56xx;
2041 struct cvmx_gmxx_hg2_control_s cn63xx;
2042 struct cvmx_gmxx_hg2_control_s cn63xxp1;
2044 typedef union cvmx_gmxx_hg2_control cvmx_gmxx_hg2_control_t;
2047 * cvmx_gmx#_inf_mode
2049 * GMX_INF_MODE = Interface Mode
2052 union cvmx_gmxx_inf_mode
2055 struct cvmx_gmxx_inf_mode_s
2057 #if __BYTE_ORDER == __BIG_ENDIAN
2058 uint64_t reserved_12_63 : 52;
2059 uint64_t speed : 4; /**< Interface Speed */
2060 uint64_t reserved_6_7 : 2;
2061 uint64_t mode : 2; /**< Interface Electrical Operating Mode
2063 - 1: XAUI (IEEE 802.3-2005) */
2064 uint64_t reserved_3_3 : 1;
2065 uint64_t p0mii : 1; /**< Port 0 Interface Mode
2066 - 0: Port 0 is RGMII
2067 - 1: Port 0 is MII */
2068 uint64_t en : 1; /**< Interface Enable
2069 Must be set to enable the packet interface.
2070 Should be enabled before any other requests to
2071 GMX including enabling port back pressure with
2072 IPD_CTL_STATUS[PBP_EN] */
2073 uint64_t type : 1; /**< Interface Protocol Type
2074 - 0: SGMII/1000Base-X
2080 uint64_t reserved_3_3 : 1;
2082 uint64_t reserved_6_7 : 2;
2084 uint64_t reserved_12_63 : 52;
2087 struct cvmx_gmxx_inf_mode_cn30xx
2089 #if __BYTE_ORDER == __BIG_ENDIAN
2090 uint64_t reserved_3_63 : 61;
2091 uint64_t p0mii : 1; /**< Port 0 Interface Mode
2092 - 0: Port 0 is RGMII
2093 - 1: Port 0 is MII */
2094 uint64_t en : 1; /**< Interface Enable
2095 Must be set to enable the packet interface.
2096 Should be enabled before any other requests to
2097 GMX including enabling port back pressure with
2098 IPD_CTL_STATUS[PBP_EN] */
2099 uint64_t type : 1; /**< Port 1/2 Interface Mode
2100 - 0: Ports 1 and 2 are RGMII
2101 - 1: Port 1 is GMII/MII, Port 2 is unused
2102 GMII/MII is selected by GMX_PRT1_CFG[SPEED] */
2107 uint64_t reserved_3_63 : 61;
2110 struct cvmx_gmxx_inf_mode_cn31xx
2112 #if __BYTE_ORDER == __BIG_ENDIAN
2113 uint64_t reserved_2_63 : 62;
2114 uint64_t en : 1; /**< Interface Enable
2115 Must be set to enable the packet interface.
2116 Should be enabled before any other requests to
2117 GMX including enabling port back pressure with
2118 IPD_CTL_STATUS[PBP_EN] */
2119 uint64_t type : 1; /**< Interface Mode
2120 - 0: All three ports are RGMII ports
2121 - 1: prt0 is RGMII, prt1 is GMII, and prt2 is unused */
2125 uint64_t reserved_2_63 : 62;
2128 struct cvmx_gmxx_inf_mode_cn31xx cn38xx;
2129 struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2;
2130 struct cvmx_gmxx_inf_mode_cn30xx cn50xx;
2131 struct cvmx_gmxx_inf_mode_cn52xx
2133 #if __BYTE_ORDER == __BIG_ENDIAN
2134 uint64_t reserved_10_63 : 54;
2135 uint64_t speed : 2; /**< Interface Speed
2140 uint64_t reserved_6_7 : 2;
2141 uint64_t mode : 2; /**< Interface Electrical Operating Mode
2142 - 0: Disabled (PCIe)
2143 - 1: XAUI (IEEE 802.3-2005)
2146 uint64_t reserved_2_3 : 2;
2147 uint64_t en : 1; /**< Interface Enable
2148 Must be set to enable the packet interface.
2149 Should be enabled before any other requests to
2150 GMX including enabling port back pressure with
2151 IPD_CTL_STATUS[PBP_EN] */
2152 uint64_t type : 1; /**< Interface Protocol Type
2153 - 0: SGMII/1000Base-X
2158 uint64_t reserved_2_3 : 2;
2160 uint64_t reserved_6_7 : 2;
2162 uint64_t reserved_10_63 : 54;
2165 struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1;
2166 struct cvmx_gmxx_inf_mode_cn52xx cn56xx;
2167 struct cvmx_gmxx_inf_mode_cn52xx cn56xxp1;
2168 struct cvmx_gmxx_inf_mode_cn31xx cn58xx;
2169 struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1;
2170 struct cvmx_gmxx_inf_mode_cn63xx
2172 #if __BYTE_ORDER == __BIG_ENDIAN
2173 uint64_t reserved_12_63 : 52;
2174 uint64_t speed : 4; /**< Interface Speed */
2175 uint64_t reserved_5_7 : 3;
2176 uint64_t mode : 1; /**< Interface Electrical Operating Mode
2178 - 1: XAUI (IEEE 802.3-2005) */
2179 uint64_t reserved_2_3 : 2;
2180 uint64_t en : 1; /**< Interface Enable
2181 Must be set to enable the packet interface.
2182 Should be enabled before any other requests to
2183 GMX including enabling port back pressure with
2184 IPD_CTL_STATUS[PBP_EN] */
2185 uint64_t type : 1; /**< Interface Protocol Type
2186 - 0: SGMII/1000Base-X
2191 uint64_t reserved_2_3 : 2;
2193 uint64_t reserved_5_7 : 3;
2195 uint64_t reserved_12_63 : 52;
2198 struct cvmx_gmxx_inf_mode_cn63xx cn63xxp1;
2200 typedef union cvmx_gmxx_inf_mode cvmx_gmxx_inf_mode_t;
/* GMX(block)_NXA_ADR register layout: PRT (bits 5:0) logs the port address of
 * the first NXA (non-existent address) exception; per the field comment, NCB
 * has priority over PKO when both fault. Bits 63:6 reserved. The layout is
 * identical on every model listed below.
 * NOTE(review): excerpt is missing interior lines of the generated original
 * (the /** opener of the leading doc comment, union/struct braces, the raw
 * "uint64_t u64;" member, the #else branch of the __BYTE_ORDER conditional,
 * and the "} s;" / "};" closers). */
2205 * GMX_NXA_ADR = NXA Port Address
2208 union cvmx_gmxx_nxa_adr
2211 struct cvmx_gmxx_nxa_adr_s
2213 #if __BYTE_ORDER == __BIG_ENDIAN
2214 uint64_t reserved_6_63 : 58;
2215 uint64_t prt : 6; /**< Logged address for NXA exceptions
2216 The logged address will be from the first
2217 exception that caused the problem. NCB has
2218 higher priority than PKO and will win.
2222 uint64_t reserved_6_63 : 58;
2225 struct cvmx_gmxx_nxa_adr_s cn30xx;
2226 struct cvmx_gmxx_nxa_adr_s cn31xx;
2227 struct cvmx_gmxx_nxa_adr_s cn38xx;
2228 struct cvmx_gmxx_nxa_adr_s cn38xxp2;
2229 struct cvmx_gmxx_nxa_adr_s cn50xx;
2230 struct cvmx_gmxx_nxa_adr_s cn52xx;
2231 struct cvmx_gmxx_nxa_adr_s cn52xxp1;
2232 struct cvmx_gmxx_nxa_adr_s cn56xx;
2233 struct cvmx_gmxx_nxa_adr_s cn56xxp1;
2234 struct cvmx_gmxx_nxa_adr_s cn58xx;
2235 struct cvmx_gmxx_nxa_adr_s cn58xxp1;
2236 struct cvmx_gmxx_nxa_adr_s cn63xx;
2237 struct cvmx_gmxx_nxa_adr_s cn63xxp1;
2239 typedef union cvmx_gmxx_nxa_adr cvmx_gmxx_nxa_adr_t;
2242 * cvmx_gmx#_prt#_cbfc_ctl
2244 * ** HG2 message CSRs end
2248 * XOFF for a specific port is XOFF<prt> = (PHYS_EN<prt> & PHYS_BP) | (LOGL_EN<prt> & LOGL_BP<prt>)
2251 union cvmx_gmxx_prtx_cbfc_ctl
2254 struct cvmx_gmxx_prtx_cbfc_ctl_s
2256 #if __BYTE_ORDER == __BIG_ENDIAN
2257 uint64_t phys_en : 16; /**< Determines which ports will have physical
2258 backpressure pause packets.
2259 The value pplaced in the Class Enable Vector
2260 field of the CBFC pause packet will be
2261 PHYS_EN | LOGL_EN */
2262 uint64_t logl_en : 16; /**< Determines which ports will have logical
2263 backpressure pause packets.
2264 The value pplaced in the Class Enable Vector
2265 field of the CBFC pause packet will be
2266 PHYS_EN | LOGL_EN */
2267 uint64_t phys_bp : 16; /**< When RX_EN is set and the HW is backpressuring any
2268 ports (from either CBFC pause packets or the
2269 GMX_TX_OVR_BP[TX_PRT_BP] register) and all ports
2270 indiciated by PHYS_BP are backpressured, simulate
2271 physical backpressure by defering all packets on
2273 uint64_t reserved_4_15 : 12;
2274 uint64_t bck_en : 1; /**< Forward CBFC Pause information to BP block */
2275 uint64_t drp_en : 1; /**< Drop Control CBFC Pause Frames */
2276 uint64_t tx_en : 1; /**< When set, allow for CBFC Pause Packets
2277 Must be clear in HiGig2 mode i.e. when
2278 GMX_TX_XAUI_CTL[HG_EN]=1 and
2279 GMX_RX_UDD_SKP[SKIP]=16. */
2280 uint64_t rx_en : 1; /**< When set, allow for CBFC Pause Packets
2281 Must be clear in HiGig2 mode i.e. when
2282 GMX_TX_XAUI_CTL[HG_EN]=1 and
2283 GMX_RX_UDD_SKP[SKIP]=16. */
2287 uint64_t drp_en : 1;
2288 uint64_t bck_en : 1;
2289 uint64_t reserved_4_15 : 12;
2290 uint64_t phys_bp : 16;
2291 uint64_t logl_en : 16;
2292 uint64_t phys_en : 16;
2295 struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx;
2296 struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx;
2297 struct cvmx_gmxx_prtx_cbfc_ctl_s cn63xx;
2298 struct cvmx_gmxx_prtx_cbfc_ctl_s cn63xxp1;
2300 typedef union cvmx_gmxx_prtx_cbfc_ctl cvmx_gmxx_prtx_cbfc_ctl_t;
2303 * cvmx_gmx#_prt#_cfg
2305 * GMX_PRT_CFG = Port description
2308 union cvmx_gmxx_prtx_cfg
2311 struct cvmx_gmxx_prtx_cfg_s
2313 #if __BYTE_ORDER == __BIG_ENDIAN
2314 uint64_t reserved_14_63 : 50;
2315 uint64_t tx_idle : 1; /**< TX Machine is idle */
2316 uint64_t rx_idle : 1; /**< RX Machine is idle */
2317 uint64_t reserved_9_11 : 3;
2318 uint64_t speed_msb : 1; /**< Link Speed MSB [SPEED_MSB:SPEED]
2319 10 = 10Mbs operation
2320 00 = 100Mbs operation
2321 01 = 1000Mbs operation
2323 (SGMII/1000Base-X only) */
2324 uint64_t reserved_4_7 : 4;
2325 uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
2326 0 = 512 bitimes (10/100Mbs operation)
2327 1 = 4096 bitimes (1000Mbs operation)
2328 (SGMII/1000Base-X only) */
2329 uint64_t duplex : 1; /**< Duplex
2330 0 = Half Duplex (collisions/extentions/bursts)
2332 (SGMII/1000Base-X only) */
2333 uint64_t speed : 1; /**< Link Speed LSB [SPEED_MSB:SPEED]
2334 10 = 10Mbs operation
2335 00 = 100Mbs operation
2336 01 = 1000Mbs operation
2338 (SGMII/1000Base-X only) */
2339 uint64_t en : 1; /**< Link Enable
2340 When EN is clear, packets will not be received
2341 or transmitted (including PAUSE and JAM packets).
2342 If EN is cleared while a packet is currently
2343 being received or transmitted, the packet will
2344 be allowed to complete before the bus is idled.
2345 On the RX side, subsequent packets in a burst
2350 uint64_t duplex : 1;
2351 uint64_t slottime : 1;
2352 uint64_t reserved_4_7 : 4;
2353 uint64_t speed_msb : 1;
2354 uint64_t reserved_9_11 : 3;
2355 uint64_t rx_idle : 1;
2356 uint64_t tx_idle : 1;
2357 uint64_t reserved_14_63 : 50;
2360 struct cvmx_gmxx_prtx_cfg_cn30xx
2362 #if __BYTE_ORDER == __BIG_ENDIAN
2363 uint64_t reserved_4_63 : 60;
2364 uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation
2365 0 = 512 bitimes (10/100Mbs operation)
2366 1 = 4096 bitimes (1000Mbs operation) */
2367 uint64_t duplex : 1; /**< Duplex
2368 0 = Half Duplex (collisions/extentions/bursts)
2370 uint64_t speed : 1; /**< Link Speed
2371 0 = 10/100Mbs operation
2372 (in RGMII mode, GMX_TX_CLK[CLK_CNT] > 1)
2373 (in MII mode, GMX_TX_CLK[CLK_CNT] == 1)
2374 1 = 1000Mbs operation */
2375 uint64_t en : 1; /**< Link Enable
2376 When EN is clear, packets will not be received
2377 or transmitted (including PAUSE and JAM packets).
2378 If EN is cleared while a packet is currently
2379 being received or transmitted, the packet will
2380 be allowed to complete before the bus is idled.
2381 On the RX side, subsequent packets in a burst
2386 uint64_t duplex : 1;
2387 uint64_t slottime : 1;
2388 uint64_t reserved_4_63 : 60;
2391 struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx;
2392 struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx;
2393 struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2;
2394 struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx;
2395 struct cvmx_gmxx_prtx_cfg_s cn52xx;
2396 struct cvmx_gmxx_prtx_cfg_s cn52xxp1;
2397 struct cvmx_gmxx_prtx_cfg_s cn56xx;
2398 struct cvmx_gmxx_prtx_cfg_s cn56xxp1;
2399 struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx;
2400 struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1;
2401 struct cvmx_gmxx_prtx_cfg_s cn63xx;
2402 struct cvmx_gmxx_prtx_cfg_s cn63xxp1;
2404 typedef union cvmx_gmxx_prtx_cfg cvmx_gmxx_prtx_cfg_t;
/* GMX(block)_RX(port)_ADR_CAM0: first of six 64-bit CAM words used for DMAC
 * address filtering. Per the field comment, each entry contributes 8 bits to
 * one of 8 matchers; writes are ignored while GMX_PRT_CFG[EN] is set, and in
 * XAUI mode all ports mirror port0's value. Layout is identical on all
 * models listed below. (CAM1..CAM5 below follow the same pattern.)
 * NOTE(review): excerpt is missing interior lines of the generated original
 * (the /** opener of the leading doc comment, union/struct braces, the raw
 * "uint64_t u64;" member, the #else branch of the __BYTE_ORDER conditional,
 * and the "} s;" / "};" closers). */
2407 * cvmx_gmx#_rx#_adr_cam0
2409 * GMX_RX_ADR_CAM = Address Filtering Control
2412 union cvmx_gmxx_rxx_adr_cam0
2415 struct cvmx_gmxx_rxx_adr_cam0_s
2417 #if __BYTE_ORDER == __BIG_ENDIAN
2418 uint64_t adr : 64; /**< The DMAC address to match on
2419 Each entry contributes 8bits to one of 8 matchers
2420 Write transactions to GMX_RX_ADR_CAM will not
2421 change the CSR when GMX_PRT_CFG[EN] is enabled
2422 The CAM matches against unicst or multicst DMAC
2424 In XAUI mode, all ports will reflect the data
2425 written to port0. */
2430 struct cvmx_gmxx_rxx_adr_cam0_s cn30xx;
2431 struct cvmx_gmxx_rxx_adr_cam0_s cn31xx;
2432 struct cvmx_gmxx_rxx_adr_cam0_s cn38xx;
2433 struct cvmx_gmxx_rxx_adr_cam0_s cn38xxp2;
2434 struct cvmx_gmxx_rxx_adr_cam0_s cn50xx;
2435 struct cvmx_gmxx_rxx_adr_cam0_s cn52xx;
2436 struct cvmx_gmxx_rxx_adr_cam0_s cn52xxp1;
2437 struct cvmx_gmxx_rxx_adr_cam0_s cn56xx;
2438 struct cvmx_gmxx_rxx_adr_cam0_s cn56xxp1;
2439 struct cvmx_gmxx_rxx_adr_cam0_s cn58xx;
2440 struct cvmx_gmxx_rxx_adr_cam0_s cn58xxp1;
2441 struct cvmx_gmxx_rxx_adr_cam0_s cn63xx;
2442 struct cvmx_gmxx_rxx_adr_cam0_s cn63xxp1;
2444 typedef union cvmx_gmxx_rxx_adr_cam0 cvmx_gmxx_rxx_adr_cam0_t;
2447 * cvmx_gmx#_rx#_adr_cam1
2449 * GMX_RX_ADR_CAM = Address Filtering Control
2452 union cvmx_gmxx_rxx_adr_cam1
2455 struct cvmx_gmxx_rxx_adr_cam1_s
2457 #if __BYTE_ORDER == __BIG_ENDIAN
2458 uint64_t adr : 64; /**< The DMAC address to match on
2459 Each entry contributes 8bits to one of 8 matchers
2460 Write transactions to GMX_RX_ADR_CAM will not
2461 change the CSR when GMX_PRT_CFG[EN] is enabled
2462 The CAM matches against unicst or multicst DMAC
2464 In XAUI mode, all ports will reflect the data
2465 written to port0. */
2470 struct cvmx_gmxx_rxx_adr_cam1_s cn30xx;
2471 struct cvmx_gmxx_rxx_adr_cam1_s cn31xx;
2472 struct cvmx_gmxx_rxx_adr_cam1_s cn38xx;
2473 struct cvmx_gmxx_rxx_adr_cam1_s cn38xxp2;
2474 struct cvmx_gmxx_rxx_adr_cam1_s cn50xx;
2475 struct cvmx_gmxx_rxx_adr_cam1_s cn52xx;
2476 struct cvmx_gmxx_rxx_adr_cam1_s cn52xxp1;
2477 struct cvmx_gmxx_rxx_adr_cam1_s cn56xx;
2478 struct cvmx_gmxx_rxx_adr_cam1_s cn56xxp1;
2479 struct cvmx_gmxx_rxx_adr_cam1_s cn58xx;
2480 struct cvmx_gmxx_rxx_adr_cam1_s cn58xxp1;
2481 struct cvmx_gmxx_rxx_adr_cam1_s cn63xx;
2482 struct cvmx_gmxx_rxx_adr_cam1_s cn63xxp1;
2484 typedef union cvmx_gmxx_rxx_adr_cam1 cvmx_gmxx_rxx_adr_cam1_t;
2487 * cvmx_gmx#_rx#_adr_cam2
2489 * GMX_RX_ADR_CAM = Address Filtering Control
2492 union cvmx_gmxx_rxx_adr_cam2
2495 struct cvmx_gmxx_rxx_adr_cam2_s
2497 #if __BYTE_ORDER == __BIG_ENDIAN
2498 uint64_t adr : 64; /**< The DMAC address to match on
2499 Each entry contributes 8bits to one of 8 matchers
2500 Write transactions to GMX_RX_ADR_CAM will not
2501 change the CSR when GMX_PRT_CFG[EN] is enabled
2502 The CAM matches against unicst or multicst DMAC
2504 In XAUI mode, all ports will reflect the data
2505 written to port0. */
2510 struct cvmx_gmxx_rxx_adr_cam2_s cn30xx;
2511 struct cvmx_gmxx_rxx_adr_cam2_s cn31xx;
2512 struct cvmx_gmxx_rxx_adr_cam2_s cn38xx;
2513 struct cvmx_gmxx_rxx_adr_cam2_s cn38xxp2;
2514 struct cvmx_gmxx_rxx_adr_cam2_s cn50xx;
2515 struct cvmx_gmxx_rxx_adr_cam2_s cn52xx;
2516 struct cvmx_gmxx_rxx_adr_cam2_s cn52xxp1;
2517 struct cvmx_gmxx_rxx_adr_cam2_s cn56xx;
2518 struct cvmx_gmxx_rxx_adr_cam2_s cn56xxp1;
2519 struct cvmx_gmxx_rxx_adr_cam2_s cn58xx;
2520 struct cvmx_gmxx_rxx_adr_cam2_s cn58xxp1;
2521 struct cvmx_gmxx_rxx_adr_cam2_s cn63xx;
2522 struct cvmx_gmxx_rxx_adr_cam2_s cn63xxp1;
2524 typedef union cvmx_gmxx_rxx_adr_cam2 cvmx_gmxx_rxx_adr_cam2_t;
2527 * cvmx_gmx#_rx#_adr_cam3
2529 * GMX_RX_ADR_CAM = Address Filtering Control
2532 union cvmx_gmxx_rxx_adr_cam3
2535 struct cvmx_gmxx_rxx_adr_cam3_s
2537 #if __BYTE_ORDER == __BIG_ENDIAN
2538 uint64_t adr : 64; /**< The DMAC address to match on
2539 Each entry contributes 8bits to one of 8 matchers
2540 Write transactions to GMX_RX_ADR_CAM will not
2541 change the CSR when GMX_PRT_CFG[EN] is enabled
2542 The CAM matches against unicst or multicst DMAC
2544 In XAUI mode, all ports will reflect the data
2545 written to port0. */
2550 struct cvmx_gmxx_rxx_adr_cam3_s cn30xx;
2551 struct cvmx_gmxx_rxx_adr_cam3_s cn31xx;
2552 struct cvmx_gmxx_rxx_adr_cam3_s cn38xx;
2553 struct cvmx_gmxx_rxx_adr_cam3_s cn38xxp2;
2554 struct cvmx_gmxx_rxx_adr_cam3_s cn50xx;
2555 struct cvmx_gmxx_rxx_adr_cam3_s cn52xx;
2556 struct cvmx_gmxx_rxx_adr_cam3_s cn52xxp1;
2557 struct cvmx_gmxx_rxx_adr_cam3_s cn56xx;
2558 struct cvmx_gmxx_rxx_adr_cam3_s cn56xxp1;
2559 struct cvmx_gmxx_rxx_adr_cam3_s cn58xx;
2560 struct cvmx_gmxx_rxx_adr_cam3_s cn58xxp1;
2561 struct cvmx_gmxx_rxx_adr_cam3_s cn63xx;
2562 struct cvmx_gmxx_rxx_adr_cam3_s cn63xxp1;
2564 typedef union cvmx_gmxx_rxx_adr_cam3 cvmx_gmxx_rxx_adr_cam3_t;
2567 * cvmx_gmx#_rx#_adr_cam4
2569 * GMX_RX_ADR_CAM = Address Filtering Control
2572 union cvmx_gmxx_rxx_adr_cam4
2575 struct cvmx_gmxx_rxx_adr_cam4_s
2577 #if __BYTE_ORDER == __BIG_ENDIAN
2578 uint64_t adr : 64; /**< The DMAC address to match on
2579 Each entry contributes 8bits to one of 8 matchers
2580 Write transactions to GMX_RX_ADR_CAM will not
2581 change the CSR when GMX_PRT_CFG[EN] is enabled
2582 The CAM matches against unicst or multicst DMAC
2584 In XAUI mode, all ports will reflect the data
2585 written to port0. */
2590 struct cvmx_gmxx_rxx_adr_cam4_s cn30xx;
2591 struct cvmx_gmxx_rxx_adr_cam4_s cn31xx;
2592 struct cvmx_gmxx_rxx_adr_cam4_s cn38xx;
2593 struct cvmx_gmxx_rxx_adr_cam4_s cn38xxp2;
2594 struct cvmx_gmxx_rxx_adr_cam4_s cn50xx;
2595 struct cvmx_gmxx_rxx_adr_cam4_s cn52xx;
2596 struct cvmx_gmxx_rxx_adr_cam4_s cn52xxp1;
2597 struct cvmx_gmxx_rxx_adr_cam4_s cn56xx;
2598 struct cvmx_gmxx_rxx_adr_cam4_s cn56xxp1;
2599 struct cvmx_gmxx_rxx_adr_cam4_s cn58xx;
2600 struct cvmx_gmxx_rxx_adr_cam4_s cn58xxp1;
2601 struct cvmx_gmxx_rxx_adr_cam4_s cn63xx;
2602 struct cvmx_gmxx_rxx_adr_cam4_s cn63xxp1;
2604 typedef union cvmx_gmxx_rxx_adr_cam4 cvmx_gmxx_rxx_adr_cam4_t;
2607 * cvmx_gmx#_rx#_adr_cam5
2609 * GMX_RX_ADR_CAM = Address Filtering Control
2612 union cvmx_gmxx_rxx_adr_cam5
2615 struct cvmx_gmxx_rxx_adr_cam5_s
2617 #if __BYTE_ORDER == __BIG_ENDIAN
2618 uint64_t adr : 64; /**< The DMAC address to match on
2619 Each entry contributes 8bits to one of 8 matchers
2620 Write transactions to GMX_RX_ADR_CAM will not
2621 change the CSR when GMX_PRT_CFG[EN] is enabled
2622 The CAM matches against unicst or multicst DMAC
2624 In XAUI mode, all ports will reflect the data
2625 written to port0. */
2630 struct cvmx_gmxx_rxx_adr_cam5_s cn30xx;
2631 struct cvmx_gmxx_rxx_adr_cam5_s cn31xx;
2632 struct cvmx_gmxx_rxx_adr_cam5_s cn38xx;
2633 struct cvmx_gmxx_rxx_adr_cam5_s cn38xxp2;
2634 struct cvmx_gmxx_rxx_adr_cam5_s cn50xx;
2635 struct cvmx_gmxx_rxx_adr_cam5_s cn52xx;
2636 struct cvmx_gmxx_rxx_adr_cam5_s cn52xxp1;
2637 struct cvmx_gmxx_rxx_adr_cam5_s cn56xx;
2638 struct cvmx_gmxx_rxx_adr_cam5_s cn56xxp1;
2639 struct cvmx_gmxx_rxx_adr_cam5_s cn58xx;
2640 struct cvmx_gmxx_rxx_adr_cam5_s cn58xxp1;
2641 struct cvmx_gmxx_rxx_adr_cam5_s cn63xx;
2642 struct cvmx_gmxx_rxx_adr_cam5_s cn63xxp1;
2644 typedef union cvmx_gmxx_rxx_adr_cam5 cvmx_gmxx_rxx_adr_cam5_t;
/* GMX(block)_RX(port)_ADR_CAM_EN: EN (bits 7:0) enables each of the eight
 * CAM entries built from the ADR_CAM0..5 words above; bits 63:8 reserved.
 * Layout is identical on all models listed below.
 * NOTE(review): excerpt is missing interior lines of the generated original
 * (the /** opener of the leading doc comment, union/struct braces, the raw
 * "uint64_t u64;" member, the #else branch and its "uint64_t en : 8;" line,
 * #endif, and the "} s;" / "};" closers). */
2647 * cvmx_gmx#_rx#_adr_cam_en
2649 * GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
2652 union cvmx_gmxx_rxx_adr_cam_en
2655 struct cvmx_gmxx_rxx_adr_cam_en_s
2657 #if __BYTE_ORDER == __BIG_ENDIAN
2658 uint64_t reserved_8_63 : 56;
2659 uint64_t en : 8; /**< CAM Entry Enables */
2662 uint64_t reserved_8_63 : 56;
2665 struct cvmx_gmxx_rxx_adr_cam_en_s cn30xx;
2666 struct cvmx_gmxx_rxx_adr_cam_en_s cn31xx;
2667 struct cvmx_gmxx_rxx_adr_cam_en_s cn38xx;
2668 struct cvmx_gmxx_rxx_adr_cam_en_s cn38xxp2;
2669 struct cvmx_gmxx_rxx_adr_cam_en_s cn50xx;
2670 struct cvmx_gmxx_rxx_adr_cam_en_s cn52xx;
2671 struct cvmx_gmxx_rxx_adr_cam_en_s cn52xxp1;
2672 struct cvmx_gmxx_rxx_adr_cam_en_s cn56xx;
2673 struct cvmx_gmxx_rxx_adr_cam_en_s cn56xxp1;
2674 struct cvmx_gmxx_rxx_adr_cam_en_s cn58xx;
2675 struct cvmx_gmxx_rxx_adr_cam_en_s cn58xxp1;
2676 struct cvmx_gmxx_rxx_adr_cam_en_s cn63xx;
2677 struct cvmx_gmxx_rxx_adr_cam_en_s cn63xxp1;
2679 typedef union cvmx_gmxx_rxx_adr_cam_en cvmx_gmxx_rxx_adr_cam_en_t;
2682 * cvmx_gmx#_rx#_adr_ctl
2684 * GMX_RX_ADR_CTL = Address Filtering Control
2689 * Here is some pseudo code that represents the address filter behavior.
2692 * bool dmac_addr_filter(uint8 prt, uint48 dmac) [
2693 * ASSERT(prt >= 0 && prt <= 3);
2694 * if (is_bcst(dmac)) // broadcast accept
2695 * return (GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
2696 * if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 1) // multicast reject
2698 * if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 2) // multicast accept
2703 * for (i=0; i<8; i++) [
2704 * if (GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0)
2706 * uint48 unswizzled_mac_adr = 0x0;
2707 * for (j=5; j>=0; j--) [
2708 * unswizzled_mac_adr = (unswizzled_mac_adr << 8) | GMX_RX[prt]_ADR_CAM[j][ADR<i*8+7:i*8>];
2710 * if (unswizzled_mac_adr == dmac) [
2717 * return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
2719 * return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
/* GMX per-port DMAC address-filter control CSR: broadcast accept (BCST),
   multicast handling mode (MCST), and CAM match accept/reject polarity
   (CAM_MODE). All listed chip variants share the common _s layout. */
2723 union cvmx_gmxx_rxx_adr_ctl
2726 struct cvmx_gmxx_rxx_adr_ctl_s
2728 #if __BYTE_ORDER == __BIG_ENDIAN
2729 uint64_t reserved_4_63 : 60;
2730 uint64_t cam_mode : 1; /**< Allow or deny DMAC address filter
2731 0 = reject the packet on DMAC address match
2732 1 = accept the packet on DMAC address match */
2733 uint64_t mcst : 2; /**< Multicast Mode
2734 0 = Use the Address Filter CAM
2735 1 = Force reject all multicast packets
2736 2 = Force accept all multicast packets
2738 uint64_t bcst : 1; /**< Accept All Broadcast Packets */
2742 uint64_t cam_mode : 1;
2743 uint64_t reserved_4_63 : 60;
2746 struct cvmx_gmxx_rxx_adr_ctl_s cn30xx;
2747 struct cvmx_gmxx_rxx_adr_ctl_s cn31xx;
2748 struct cvmx_gmxx_rxx_adr_ctl_s cn38xx;
2749 struct cvmx_gmxx_rxx_adr_ctl_s cn38xxp2;
2750 struct cvmx_gmxx_rxx_adr_ctl_s cn50xx;
2751 struct cvmx_gmxx_rxx_adr_ctl_s cn52xx;
2752 struct cvmx_gmxx_rxx_adr_ctl_s cn52xxp1;
2753 struct cvmx_gmxx_rxx_adr_ctl_s cn56xx;
2754 struct cvmx_gmxx_rxx_adr_ctl_s cn56xxp1;
2755 struct cvmx_gmxx_rxx_adr_ctl_s cn58xx;
2756 struct cvmx_gmxx_rxx_adr_ctl_s cn58xxp1;
2757 struct cvmx_gmxx_rxx_adr_ctl_s cn63xx;
2758 struct cvmx_gmxx_rxx_adr_ctl_s cn63xxp1;
2760 typedef union cvmx_gmxx_rxx_adr_ctl cvmx_gmxx_rxx_adr_ctl_t;
2763 * cvmx_gmx#_rx#_decision
2765 * GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
2769 * As each byte in a packet is received by GMX, the L2 byte count is compared
2770 * against the GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes
2771 * from the beginning of the L2 header (DMAC). In normal operation, the L2
2772 * header begins after the PREAMBLE+SFD (GMX_RX_FRM_CTL[PRE_CHK]=1) and any
2773 * optional UDD skip data (GMX_RX_UDD_SKP[LEN]).
2775 * When GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
2776 * packet and would require UDD skip length to account for them.
2779 * Port Mode <GMX_RX_DECISION bytes (default=24) >=GMX_RX_DECISION bytes (default=24)
2781 * Full Duplex accept packet apply filters
2782 * no filtering is applied accept packet based on DMAC and PAUSE packet filters
2784 * Half Duplex drop packet apply filters
2785 * packet is unconditionally dropped accept packet based on DMAC
2787 * where l2_size = MAX(0, total_packet_size - GMX_RX_UDD_SKP[LEN] - ((GMX_RX_FRM_CTL[PRE_CHK]==1)*8))
/* GMX per-port RX decision-point CSR: the L2 byte-count threshold (CNT,
   5 bits) at which GMX decides whether to accept or filter a packet.
   All listed chip variants share the common _s layout. */
2789 union cvmx_gmxx_rxx_decision
2792 struct cvmx_gmxx_rxx_decision_s
2794 #if __BYTE_ORDER == __BIG_ENDIAN
2795 uint64_t reserved_5_63 : 59;
2796 uint64_t cnt : 5; /**< The byte count to decide when to accept or filter
2800 uint64_t reserved_5_63 : 59;
2803 struct cvmx_gmxx_rxx_decision_s cn30xx;
2804 struct cvmx_gmxx_rxx_decision_s cn31xx;
2805 struct cvmx_gmxx_rxx_decision_s cn38xx;
2806 struct cvmx_gmxx_rxx_decision_s cn38xxp2;
2807 struct cvmx_gmxx_rxx_decision_s cn50xx;
2808 struct cvmx_gmxx_rxx_decision_s cn52xx;
2809 struct cvmx_gmxx_rxx_decision_s cn52xxp1;
2810 struct cvmx_gmxx_rxx_decision_s cn56xx;
2811 struct cvmx_gmxx_rxx_decision_s cn56xxp1;
2812 struct cvmx_gmxx_rxx_decision_s cn58xx;
2813 struct cvmx_gmxx_rxx_decision_s cn58xxp1;
2814 struct cvmx_gmxx_rxx_decision_s cn63xx;
2815 struct cvmx_gmxx_rxx_decision_s cn63xxp1;
2817 typedef union cvmx_gmxx_rxx_decision cvmx_gmxx_rxx_decision_t;
2820 * cvmx_gmx#_rx#_frm_chk
2822 * GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
2826 * If GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
2828 * In XAUI mode prt0 is used for checking.
/* GMX per-port RX frame-check enable CSR: one bit per receive error
   condition that, when enabled, marks the frame as errored. Per-chip
   variants replace bits not present on that device with reserved_* fields
   (field positions are otherwise identical across variants). */
2830 union cvmx_gmxx_rxx_frm_chk
2833 struct cvmx_gmxx_rxx_frm_chk_s
2835 #if __BYTE_ORDER == __BIG_ENDIAN
2836 uint64_t reserved_10_63 : 54;
2837 uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
2838 uint64_t skperr : 1; /**< Skipper error */
2839 uint64_t rcverr : 1; /**< Frame was received with Data reception error */
2840 uint64_t lenerr : 1; /**< Frame was received with length error */
2841 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
2842 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2843 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2844 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
2845 uint64_t carext : 1; /**< Carrier extend error
2846 (SGMII/1000Base-X only) */
2847 uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize */
2849 uint64_t minerr : 1;
2850 uint64_t carext : 1;
2851 uint64_t maxerr : 1;
2852 uint64_t jabber : 1;
2853 uint64_t fcserr : 1;
2854 uint64_t alnerr : 1;
2855 uint64_t lenerr : 1;
2856 uint64_t rcverr : 1;
2857 uint64_t skperr : 1;
2858 uint64_t niberr : 1;
2859 uint64_t reserved_10_63 : 54;
2862 struct cvmx_gmxx_rxx_frm_chk_s cn30xx;
2863 struct cvmx_gmxx_rxx_frm_chk_s cn31xx;
2864 struct cvmx_gmxx_rxx_frm_chk_s cn38xx;
2865 struct cvmx_gmxx_rxx_frm_chk_s cn38xxp2;
2866 struct cvmx_gmxx_rxx_frm_chk_cn50xx
2868 #if __BYTE_ORDER == __BIG_ENDIAN
2869 uint64_t reserved_10_63 : 54;
2870 uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
2871 uint64_t skperr : 1; /**< Skipper error */
2872 uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */
2873 uint64_t reserved_6_6 : 1;
2874 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
2875 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2876 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2877 uint64_t reserved_2_2 : 1;
2878 uint64_t carext : 1; /**< RGMII carrier extend error */
2879 uint64_t reserved_0_0 : 1;
2881 uint64_t reserved_0_0 : 1;
2882 uint64_t carext : 1;
2883 uint64_t reserved_2_2 : 1;
2884 uint64_t jabber : 1;
2885 uint64_t fcserr : 1;
2886 uint64_t alnerr : 1;
2887 uint64_t reserved_6_6 : 1;
2888 uint64_t rcverr : 1;
2889 uint64_t skperr : 1;
2890 uint64_t niberr : 1;
2891 uint64_t reserved_10_63 : 54;
2894 struct cvmx_gmxx_rxx_frm_chk_cn52xx
2896 #if __BYTE_ORDER == __BIG_ENDIAN
2897 uint64_t reserved_9_63 : 55;
2898 uint64_t skperr : 1; /**< Skipper error */
2899 uint64_t rcverr : 1; /**< Frame was received with Data reception error */
2900 uint64_t reserved_5_6 : 2;
2901 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2902 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2903 uint64_t reserved_2_2 : 1;
2904 uint64_t carext : 1; /**< Carrier extend error
2905 (SGMII/1000Base-X only) */
2906 uint64_t reserved_0_0 : 1;
2908 uint64_t reserved_0_0 : 1;
2909 uint64_t carext : 1;
2910 uint64_t reserved_2_2 : 1;
2911 uint64_t jabber : 1;
2912 uint64_t fcserr : 1;
2913 uint64_t reserved_5_6 : 2;
2914 uint64_t rcverr : 1;
2915 uint64_t skperr : 1;
2916 uint64_t reserved_9_63 : 55;
2919 struct cvmx_gmxx_rxx_frm_chk_cn52xx cn52xxp1;
2920 struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xx;
2921 struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xxp1;
2922 struct cvmx_gmxx_rxx_frm_chk_s cn58xx;
2923 struct cvmx_gmxx_rxx_frm_chk_s cn58xxp1;
2924 struct cvmx_gmxx_rxx_frm_chk_cn63xx
2926 #if __BYTE_ORDER == __BIG_ENDIAN
2927 uint64_t reserved_9_63 : 55;
2928 uint64_t skperr : 1; /**< Skipper error */
2929 uint64_t rcverr : 1; /**< Frame was received with Data reception error */
2930 uint64_t reserved_5_6 : 2;
2931 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
2932 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
2933 uint64_t reserved_2_2 : 1;
2934 uint64_t carext : 1; /**< Carrier extend error
2935 (SGMII/1000Base-X only) */
2936 uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize */
2938 uint64_t minerr : 1;
2939 uint64_t carext : 1;
2940 uint64_t reserved_2_2 : 1;
2941 uint64_t jabber : 1;
2942 uint64_t fcserr : 1;
2943 uint64_t reserved_5_6 : 2;
2944 uint64_t rcverr : 1;
2945 uint64_t skperr : 1;
2946 uint64_t reserved_9_63 : 55;
2949 struct cvmx_gmxx_rxx_frm_chk_cn63xx cn63xxp1;
2951 typedef union cvmx_gmxx_rxx_frm_chk cvmx_gmxx_rxx_frm_chk_t;
2954 * cvmx_gmx#_rx#_frm_ctl
2956 * GMX_RX_FRM_CTL = Frame Control
2961 * When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
2962 * determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
2963 * core as part of the packet.
2965 * In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
2966 * size when checking against the MIN and MAX bounds. Furthermore, the bytes
2967 * are skipped when locating the start of the L2 header for DMAC and Control
2968 * frame recognition.
2971 * These bits control how the HW handles incoming PAUSE packets. Here are
2972 * the most common modes of operation:
2973 * CTL_BCK=1,CTL_DRP=1 - HW does it all
2974 * CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames
2975 * CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored
2977 * These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode.
2978 * Since PAUSE packets only apply to fulldup operation, any PAUSE packet
2979 * would constitute an exception which should be handled by the processing
2980 * cores. PAUSE packets should not be forwarded.
/* GMX per-port RX frame control CSR: PREAMBLE checking/stripping/alignment
   (PRE_CHK/PRE_STRP/PRE_FREE/PRE_ALIGN), PAUSE-frame handling (CTL_*),
   length-check disables (PAD_LEN/VLAN_LEN), and, on cn63xx, PTP timestamp
   mode. Per-chip variants differ in which bits exist; field positions are
   consistent across variants. */
2982 union cvmx_gmxx_rxx_frm_ctl
2985 struct cvmx_gmxx_rxx_frm_ctl_s
2987 #if __BYTE_ORDER == __BIG_ENDIAN
2988 uint64_t reserved_13_63 : 51;
2989 uint64_t ptp_mode : 1; /**< Timestamp mode
2990 When PTP_MODE is set, a 64-bit timestamp will be
2991 prepended to every incoming packet. The timestamp
2992 bytes are added to the packet in such a way as to
2993 not modify the packet's receive byte count. This
2994 implies that the GMX_RX_JABBER, MINERR,
2995 GMX_RX_DECISION, GMX_RX_UDD_SKP, and the
2996 GMX_RX_STATS_* do not require any adjustment as
2997 they operate on the received packet size.
2998 When the packet reaches PKI, its size will
2999 reflect the additional bytes and is subject to
3000 the restrictions below.
3001 If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1.
3003 PIP_PRT_CFGx[SKIP] should be increased by 8.
3004 PIP_PRT_CFGx[HIGIG_EN] should be 0.
3005 PIP_FRM_CHKx[MAXLEN] should be increased by 8.
3006 PIP_FRM_CHKx[MINLEN] should be increased by 8.
3007 PIP_TAG_INCx[EN] should be adjusted. */
3008 uint64_t reserved_11_11 : 1;
3009 uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
3010 due to PARTIAL packets */
3011 uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
3012 regardless of the number of previous PREAMBLE
3013 nibbles. In this mode, PRE_STRP should be set to
3014 account for the variable nature of the PREAMBLE.
3015 PRE_CHK must be set to enable this and all
3017 (SGMII at 10/100Mbs only) */
3018 uint64_t pad_len : 1; /**< When set, disables the length check for non-min
3019 sized pkts with padding in the client data
3021 uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
3022 uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
3023 GMX will begin the frame at the first SFD.
3024 PRE_CHK must be set to enable this and all
3026 (SGMII/1000Base-X only) */
3027 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
3028 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
3029 Multicast address */
3030 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
3031 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
3032 uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
3033 0=PREAMBLE+SFD is sent to core as part of frame
3034 1=PREAMBLE+SFD is dropped
3035 PRE_CHK must be set to enable this and all
3037 If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
3038 uint64_t pre_chk : 1; /**< This port is configured to send a valid 802.3
3039 PREAMBLE to begin every frame. GMX checks that a
3040 valid PREAMBLE is received (based on PRE_FREE).
3041 When a problem does occur within the PREAMBLE
3042 sequence, the frame is marked as bad and not sent
3043 into the core. The GMX_GMX_RX_INT_REG[PCTERR]
3044 interrupt is also raised.
3045 When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
3047 If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
3049 uint64_t pre_chk : 1;
3050 uint64_t pre_strp : 1;
3051 uint64_t ctl_drp : 1;
3052 uint64_t ctl_bck : 1;
3053 uint64_t ctl_mcst : 1;
3054 uint64_t ctl_smac : 1;
3055 uint64_t pre_free : 1;
3056 uint64_t vlan_len : 1;
3057 uint64_t pad_len : 1;
3058 uint64_t pre_align : 1;
3059 uint64_t null_dis : 1;
3060 uint64_t reserved_11_11 : 1;
3061 uint64_t ptp_mode : 1;
3062 uint64_t reserved_13_63 : 51;
3065 struct cvmx_gmxx_rxx_frm_ctl_cn30xx
3067 #if __BYTE_ORDER == __BIG_ENDIAN
3068 uint64_t reserved_9_63 : 55;
3069 uint64_t pad_len : 1; /**< When set, disables the length check for non-min
3070 sized pkts with padding in the client data */
3071 uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
3072 uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking.
3073 0-7 cycles of PREAMBLE followed by SFD (pass 1.0)
3074 0-254 cycles of PREAMBLE followed by SFD (else) */
3075 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
3076 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
3077 Multicast address */
3078 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
3079 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
3080 uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
3081 0=PREAMBLE+SFD is sent to core as part of frame
3082 1=PREAMBLE+SFD is dropped */
3083 uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
3084 to begin every frame. GMX checks that the
3085 PREAMBLE is sent correctly */
3087 uint64_t pre_chk : 1;
3088 uint64_t pre_strp : 1;
3089 uint64_t ctl_drp : 1;
3090 uint64_t ctl_bck : 1;
3091 uint64_t ctl_mcst : 1;
3092 uint64_t ctl_smac : 1;
3093 uint64_t pre_free : 1;
3094 uint64_t vlan_len : 1;
3095 uint64_t pad_len : 1;
3096 uint64_t reserved_9_63 : 55;
3099 struct cvmx_gmxx_rxx_frm_ctl_cn31xx
3101 #if __BYTE_ORDER == __BIG_ENDIAN
3102 uint64_t reserved_8_63 : 56;
3103 uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
3104 uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking.
3105 0 - 7 cycles of PREAMBLE followed by SFD (pass1.0)
3106 0 - 254 cycles of PREAMBLE followed by SFD (else) */
3107 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
3108 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
3109 Multicast address */
3110 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
3111 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
3112 uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
3113 0=PREAMBLE+SFD is sent to core as part of frame
3114 1=PREAMBLE+SFD is dropped */
3115 uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
3116 to begin every frame. GMX checks that the
3117 PREAMBLE is sent correctly */
3119 uint64_t pre_chk : 1;
3120 uint64_t pre_strp : 1;
3121 uint64_t ctl_drp : 1;
3122 uint64_t ctl_bck : 1;
3123 uint64_t ctl_mcst : 1;
3124 uint64_t ctl_smac : 1;
3125 uint64_t pre_free : 1;
3126 uint64_t vlan_len : 1;
3127 uint64_t reserved_8_63 : 56;
3130 struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx;
3131 struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2;
3132 struct cvmx_gmxx_rxx_frm_ctl_cn50xx
3134 #if __BYTE_ORDER == __BIG_ENDIAN
3135 uint64_t reserved_11_63 : 53;
3136 uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
3137 due to PARTIAL packets */
3138 uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
3139 regardless of the number of previous PREAMBLE
3140 nibbles. In this mode, PREAMBLE can be consumed
3141 by the HW so when PRE_ALIGN is set, PRE_FREE,
3142 PRE_STRP must be set for correct operation.
3143 PRE_CHK must be set to enable this and all
3144 PREAMBLE features. */
3145 uint64_t reserved_7_8 : 2;
3146 uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking.
3147 0-254 cycles of PREAMBLE followed by SFD */
3148 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
3149 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
3150 Multicast address */
3151 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
3152 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
3153 uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
3154 0=PREAMBLE+SFD is sent to core as part of frame
3155 1=PREAMBLE+SFD is dropped */
3156 uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
3157 to begin every frame. GMX checks that the
3158 PREAMBLE is sent correctly */
3160 uint64_t pre_chk : 1;
3161 uint64_t pre_strp : 1;
3162 uint64_t ctl_drp : 1;
3163 uint64_t ctl_bck : 1;
3164 uint64_t ctl_mcst : 1;
3165 uint64_t ctl_smac : 1;
3166 uint64_t pre_free : 1;
3167 uint64_t reserved_7_8 : 2;
3168 uint64_t pre_align : 1;
3169 uint64_t null_dis : 1;
3170 uint64_t reserved_11_63 : 53;
3173 struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx;
3174 struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1;
3175 struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx;
3176 struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1
3178 #if __BYTE_ORDER == __BIG_ENDIAN
3179 uint64_t reserved_10_63 : 54;
3180 uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
3181 regardless of the number of previous PREAMBLE
3182 nibbles. In this mode, PRE_STRP should be set to
3183 account for the variable nature of the PREAMBLE.
3184 PRE_CHK must be set to enable this and all
3186 (SGMII at 10/100Mbs only) */
3187 uint64_t reserved_7_8 : 2;
3188 uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
3189 0 - 254 cycles of PREAMBLE followed by SFD
3190 PRE_CHK must be set to enable this and all
3192 (SGMII/1000Base-X only) */
3193 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
3194 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
3195 Multicast address */
3196 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
3197 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
3198 uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
3199 0=PREAMBLE+SFD is sent to core as part of frame
3200 1=PREAMBLE+SFD is dropped
3201 PRE_CHK must be set to enable this and all
3202 PREAMBLE features. */
3203 uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
3204 to begin every frame. GMX checks that the
3205 PREAMBLE is sent correctly.
3206 When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
3209 uint64_t pre_chk : 1;
3210 uint64_t pre_strp : 1;
3211 uint64_t ctl_drp : 1;
3212 uint64_t ctl_bck : 1;
3213 uint64_t ctl_mcst : 1;
3214 uint64_t ctl_smac : 1;
3215 uint64_t pre_free : 1;
3216 uint64_t reserved_7_8 : 2;
3217 uint64_t pre_align : 1;
3218 uint64_t reserved_10_63 : 54;
3221 struct cvmx_gmxx_rxx_frm_ctl_cn58xx
3223 #if __BYTE_ORDER == __BIG_ENDIAN
3224 uint64_t reserved_11_63 : 53;
3225 uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
3226 due to PARTIAL packets
3227 In spi4 mode, all ports use prt0 for checking. */
3228 uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
3229 regardless of the number of previous PREAMBLE
3230 nibbles. In this mode, PREAMBLE can be consumed
3231 by the HW so when PRE_ALIGN is set, PRE_FREE,
3232 PRE_STRP must be set for correct operation.
3233 PRE_CHK must be set to enable this and all
3234 PREAMBLE features. */
3235 uint64_t pad_len : 1; /**< When set, disables the length check for non-min
3236 sized pkts with padding in the client data
3238 uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */
3239 uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
3240 0 - 254 cycles of PREAMBLE followed by SFD */
3241 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
3242 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
3243 Multicast address */
3244 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
3245 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
3246 uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
3247 0=PREAMBLE+SFD is sent to core as part of frame
3248 1=PREAMBLE+SFD is dropped */
3249 uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD
3250 to begin every frame. GMX checks that the
3251 PREAMBLE is sent correctly */
3253 uint64_t pre_chk : 1;
3254 uint64_t pre_strp : 1;
3255 uint64_t ctl_drp : 1;
3256 uint64_t ctl_bck : 1;
3257 uint64_t ctl_mcst : 1;
3258 uint64_t ctl_smac : 1;
3259 uint64_t pre_free : 1;
3260 uint64_t vlan_len : 1;
3261 uint64_t pad_len : 1;
3262 uint64_t pre_align : 1;
3263 uint64_t null_dis : 1;
3264 uint64_t reserved_11_63 : 53;
3267 struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1;
3268 struct cvmx_gmxx_rxx_frm_ctl_cn63xx
3270 #if __BYTE_ORDER == __BIG_ENDIAN
3271 uint64_t reserved_13_63 : 51;
3272 uint64_t ptp_mode : 1; /**< Timestamp mode
3273 When PTP_MODE is set, a 64-bit timestamp will be
3274 prepended to every incoming packet. The timestamp
3275 bytes are added to the packet in such a way as to
3276 not modify the packet's receive byte count. This
3277 implies that the GMX_RX_JABBER, MINERR,
3278 GMX_RX_DECISION, GMX_RX_UDD_SKP, and the
3279 GMX_RX_STATS_* do not require any adjustment as
3280 they operate on the received packet size.
3281 When the packet reaches PKI, its size will
3282 reflect the additional bytes and is subject to
3283 the restrictions below.
3284 If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1.
3286 PIP_PRT_CFGx[SKIP] should be increased by 8.
3287 PIP_PRT_CFGx[HIGIG_EN] should be 0.
3288 PIP_FRM_CHKx[MAXLEN] should be increased by 8.
3289 PIP_FRM_CHKx[MINLEN] should be increased by 8.
3290 PIP_TAG_INCx[EN] should be adjusted. */
3291 uint64_t reserved_11_11 : 1;
3292 uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks
3293 due to PARTIAL packets */
3294 uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte
3295 regardless of the number of previous PREAMBLE
3296 nibbles. In this mode, PRE_STRP should be set to
3297 account for the variable nature of the PREAMBLE.
3298 PRE_CHK must be set to enable this and all
3300 (SGMII at 10/100Mbs only) */
3301 uint64_t reserved_7_8 : 2;
3302 uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict.
3303 GMX will begin the frame at the first SFD.
3304 PRE_CHK must be set to enable this and all
3306 (SGMII/1000Base-X only) */
3307 uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */
3308 uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned
3309 Multicast address */
3310 uint64_t ctl_bck : 1; /**< Forward pause information to TX block */
3311 uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */
3312 uint64_t pre_strp : 1; /**< Strip off the preamble (when present)
3313 0=PREAMBLE+SFD is sent to core as part of frame
3314 1=PREAMBLE+SFD is dropped
3315 PRE_CHK must be set to enable this and all
3317 If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
3318 uint64_t pre_chk : 1; /**< This port is configured to send a valid 802.3
3319 PREAMBLE to begin every frame. GMX checks that a
3320 valid PREAMBLE is received (based on PRE_FREE).
3321 When a problem does occur within the PREAMBLE
3322 sequence, the frame is marked as bad and not sent
3323 into the core. The GMX_GMX_RX_INT_REG[PCTERR]
3324 interrupt is also raised.
3325 When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK
3327 If PTP_MODE=1 and PRE_CHK=1, PRE_STRP must be 1. */
3329 uint64_t pre_chk : 1;
3330 uint64_t pre_strp : 1;
3331 uint64_t ctl_drp : 1;
3332 uint64_t ctl_bck : 1;
3333 uint64_t ctl_mcst : 1;
3334 uint64_t ctl_smac : 1;
3335 uint64_t pre_free : 1;
3336 uint64_t reserved_7_8 : 2;
3337 uint64_t pre_align : 1;
3338 uint64_t null_dis : 1;
3339 uint64_t reserved_11_11 : 1;
3340 uint64_t ptp_mode : 1;
3341 uint64_t reserved_13_63 : 51;
3344 struct cvmx_gmxx_rxx_frm_ctl_cn63xx cn63xxp1;
3346 typedef union cvmx_gmxx_rxx_frm_ctl cvmx_gmxx_rxx_frm_ctl_t;
3349 * cvmx_gmx#_rx#_frm_max
3351 * GMX_RX_FRM_MAX = Frame Max length
3355 * In spi4 mode, all spi4 ports use prt0 for checking.
3357 * When changing the LEN field, be sure that LEN does not exceed
3358 * GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that
3359 * are within the maximum length parameter to be rejected because they exceed
3360 * the GMX_RX_JABBER[CNT] limit.
/* GMX per-port max-frame-length CSR: LEN is the byte count used by the
   MAXERR frame check. Only present on the chip variants listed below. */
3362 union cvmx_gmxx_rxx_frm_max
3365 struct cvmx_gmxx_rxx_frm_max_s
3367 #if __BYTE_ORDER == __BIG_ENDIAN
3368 uint64_t reserved_16_63 : 48;
3369 uint64_t len : 16; /**< Byte count for Max-sized frame check
3370 GMX_RXn_FRM_CHK[MAXERR] enables the check for
3372 If enabled, failing packets set the MAXERR
3373 interrupt and work-queue entry WORD2[opcode] is
3374 set to OVER_FCS (0x3, if packet has bad FCS) or
3375 OVER_ERR (0x4, if packet has good FCS).
3376 LEN <= GMX_RX_JABBER[CNT] */
3379 uint64_t reserved_16_63 : 48;
3382 struct cvmx_gmxx_rxx_frm_max_s cn30xx;
3383 struct cvmx_gmxx_rxx_frm_max_s cn31xx;
3384 struct cvmx_gmxx_rxx_frm_max_s cn38xx;
3385 struct cvmx_gmxx_rxx_frm_max_s cn38xxp2;
3386 struct cvmx_gmxx_rxx_frm_max_s cn58xx;
3387 struct cvmx_gmxx_rxx_frm_max_s cn58xxp1;
3389 typedef union cvmx_gmxx_rxx_frm_max cvmx_gmxx_rxx_frm_max_t;
3392 * cvmx_gmx#_rx#_frm_min
3394 * GMX_RX_FRM_MIN = Frame Min length
3398 * In spi4 mode, all spi4 ports use prt0 for checking.
/* GMX per-port min-frame-length CSR: LEN is the byte count used by the
   MINERR frame check. Only present on the chip variants listed below. */
3401 union cvmx_gmxx_rxx_frm_min
3404 struct cvmx_gmxx_rxx_frm_min_s
3406 #if __BYTE_ORDER == __BIG_ENDIAN
3407 uint64_t reserved_16_63 : 48;
3408 uint64_t len : 16; /**< Byte count for Min-sized frame check
3409 GMX_RXn_FRM_CHK[MINERR] enables the check for
3411 If enabled, failing packets set the MINERR
3412 interrupt and work-queue entry WORD2[opcode] is
3413 set to UNDER_FCS (0x6, if packet has bad FCS) or
3414 UNDER_ERR (0x8, if packet has good FCS). */
3417 uint64_t reserved_16_63 : 48;
3420 struct cvmx_gmxx_rxx_frm_min_s cn30xx;
3421 struct cvmx_gmxx_rxx_frm_min_s cn31xx;
3422 struct cvmx_gmxx_rxx_frm_min_s cn38xx;
3423 struct cvmx_gmxx_rxx_frm_min_s cn38xxp2;
3424 struct cvmx_gmxx_rxx_frm_min_s cn58xx;
3425 struct cvmx_gmxx_rxx_frm_min_s cn58xxp1;
3427 typedef union cvmx_gmxx_rxx_frm_min cvmx_gmxx_rxx_frm_min_t;
3432 * GMX_RX_IFG = RX Min IFG
/* GMX per-port minimum inter-frame-gap CSR: IFG (in units of IFG*8 bits)
   is the threshold used to flag IFGERR. All listed chip variants share
   the common _s layout. */
3435 union cvmx_gmxx_rxx_ifg
3438 struct cvmx_gmxx_rxx_ifg_s
3440 #if __BYTE_ORDER == __BIG_ENDIAN
3441 uint64_t reserved_4_63 : 60;
3442 uint64_t ifg : 4; /**< Min IFG (in IFG*8 bits) between packets used to
3443 determine IFGERR. Normally IFG is 96 bits.
3444 Note in some operating modes, IFG cycles can be
3445 inserted or removed in order to achieve clock rate
3446 adaptation. For these reasons, the default value
3447 is slightly conservative and does not check up to
3448 the full 96 bits of IFG.
3449 (SGMII/1000Base-X only) */
3452 uint64_t reserved_4_63 : 60;
3455 struct cvmx_gmxx_rxx_ifg_s cn30xx;
3456 struct cvmx_gmxx_rxx_ifg_s cn31xx;
3457 struct cvmx_gmxx_rxx_ifg_s cn38xx;
3458 struct cvmx_gmxx_rxx_ifg_s cn38xxp2;
3459 struct cvmx_gmxx_rxx_ifg_s cn50xx;
3460 struct cvmx_gmxx_rxx_ifg_s cn52xx;
3461 struct cvmx_gmxx_rxx_ifg_s cn52xxp1;
3462 struct cvmx_gmxx_rxx_ifg_s cn56xx;
3463 struct cvmx_gmxx_rxx_ifg_s cn56xxp1;
3464 struct cvmx_gmxx_rxx_ifg_s cn58xx;
3465 struct cvmx_gmxx_rxx_ifg_s cn58xxp1;
3466 struct cvmx_gmxx_rxx_ifg_s cn63xx;
3467 struct cvmx_gmxx_rxx_ifg_s cn63xxp1;
3469 typedef union cvmx_gmxx_rxx_ifg cvmx_gmxx_rxx_ifg_t;
3472 * cvmx_gmx#_rx#_int_en
3474 * GMX_RX_INT_EN = Interrupt Enable
3478 * In XAUI mode prt0 is used for checking.
3481 union cvmx_gmxx_rxx_int_en
3484 struct cvmx_gmxx_rxx_int_en_s
3486 #if __BYTE_ORDER == __BIG_ENDIAN
3487 uint64_t reserved_29_63 : 35;
3488 uint64_t hg2cc : 1; /**< HiGig2 CRC8 or Control char error interrupt enable */
3489 uint64_t hg2fld : 1; /**< HiGig2 Bad field error interrupt enable */
3490 uint64_t undat : 1; /**< Unexpected Data
3492 uint64_t uneop : 1; /**< Unexpected EOP
3494 uint64_t unsop : 1; /**< Unexpected SOP
3496 uint64_t bad_term : 1; /**< Frame is terminated by control character other
3497 than /T/. The error propagation control
3498 character /E/ will be included as part of the
3499 frame and does not cause a frame termination.
3501 uint64_t bad_seq : 1; /**< Reserved Sequence Deteted
3503 uint64_t rem_fault : 1; /**< Remote Fault Sequence Deteted
3505 uint64_t loc_fault : 1; /**< Local Fault Sequence Deteted
3507 uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
3508 uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex */
3509 uint64_t phy_spd : 1; /**< Change in the RMGII inbound LinkSpeed */
3510 uint64_t phy_link : 1; /**< Change in the RMGII inbound LinkStatus */
3511 uint64_t ifgerr : 1; /**< Interframe Gap Violation
3512 (SGMII/1000Base-X only) */
3513 uint64_t coldet : 1; /**< Collision Detection
3514 (SGMII/1000Base-X half-duplex only) */
3515 uint64_t falerr : 1; /**< False carrier error or extend error after slottime
3516 (SGMII/1000Base-X only) */
3517 uint64_t rsverr : 1; /**< Reserved opcodes */
3518 uint64_t pcterr : 1; /**< Bad Preamble / Protocol */
3519 uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
3520 (SGMII/1000Base-X only) */
3521 uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */
3522 uint64_t skperr : 1; /**< Skipper error */
3523 uint64_t rcverr : 1; /**< Frame was received with Data reception error */
3524 uint64_t lenerr : 1; /**< Frame was received with length error */
3525 uint64_t alnerr : 1; /**< Frame was received with an alignment error */
3526 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
3527 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
3528 uint64_t maxerr : 1; /**< Frame was received with length > max_length */
3529 uint64_t carext : 1; /**< Carrier extend error
3530 (SGMII/1000Base-X only) */
3531 uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize */
3533 uint64_t minerr : 1;
3534 uint64_t carext : 1;
3535 uint64_t maxerr : 1;
3536 uint64_t jabber : 1;
3537 uint64_t fcserr : 1;
3538 uint64_t alnerr : 1;
3539 uint64_t lenerr : 1;
3540 uint64_t rcverr : 1;
3541 uint64_t skperr : 1;
3542 uint64_t niberr : 1;
3543 uint64_t ovrerr : 1;
3544 uint64_t pcterr : 1;
3545 uint64_t rsverr : 1;
3546 uint64_t falerr : 1;
3547 uint64_t coldet : 1;
3548 uint64_t ifgerr : 1;
3549 uint64_t phy_link : 1;
3550 uint64_t phy_spd : 1;
3551 uint64_t phy_dupx : 1;
3552 uint64_t pause_drp : 1;
3553 uint64_t loc_fault : 1;
3554 uint64_t rem_fault : 1;
3555 uint64_t bad_seq : 1;
3556 uint64_t bad_term : 1;
3560 uint64_t hg2fld : 1;
3562 uint64_t reserved_29_63 : 35;
/*
 * GMX_RX_INT_EN field layout for CN30XX-class chips.
 * One 64-bit CSR worth of bit-fields: the #if branches list the same
 * fields MSB-first (big-endian hosts) and LSB-first (little-endian
 * hosts) so the struct overlays the register identically either way.
 */
struct cvmx_gmxx_rxx_int_en_cn30xx {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_19_63 : 45;
    uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex */
    uint64_t phy_spd : 1;  /**< Change in the RMGII inbound LinkSpeed */
    uint64_t phy_link : 1; /**< Change in the RMGII inbound LinkStatus */
    uint64_t ifgerr : 1;   /**< Interframe Gap Violation */
    uint64_t coldet : 1;   /**< Collision Detection */
    uint64_t falerr : 1;   /**< False carrier error or extend error after slottime */
    uint64_t rsverr : 1;   /**< RGMII reserved opcodes */
    uint64_t pcterr : 1;   /**< Bad Preamble / Protocol */
    uint64_t ovrerr : 1;   /**< Internal Data Aggregation Overflow */
    uint64_t niberr : 1;   /**< Nibble error (hi_nibble != lo_nibble) */
    uint64_t skperr : 1;   /**< Skipper error */
    uint64_t rcverr : 1;   /**< Frame was received with RMGII Data reception error */
    uint64_t lenerr : 1;   /**< Frame was received with length error */
    uint64_t alnerr : 1;   /**< Frame was received with an alignment error */
    uint64_t fcserr : 1;   /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;   /**< Frame was received with length > sys_length */
    uint64_t maxerr : 1;   /**< Frame was received with length > max_length */
    uint64_t carext : 1;   /**< RGMII carrier extend error */
    uint64_t minerr : 1;   /**< Frame was received with length < min_length */
#else
    uint64_t minerr : 1;
    uint64_t carext : 1;
    uint64_t maxerr : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t alnerr : 1;
    uint64_t lenerr : 1;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t niberr : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t phy_link : 1;
    uint64_t phy_spd : 1;
    uint64_t phy_dupx : 1;
    uint64_t reserved_19_63 : 45;
#endif
} cn30xx;
/* These chip variants share the CN30XX field layout. */
struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx;
struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx;
struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2;
/*
 * GMX_RX_INT_EN field layout for CN50XX.
 * Like CN30XX but adds PAUSE_DRP and reserves bits 0 (minerr),
 * 2 (maxerr) and 6 (lenerr).  Both #if branches describe the same
 * 64-bit register, MSB-first vs. LSB-first.
 */
struct cvmx_gmxx_rxx_int_en_cn50xx {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_20_63 : 44;
    uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
    uint64_t phy_dupx : 1;  /**< Change in the RMGII inbound LinkDuplex */
    uint64_t phy_spd : 1;   /**< Change in the RMGII inbound LinkSpeed */
    uint64_t phy_link : 1;  /**< Change in the RMGII inbound LinkStatus */
    uint64_t ifgerr : 1;    /**< Interframe Gap Violation */
    uint64_t coldet : 1;    /**< Collision Detection */
    uint64_t falerr : 1;    /**< False carrier error or extend error after slottime */
    uint64_t rsverr : 1;    /**< RGMII reserved opcodes */
    uint64_t pcterr : 1;    /**< Bad Preamble / Protocol */
    uint64_t ovrerr : 1;    /**< Internal Data Aggregation Overflow */
    uint64_t niberr : 1;    /**< Nibble error (hi_nibble != lo_nibble) */
    uint64_t skperr : 1;    /**< Skipper error */
    uint64_t rcverr : 1;    /**< Frame was received with RMGII Data reception error */
    uint64_t reserved_6_6 : 1;
    uint64_t alnerr : 1;    /**< Frame was received with an alignment error */
    uint64_t fcserr : 1;    /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;    /**< Frame was received with length > sys_length */
    uint64_t reserved_2_2 : 1;
    uint64_t carext : 1;    /**< RGMII carrier extend error */
    uint64_t reserved_0_0 : 1;
#else
    uint64_t reserved_0_0 : 1;
    uint64_t carext : 1;
    uint64_t reserved_2_2 : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t alnerr : 1;
    uint64_t reserved_6_6 : 1;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t niberr : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t phy_link : 1;
    uint64_t phy_spd : 1;
    uint64_t phy_dupx : 1;
    uint64_t pause_drp : 1;
    uint64_t reserved_20_63 : 44;
#endif
} cn50xx;
/*
 * GMX_RX_INT_EN field layout for CN52XX (SGMII/1000Base-X/XAUI ports).
 * Adds the XAUI/HiGig2 fault and framing enables (HG2CC..LOC_FAULT) and
 * drops the RGMII-only bits.  Both #if branches describe the same 64-bit
 * register, MSB-first vs. LSB-first.
 */
struct cvmx_gmxx_rxx_int_en_cn52xx {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_29_63 : 35;
    uint64_t hg2cc : 1;     /**< HiGig2 CRC8 or Control char error interrupt enable */
    uint64_t hg2fld : 1;    /**< HiGig2 Bad field error interrupt enable */
    uint64_t undat : 1;     /**< Unexpected Data */
    uint64_t uneop : 1;     /**< Unexpected EOP */
    uint64_t unsop : 1;     /**< Unexpected SOP */
    uint64_t bad_term : 1;  /**< Frame terminated by a control character other than
                                 /T/; /E/ is carried in the frame and does not
                                 terminate it. */
    uint64_t bad_seq : 1;   /**< Reserved Sequence Detected */
    uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected */
    uint64_t loc_fault : 1; /**< Local Fault Sequence Detected */
    uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
    uint64_t reserved_16_18 : 3;
    uint64_t ifgerr : 1;    /**< Interframe Gap Violation (SGMII/1000Base-X only) */
    uint64_t coldet : 1;    /**< Collision Detection (SGMII/1000Base-X half-duplex only) */
    uint64_t falerr : 1;    /**< False carrier error or extend error after slottime
                                 (SGMII/1000Base-X only) */
    uint64_t rsverr : 1;    /**< Reserved opcodes */
    uint64_t pcterr : 1;    /**< Bad Preamble / Protocol */
    uint64_t ovrerr : 1;    /**< Internal Data Aggregation Overflow
                                 (SGMII/1000Base-X only) */
    uint64_t reserved_9_9 : 1;
    uint64_t skperr : 1;    /**< Skipper error */
    uint64_t rcverr : 1;    /**< Frame was received with Data reception error */
    uint64_t reserved_5_6 : 2;
    uint64_t fcserr : 1;    /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;    /**< Frame was received with length > sys_length */
    uint64_t reserved_2_2 : 1;
    uint64_t carext : 1;    /**< Carrier extend error (SGMII/1000Base-X only) */
    uint64_t reserved_0_0 : 1;
#else
    uint64_t reserved_0_0 : 1;
    uint64_t carext : 1;
    uint64_t reserved_2_2 : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t reserved_5_6 : 2;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t reserved_9_9 : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t reserved_16_18 : 3;
    uint64_t pause_drp : 1;
    uint64_t loc_fault : 1;
    uint64_t rem_fault : 1;
    uint64_t bad_seq : 1;
    uint64_t bad_term : 1;
    uint64_t unsop : 1;
    uint64_t uneop : 1;
    uint64_t undat : 1;
    uint64_t hg2fld : 1;
    uint64_t hg2cc : 1;
    uint64_t reserved_29_63 : 35;
#endif
} cn52xx;
/* These chip variants share the CN52XX field layout. */
struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1;
struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx;
/*
 * GMX_RX_INT_EN field layout for CN56XX pass 1.
 * Identical to the CN52XX layout except that the HiGig2 enables
 * (HG2CC/HG2FLD, bits 27-28) do not exist.  Both #if branches describe
 * the same 64-bit register, MSB-first vs. LSB-first.
 */
struct cvmx_gmxx_rxx_int_en_cn56xxp1 {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_27_63 : 37;
    uint64_t undat : 1;     /**< Unexpected Data */
    uint64_t uneop : 1;     /**< Unexpected EOP */
    uint64_t unsop : 1;     /**< Unexpected SOP */
    uint64_t bad_term : 1;  /**< Frame terminated by a control character other than
                                 /T/; /E/ is carried in the frame and does not
                                 terminate it. */
    uint64_t bad_seq : 1;   /**< Reserved Sequence Detected */
    uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected */
    uint64_t loc_fault : 1; /**< Local Fault Sequence Detected */
    uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
    uint64_t reserved_16_18 : 3;
    uint64_t ifgerr : 1;    /**< Interframe Gap Violation (SGMII/1000Base-X only) */
    uint64_t coldet : 1;    /**< Collision Detection (SGMII/1000Base-X half-duplex only) */
    uint64_t falerr : 1;    /**< False carrier error or extend error after slottime
                                 (SGMII/1000Base-X only) */
    uint64_t rsverr : 1;    /**< Reserved opcodes */
    uint64_t pcterr : 1;    /**< Bad Preamble / Protocol */
    uint64_t ovrerr : 1;    /**< Internal Data Aggregation Overflow
                                 (SGMII/1000Base-X only) */
    uint64_t reserved_9_9 : 1;
    uint64_t skperr : 1;    /**< Skipper error */
    uint64_t rcverr : 1;    /**< Frame was received with Data reception error */
    uint64_t reserved_5_6 : 2;
    uint64_t fcserr : 1;    /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;    /**< Frame was received with length > sys_length */
    uint64_t reserved_2_2 : 1;
    uint64_t carext : 1;    /**< Carrier extend error (SGMII/1000Base-X only) */
    uint64_t reserved_0_0 : 1;
#else
    uint64_t reserved_0_0 : 1;
    uint64_t carext : 1;
    uint64_t reserved_2_2 : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t reserved_5_6 : 2;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t reserved_9_9 : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t reserved_16_18 : 3;
    uint64_t pause_drp : 1;
    uint64_t loc_fault : 1;
    uint64_t rem_fault : 1;
    uint64_t bad_seq : 1;
    uint64_t bad_term : 1;
    uint64_t unsop : 1;
    uint64_t uneop : 1;
    uint64_t undat : 1;
    uint64_t reserved_27_63 : 37;
#endif
} cn56xxp1;
/*
 * GMX_RX_INT_EN field layout for CN58XX.
 * Same as CN30XX plus PAUSE_DRP at bit 19.  Both #if branches describe
 * the same 64-bit register, MSB-first vs. LSB-first.
 */
struct cvmx_gmxx_rxx_int_en_cn58xx {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_20_63 : 44;
    uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
    uint64_t phy_dupx : 1;  /**< Change in the RMGII inbound LinkDuplex */
    uint64_t phy_spd : 1;   /**< Change in the RMGII inbound LinkSpeed */
    uint64_t phy_link : 1;  /**< Change in the RMGII inbound LinkStatus */
    uint64_t ifgerr : 1;    /**< Interframe Gap Violation */
    uint64_t coldet : 1;    /**< Collision Detection */
    uint64_t falerr : 1;    /**< False carrier error or extend error after slottime */
    uint64_t rsverr : 1;    /**< RGMII reserved opcodes */
    uint64_t pcterr : 1;    /**< Bad Preamble / Protocol */
    uint64_t ovrerr : 1;    /**< Internal Data Aggregation Overflow */
    uint64_t niberr : 1;    /**< Nibble error (hi_nibble != lo_nibble) */
    uint64_t skperr : 1;    /**< Skipper error */
    uint64_t rcverr : 1;    /**< Frame was received with RMGII Data reception error */
    uint64_t lenerr : 1;    /**< Frame was received with length error */
    uint64_t alnerr : 1;    /**< Frame was received with an alignment error */
    uint64_t fcserr : 1;    /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;    /**< Frame was received with length > sys_length */
    uint64_t maxerr : 1;    /**< Frame was received with length > max_length */
    uint64_t carext : 1;    /**< RGMII carrier extend error */
    uint64_t minerr : 1;    /**< Frame was received with length < min_length */
#else
    uint64_t minerr : 1;
    uint64_t carext : 1;
    uint64_t maxerr : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t alnerr : 1;
    uint64_t lenerr : 1;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t niberr : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t phy_link : 1;
    uint64_t phy_spd : 1;
    uint64_t phy_dupx : 1;
    uint64_t pause_drp : 1;
    uint64_t reserved_20_63 : 44;
#endif
} cn58xx;
/* CN58XX pass 1 shares the CN58XX field layout. */
struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1;
/*
 * GMX_RX_INT_EN field layout for CN63XX.
 * Same as CN52XX except bit 0 is MINERR (pause-frame runt enable)
 * instead of reserved.  Both #if branches describe the same 64-bit
 * register, MSB-first vs. LSB-first.
 */
struct cvmx_gmxx_rxx_int_en_cn63xx {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_29_63 : 35;
    uint64_t hg2cc : 1;     /**< HiGig2 CRC8 or Control char error interrupt enable */
    uint64_t hg2fld : 1;    /**< HiGig2 Bad field error interrupt enable */
    uint64_t undat : 1;     /**< Unexpected Data */
    uint64_t uneop : 1;     /**< Unexpected EOP */
    uint64_t unsop : 1;     /**< Unexpected SOP */
    uint64_t bad_term : 1;  /**< Frame terminated by a control character other than
                                 /T/; /E/ is carried in the frame and does not
                                 terminate it. */
    uint64_t bad_seq : 1;   /**< Reserved Sequence Detected */
    uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected */
    uint64_t loc_fault : 1; /**< Local Fault Sequence Detected */
    uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
    uint64_t reserved_16_18 : 3;
    uint64_t ifgerr : 1;    /**< Interframe Gap Violation (SGMII/1000Base-X only) */
    uint64_t coldet : 1;    /**< Collision Detection (SGMII/1000Base-X half-duplex only) */
    uint64_t falerr : 1;    /**< False carrier error or extend error after slottime
                                 (SGMII/1000Base-X only) */
    uint64_t rsverr : 1;    /**< Reserved opcodes */
    uint64_t pcterr : 1;    /**< Bad Preamble / Protocol */
    uint64_t ovrerr : 1;    /**< Internal Data Aggregation Overflow
                                 (SGMII/1000Base-X only) */
    uint64_t reserved_9_9 : 1;
    uint64_t skperr : 1;    /**< Skipper error */
    uint64_t rcverr : 1;    /**< Frame was received with Data reception error */
    uint64_t reserved_5_6 : 2;
    uint64_t fcserr : 1;    /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;    /**< Frame was received with length > sys_length */
    uint64_t reserved_2_2 : 1;
    uint64_t carext : 1;    /**< Carrier extend error (SGMII/1000Base-X only) */
    uint64_t minerr : 1;    /**< Pause Frame was received with length<minFrameSize */
#else
    uint64_t minerr : 1;
    uint64_t carext : 1;
    uint64_t reserved_2_2 : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t reserved_5_6 : 2;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t reserved_9_9 : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t reserved_16_18 : 3;
    uint64_t pause_drp : 1;
    uint64_t loc_fault : 1;
    uint64_t rem_fault : 1;
    uint64_t bad_seq : 1;
    uint64_t bad_term : 1;
    uint64_t unsop : 1;
    uint64_t uneop : 1;
    uint64_t undat : 1;
    uint64_t hg2fld : 1;
    uint64_t hg2cc : 1;
    uint64_t reserved_29_63 : 35;
#endif
} cn63xx;
/* CN63XX pass 1 shares the CN63XX field layout. */
struct cvmx_gmxx_rxx_int_en_cn63xx cn63xxp1;
/* Conventional _t alias for the GMX_RX_INT_EN register union. */
typedef union cvmx_gmxx_rxx_int_en cvmx_gmxx_rxx_int_en_t;
3939 * cvmx_gmx#_rx#_int_reg
3941 * GMX_RX_INT_REG = Interrupt Register
3945 * (1) exceptions will only be raised to the control processor if the
3946 * corresponding bit in the GMX_RX_INT_EN register is set.
3948 * (2) exception conditions 10:0 can also set the rcv/opcode in the received
3949 * packet's workQ entry. The GMX_RX_FRM_CHK register provides a bit mask
3950 * for configuring which conditions set the error.
3952 * (3) in half duplex operation, the expectation is that collisions will appear
3953 * as either MINERR or CAREXT errors.
3955 * (4) JABBER - An RX Jabber error indicates that a packet was received which
3956 * is longer than the maximum allowed packet as defined by the
3957 * system. GMX will truncate the packet at the JABBER count.
3958 * Failure to do so could lead to system instability.
3960 * (5) NIBERR - This error is illegal at 1000Mbs speeds
3961 * (GMX_RX_PRT_CFG[SPEED]==0) and will never assert.
3963 * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
3964 * GMX_RX_FRM_MAX. For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
3965 * > GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
3967 * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < 64
3969 * (8) ALNERR - Indicates that the packet received was not an integer number of
3970 * bytes. If FCS checking is enabled, ALNERR will only assert if
3971 * the FCS is bad. If FCS checking is disabled, ALNERR will
3972 * assert in all non-integer frame cases.
3974 * (9) Collisions - Collisions can only occur in half-duplex mode. A collision
3975 * is assumed by the receiver when the slottime
3976 * (GMX_PRT_CFG[SLOTTIME]) is not satisfied. In 10/100 mode,
3977 * this will result in a frame < SLOTTIME. In 1000 mode, it
3978 * could result either in frame < SLOTTIME or a carrier extend
3979 * error with the SLOTTIME. These conditions are visible by...
3981 * . transfer ended before slottime - COLDET
3982 * . carrier extend error - CAREXT
3984 * (A) LENERR - Length errors occur when the received packet does not match the
3985 * length field. LENERR is only checked for packets between 64
3986 * and 1500 bytes. For untagged frames, the length must exact
3987 * match. For tagged frames the length or length+4 must match.
3989 * (B) PCTERR - checks that the frame begins with a valid PREAMBLE sequence.
3990 * Does not check the number of PREAMBLE cycles.
3992 * (C) OVRERR - Not to be included in the HRM
3994 * OVRERR is an architectural assertion check internal to GMX to
3995 * make sure no assumption was violated. In a correctly operating
3996 * system, this interrupt can never fire.
3998 * GMX has an internal arbiter which selects which of 4 ports to
3999 * buffer in the main RX FIFO. If we normally buffer 8 bytes,
4000 * then each port will typically push a tick every 8 cycles - if
4001 * the packet interface is going as fast as possible. If there
4002 * are four ports, they push every two cycles. So that's the
4003 * assumption. That the inbound module will always be able to
4004 * consume the tick before another is produced. If that doesn't
4005 * happen - that's when OVRERR will assert.
4007 * (D) In XAUI mode prt0 is used for interrupt logging.
4009 union cvmx_gmxx_rxx_int_reg
/*
 * Canonical (superset) field layout of GMX_RX_INT_REG: the union of all
 * fields implemented by any chip variant.  Both #if branches describe
 * the same 64-bit register, MSB-first vs. LSB-first.
 */
struct cvmx_gmxx_rxx_int_reg_s {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_29_63 : 35;
    uint64_t hg2cc : 1;     /**< HiGig2 received message CRC or Control char error.
                                 Set on a CRC8 error or a Control Character in the
                                 message bytes after the K.SOM.  HG2CC has higher
                                 priority than HG2FLD: a message that sets HG2CC
                                 never sets HG2FLD. */
    uint64_t hg2fld : 1;    /**< HiGig2 received message field error:
                                 1) MSG_TYPE != 6'b00_0000 (not FLOW CONTROL)
                                 2) FWD_TYPE != 2'b00 (not Link Level msg)
                                 3) FC_OBJECT neither 4'b0000 (Physical Link)
                                    nor 4'b0010 (Logical Link) */
    uint64_t undat : 1;     /**< Unexpected Data */
    uint64_t uneop : 1;     /**< Unexpected EOP */
    uint64_t unsop : 1;     /**< Unexpected SOP */
    uint64_t bad_term : 1;  /**< Frame terminated by a control character other than
                                 /T/; /E/ is carried in the frame and does not
                                 terminate it. */
    uint64_t bad_seq : 1;   /**< Reserved Sequence Detected */
    uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected */
    uint64_t loc_fault : 1; /**< Local Fault Sequence Detected */
    uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
    uint64_t phy_dupx : 1;  /**< Change in the RMGII inbound LinkDuplex */
    uint64_t phy_spd : 1;   /**< Change in the RMGII inbound LinkSpeed */
    uint64_t phy_link : 1;  /**< Change in the RMGII inbound LinkStatus */
    uint64_t ifgerr : 1;    /**< Interframe Gap Violation; does not necessarily
                                 indicate a failure (SGMII/1000Base-X only) */
    uint64_t coldet : 1;    /**< Collision Detection (SGMII/1000Base-X half-duplex only) */
    uint64_t falerr : 1;    /**< False carrier error or extend error after slottime
                                 (SGMII/1000Base-X only) */
    uint64_t rsverr : 1;    /**< Reserved opcodes */
    uint64_t pcterr : 1;    /**< Bad Preamble / Protocol.  In XAUI mode the bad
                                 column is logged in GMX_RX_XAUI_BAD_COL */
    uint64_t ovrerr : 1;    /**< Internal Data Aggregation Overflow; should never
                                 assert (SGMII/1000Base-X only) */
    uint64_t niberr : 1;    /**< Nibble error (hi_nibble != lo_nibble) */
    uint64_t skperr : 1;    /**< Skipper error */
    uint64_t rcverr : 1;    /**< Frame was received with Data reception error */
    uint64_t lenerr : 1;    /**< Frame was received with length error */
    uint64_t alnerr : 1;    /**< Frame was received with an alignment error */
    uint64_t fcserr : 1;    /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;    /**< Frame was received with length > sys_length */
    uint64_t maxerr : 1;    /**< Frame was received with length > max_length */
    uint64_t carext : 1;    /**< Carrier extend error (SGMII/1000Base-X only) */
    uint64_t minerr : 1;    /**< Pause Frame was received with length<minFrameSize.
                                 Length checks are normally done in PIP
                                 (PIP_INT_REG[MINERR]), but pause frames are
                                 usually discarded before PIP sees them. */
#else
    uint64_t minerr : 1;
    uint64_t carext : 1;
    uint64_t maxerr : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t alnerr : 1;
    uint64_t lenerr : 1;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t niberr : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t phy_link : 1;
    uint64_t phy_spd : 1;
    uint64_t phy_dupx : 1;
    uint64_t pause_drp : 1;
    uint64_t loc_fault : 1;
    uint64_t rem_fault : 1;
    uint64_t bad_seq : 1;
    uint64_t bad_term : 1;
    uint64_t unsop : 1;
    uint64_t uneop : 1;
    uint64_t undat : 1;
    uint64_t hg2fld : 1;
    uint64_t hg2cc : 1;
    uint64_t reserved_29_63 : 35;
#endif
} s;
/*
 * GMX_RX_INT_REG field layout for CN30XX-class chips.
 * Both #if branches describe the same 64-bit register, MSB-first vs.
 * LSB-first.
 */
struct cvmx_gmxx_rxx_int_reg_cn30xx {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_19_63 : 45;
    uint64_t phy_dupx : 1; /**< Change in the RMGII inbound LinkDuplex */
    uint64_t phy_spd : 1;  /**< Change in the RMGII inbound LinkSpeed */
    uint64_t phy_link : 1; /**< Change in the RMGII inbound LinkStatus */
    uint64_t ifgerr : 1;   /**< Interframe Gap Violation; does not necessarily
                                indicate a failure */
    uint64_t coldet : 1;   /**< Collision Detection */
    uint64_t falerr : 1;   /**< False carrier error or extend error after slottime */
    uint64_t rsverr : 1;   /**< RGMII reserved opcodes */
    uint64_t pcterr : 1;   /**< Bad Preamble / Protocol */
    uint64_t ovrerr : 1;   /**< Internal Data Aggregation Overflow; should never assert */
    uint64_t niberr : 1;   /**< Nibble error (hi_nibble != lo_nibble) */
    uint64_t skperr : 1;   /**< Skipper error */
    uint64_t rcverr : 1;   /**< Frame was received with RMGII Data reception error */
    uint64_t lenerr : 1;   /**< Frame was received with length error */
    uint64_t alnerr : 1;   /**< Frame was received with an alignment error */
    uint64_t fcserr : 1;   /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;   /**< Frame was received with length > sys_length */
    uint64_t maxerr : 1;   /**< Frame was received with length > max_length */
    uint64_t carext : 1;   /**< RGMII carrier extend error */
    uint64_t minerr : 1;   /**< Frame was received with length < min_length */
#else
    uint64_t minerr : 1;
    uint64_t carext : 1;
    uint64_t maxerr : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t alnerr : 1;
    uint64_t lenerr : 1;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t niberr : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t phy_link : 1;
    uint64_t phy_spd : 1;
    uint64_t phy_dupx : 1;
    uint64_t reserved_19_63 : 45;
#endif
} cn30xx;
/* These chip variants share the CN30XX field layout. */
struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx;
struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx;
struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2;
/*
 * GMX_RX_INT_REG field layout for CN50XX.
 * Like CN30XX but adds PAUSE_DRP and reserves bits 0, 2 and 6.
 * Both #if branches describe the same 64-bit register, MSB-first vs.
 * LSB-first.
 */
struct cvmx_gmxx_rxx_int_reg_cn50xx {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_20_63 : 44;
    uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
    uint64_t phy_dupx : 1;  /**< Change in the RMGII inbound LinkDuplex */
    uint64_t phy_spd : 1;   /**< Change in the RMGII inbound LinkSpeed */
    uint64_t phy_link : 1;  /**< Change in the RMGII inbound LinkStatus */
    uint64_t ifgerr : 1;    /**< Interframe Gap Violation; does not necessarily
                                 indicate a failure */
    uint64_t coldet : 1;    /**< Collision Detection */
    uint64_t falerr : 1;    /**< False carrier error or extend error after slottime */
    uint64_t rsverr : 1;    /**< RGMII reserved opcodes */
    uint64_t pcterr : 1;    /**< Bad Preamble / Protocol */
    uint64_t ovrerr : 1;    /**< Internal Data Aggregation Overflow; should never assert */
    uint64_t niberr : 1;    /**< Nibble error (hi_nibble != lo_nibble) */
    uint64_t skperr : 1;    /**< Skipper error */
    uint64_t rcverr : 1;    /**< Frame was received with RMGII Data reception error */
    uint64_t reserved_6_6 : 1;
    uint64_t alnerr : 1;    /**< Frame was received with an alignment error */
    uint64_t fcserr : 1;    /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;    /**< Frame was received with length > sys_length */
    uint64_t reserved_2_2 : 1;
    uint64_t carext : 1;    /**< RGMII carrier extend error */
    uint64_t reserved_0_0 : 1;
#else
    uint64_t reserved_0_0 : 1;
    uint64_t carext : 1;
    uint64_t reserved_2_2 : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t alnerr : 1;
    uint64_t reserved_6_6 : 1;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t niberr : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t phy_link : 1;
    uint64_t phy_spd : 1;
    uint64_t phy_dupx : 1;
    uint64_t pause_drp : 1;
    uint64_t reserved_20_63 : 44;
#endif
} cn50xx;
/*
 * GMX_RX_INT_REG field layout for CN52XX (SGMII/1000Base-X/XAUI ports).
 * Both #if branches describe the same 64-bit register, MSB-first vs.
 * LSB-first.
 */
struct cvmx_gmxx_rxx_int_reg_cn52xx {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_29_63 : 35;
    uint64_t hg2cc : 1;     /**< HiGig2 received message CRC or Control char error.
                                 Set on a CRC8 error or a Control Character in the
                                 message bytes after the K.SOM.  HG2CC has higher
                                 priority than HG2FLD: a message that sets HG2CC
                                 never sets HG2FLD. */
    uint64_t hg2fld : 1;    /**< HiGig2 received message field error:
                                 1) MSG_TYPE != 6'b00_0000 (not FLOW CONTROL)
                                 2) FWD_TYPE != 2'b00 (not Link Level msg)
                                 3) FC_OBJECT neither 4'b0000 (Physical Link)
                                    nor 4'b0010 (Logical Link) */
    uint64_t undat : 1;     /**< Unexpected Data */
    uint64_t uneop : 1;     /**< Unexpected EOP */
    uint64_t unsop : 1;     /**< Unexpected SOP */
    uint64_t bad_term : 1;  /**< Frame terminated by a control character other than
                                 /T/; /E/ is carried in the frame and does not
                                 terminate it. */
    uint64_t bad_seq : 1;   /**< Reserved Sequence Detected */
    uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected */
    uint64_t loc_fault : 1; /**< Local Fault Sequence Detected */
    uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
    uint64_t reserved_16_18 : 3;
    uint64_t ifgerr : 1;    /**< Interframe Gap Violation; does not necessarily
                                 indicate a failure (SGMII/1000Base-X only) */
    uint64_t coldet : 1;    /**< Collision Detection (SGMII/1000Base-X half-duplex only) */
    uint64_t falerr : 1;    /**< False carrier error or extend error after slottime
                                 (SGMII/1000Base-X only) */
    uint64_t rsverr : 1;    /**< Reserved opcodes */
    uint64_t pcterr : 1;    /**< Bad Preamble / Protocol.  In XAUI mode the bad
                                 column is logged in GMX_RX_XAUI_BAD_COL */
    uint64_t ovrerr : 1;    /**< Internal Data Aggregation Overflow; should never
                                 assert (SGMII/1000Base-X only) */
    uint64_t reserved_9_9 : 1;
    uint64_t skperr : 1;    /**< Skipper error */
    uint64_t rcverr : 1;    /**< Frame was received with Data reception error */
    uint64_t reserved_5_6 : 2;
    uint64_t fcserr : 1;    /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;    /**< Frame was received with length > sys_length */
    uint64_t reserved_2_2 : 1;
    uint64_t carext : 1;    /**< Carrier extend error (SGMII/1000Base-X only) */
    uint64_t reserved_0_0 : 1;
#else
    uint64_t reserved_0_0 : 1;
    uint64_t carext : 1;
    uint64_t reserved_2_2 : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t reserved_5_6 : 2;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t reserved_9_9 : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t reserved_16_18 : 3;
    uint64_t pause_drp : 1;
    uint64_t loc_fault : 1;
    uint64_t rem_fault : 1;
    uint64_t bad_seq : 1;
    uint64_t bad_term : 1;
    uint64_t unsop : 1;
    uint64_t uneop : 1;
    uint64_t undat : 1;
    uint64_t hg2fld : 1;
    uint64_t hg2cc : 1;
    uint64_t reserved_29_63 : 35;
#endif
} cn52xx;
/* These chip variants share the CN52XX field layout. */
struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1;
struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx;
/*
 * GMX_RX_INT_REG field layout for CN56XX pass 1.
 * Identical to the CN52XX layout except the HiGig2 fields (bits 27-28)
 * do not exist.  Both #if branches describe the same 64-bit register,
 * MSB-first vs. LSB-first.
 */
struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_27_63 : 37;
    uint64_t undat : 1;     /**< Unexpected Data */
    uint64_t uneop : 1;     /**< Unexpected EOP */
    uint64_t unsop : 1;     /**< Unexpected SOP */
    uint64_t bad_term : 1;  /**< Frame terminated by a control character other than
                                 /T/; /E/ is carried in the frame and does not
                                 terminate it. */
    uint64_t bad_seq : 1;   /**< Reserved Sequence Detected */
    uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected */
    uint64_t loc_fault : 1; /**< Local Fault Sequence Detected */
    uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
    uint64_t reserved_16_18 : 3;
    uint64_t ifgerr : 1;    /**< Interframe Gap Violation; does not necessarily
                                 indicate a failure (SGMII/1000Base-X only) */
    uint64_t coldet : 1;    /**< Collision Detection (SGMII/1000Base-X half-duplex only) */
    uint64_t falerr : 1;    /**< False carrier error or extend error after slottime
                                 (SGMII/1000Base-X only) */
    uint64_t rsverr : 1;    /**< Reserved opcodes */
    uint64_t pcterr : 1;    /**< Bad Preamble / Protocol.  In XAUI mode the bad
                                 column is logged in GMX_RX_XAUI_BAD_COL */
    uint64_t ovrerr : 1;    /**< Internal Data Aggregation Overflow; should never
                                 assert (SGMII/1000Base-X only) */
    uint64_t reserved_9_9 : 1;
    uint64_t skperr : 1;    /**< Skipper error */
    uint64_t rcverr : 1;    /**< Frame was received with Data reception error */
    uint64_t reserved_5_6 : 2;
    uint64_t fcserr : 1;    /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;    /**< Frame was received with length > sys_length */
    uint64_t reserved_2_2 : 1;
    uint64_t carext : 1;    /**< Carrier extend error (SGMII/1000Base-X only) */
    uint64_t reserved_0_0 : 1;
#else
    uint64_t reserved_0_0 : 1;
    uint64_t carext : 1;
    uint64_t reserved_2_2 : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t reserved_5_6 : 2;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t reserved_9_9 : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t reserved_16_18 : 3;
    uint64_t pause_drp : 1;
    uint64_t loc_fault : 1;
    uint64_t rem_fault : 1;
    uint64_t bad_seq : 1;
    uint64_t bad_term : 1;
    uint64_t unsop : 1;
    uint64_t uneop : 1;
    uint64_t undat : 1;
    uint64_t reserved_27_63 : 37;
#endif
} cn56xxp1;
/*
 * GMX_RX_INT_REG field layout for CN58XX.
 * Same as CN30XX plus PAUSE_DRP at bit 19.  Both #if branches describe
 * the same 64-bit register, MSB-first vs. LSB-first.
 */
struct cvmx_gmxx_rxx_int_reg_cn58xx {
#if __BYTE_ORDER == __BIG_ENDIAN
    uint64_t reserved_20_63 : 44;
    uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
    uint64_t phy_dupx : 1;  /**< Change in the RMGII inbound LinkDuplex */
    uint64_t phy_spd : 1;   /**< Change in the RMGII inbound LinkSpeed */
    uint64_t phy_link : 1;  /**< Change in the RMGII inbound LinkStatus */
    uint64_t ifgerr : 1;    /**< Interframe Gap Violation; does not necessarily
                                 indicate a failure */
    uint64_t coldet : 1;    /**< Collision Detection */
    uint64_t falerr : 1;    /**< False carrier error or extend error after slottime */
    uint64_t rsverr : 1;    /**< RGMII reserved opcodes */
    uint64_t pcterr : 1;    /**< Bad Preamble / Protocol */
    uint64_t ovrerr : 1;    /**< Internal Data Aggregation Overflow; should never assert */
    uint64_t niberr : 1;    /**< Nibble error (hi_nibble != lo_nibble) */
    uint64_t skperr : 1;    /**< Skipper error */
    uint64_t rcverr : 1;    /**< Frame was received with RMGII Data reception error */
    uint64_t lenerr : 1;    /**< Frame was received with length error */
    uint64_t alnerr : 1;    /**< Frame was received with an alignment error */
    uint64_t fcserr : 1;    /**< Frame was received with FCS/CRC error */
    uint64_t jabber : 1;    /**< Frame was received with length > sys_length */
    uint64_t maxerr : 1;    /**< Frame was received with length > max_length */
    uint64_t carext : 1;    /**< RGMII carrier extend error */
    uint64_t minerr : 1;    /**< Frame was received with length < min_length */
#else
    uint64_t minerr : 1;
    uint64_t carext : 1;
    uint64_t maxerr : 1;
    uint64_t jabber : 1;
    uint64_t fcserr : 1;
    uint64_t alnerr : 1;
    uint64_t lenerr : 1;
    uint64_t rcverr : 1;
    uint64_t skperr : 1;
    uint64_t niberr : 1;
    uint64_t ovrerr : 1;
    uint64_t pcterr : 1;
    uint64_t rsverr : 1;
    uint64_t falerr : 1;
    uint64_t coldet : 1;
    uint64_t ifgerr : 1;
    uint64_t phy_link : 1;
    uint64_t phy_spd : 1;
    uint64_t phy_dupx : 1;
    uint64_t pause_drp : 1;
    uint64_t reserved_20_63 : 44;
#endif
} cn58xx;
/* CN58XX pass 1 shares the CN58XX field layout. */
struct cvmx_gmxx_rxx_int_reg_cn58xx cn58xxp1;
4436 struct cvmx_gmxx_rxx_int_reg_cn63xx
4438 #if __BYTE_ORDER == __BIG_ENDIAN
4439 uint64_t reserved_29_63 : 35;
4440 uint64_t hg2cc : 1; /**< HiGig2 received message CRC or Control char error
4441 Set when either CRC8 error detected or when
4442 a Control Character is found in the message
4443 bytes after the K.SOM
4444 NOTE: HG2CC has higher priority than HG2FLD
4445 i.e. a HiGig2 message that results in HG2CC
4446 getting set, will never set HG2FLD. */
4447 uint64_t hg2fld : 1; /**< HiGig2 received message field error, as below
4448 1) MSG_TYPE field not 6'b00_0000
4449 i.e. it is not a FLOW CONTROL message, which
4450 is the only defined type for HiGig2
4451 2) FWD_TYPE field not 2'b00 i.e. Link Level msg
4452 which is the only defined type for HiGig2
4453 3) FC_OBJECT field is neither 4'b0000 for
4454 Physical Link nor 4'b0010 for Logical Link.
4455 Those are the only two defined types in HiGig2 */
4456 uint64_t undat : 1; /**< Unexpected Data
4458 uint64_t uneop : 1; /**< Unexpected EOP
4460 uint64_t unsop : 1; /**< Unexpected SOP
4462 uint64_t bad_term : 1; /**< Frame is terminated by control character other
4463 than /T/. The error propagation control
4464 character /E/ will be included as part of the
4465 frame and does not cause a frame termination.
4467 uint64_t bad_seq : 1; /**< Reserved Sequence Deteted
4469 uint64_t rem_fault : 1; /**< Remote Fault Sequence Deteted
4471 uint64_t loc_fault : 1; /**< Local Fault Sequence Deteted
4473 uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */
4474 uint64_t reserved_16_18 : 3;
4475 uint64_t ifgerr : 1; /**< Interframe Gap Violation
4476 Does not necessarily indicate a failure
4477 (SGMII/1000Base-X only) */
4478 uint64_t coldet : 1; /**< Collision Detection
4479 (SGMII/1000Base-X half-duplex only) */
4480 uint64_t falerr : 1; /**< False carrier error or extend error after slottime
4481 (SGMII/1000Base-X only) */
4482 uint64_t rsverr : 1; /**< Reserved opcodes */
4483 uint64_t pcterr : 1; /**< Bad Preamble / Protocol
4484 In XAUI mode, the column of data that was bad
4485 will be logged in GMX_RX_XAUI_BAD_COL */
4486 uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow
4487 This interrupt should never assert
4488 (SGMII/1000Base-X only) */
4489 uint64_t reserved_9_9 : 1;
4490 uint64_t skperr : 1; /**< Skipper error */
4491 uint64_t rcverr : 1; /**< Frame was received with Data reception error */
4492 uint64_t reserved_5_6 : 2;
4493 uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */
4494 uint64_t jabber : 1; /**< Frame was received with length > sys_length */
4495 uint64_t reserved_2_2 : 1;
4496 uint64_t carext : 1; /**< Carrier extend error
4497 (SGMII/1000Base-X only) */
4498 uint64_t minerr : 1; /**< Pause Frame was received with length<minFrameSize
4499 Frame length checks are typically handled in PIP
4500 (PIP_INT_REG[MINERR]), but pause frames are
4501 normally discarded before being inspected by PIP. */
4503 uint64_t minerr : 1;
4504 uint64_t carext : 1;
4505 uint64_t reserved_2_2 : 1;
4506 uint64_t jabber : 1;
4507 uint64_t fcserr : 1;
4508 uint64_t reserved_5_6 : 2;
4509 uint64_t rcverr : 1;
4510 uint64_t skperr : 1;
4511 uint64_t reserved_9_9 : 1;
4512 uint64_t ovrerr : 1;
4513 uint64_t pcterr : 1;
4514 uint64_t rsverr : 1;
4515 uint64_t falerr : 1;
4516 uint64_t coldet : 1;
4517 uint64_t ifgerr : 1;
4518 uint64_t reserved_16_18 : 3;
4519 uint64_t pause_drp : 1;
4520 uint64_t loc_fault : 1;
4521 uint64_t rem_fault : 1;
4522 uint64_t bad_seq : 1;
4523 uint64_t bad_term : 1;
4527 uint64_t hg2fld : 1;
4529 uint64_t reserved_29_63 : 35;
4532 struct cvmx_gmxx_rxx_int_reg_cn63xx cn63xxp1;
4534 typedef union cvmx_gmxx_rxx_int_reg cvmx_gmxx_rxx_int_reg_t;
4537 * cvmx_gmx#_rx#_jabber
4539 * GMX_RX_JABBER = The max size packet after which GMX will truncate
4543 * CNT must be 8-byte aligned such that CNT[2:0] == 0
4545 * The packet that will be sent to the packet input logic will have an
4546 * additional 8 bytes if GMX_RX_FRM_CTL[PRE_CHK] is set and
4547 * GMX_RX_FRM_CTL[PRE_STRP] is clear. The max packet that will be sent is
4550 * max_sized_packet = GMX_RX_JABBER[CNT]+((GMX_RX_FRM_CTL[PRE_CHK] & !GMX_RX_FRM_CTL[PRE_STRP])*8)
4552 * In XAUI mode prt0 is used for checking.
4554 union cvmx_gmxx_rxx_jabber
4557 struct cvmx_gmxx_rxx_jabber_s
4559 #if __BYTE_ORDER == __BIG_ENDIAN
4560 uint64_t reserved_16_63 : 48;
4561 uint64_t cnt : 16; /**< Byte count for jabber check
4562 Failing packets set the JABBER interrupt and are
4563 optionally sent with opcode==JABBER
4564 GMX will truncate the packet to CNT bytes */
4567 uint64_t reserved_16_63 : 48;
/* Per-model aliases: all listed models use the common layout above. */
4570 struct cvmx_gmxx_rxx_jabber_s cn30xx;
4571 struct cvmx_gmxx_rxx_jabber_s cn31xx;
4572 struct cvmx_gmxx_rxx_jabber_s cn38xx;
4573 struct cvmx_gmxx_rxx_jabber_s cn38xxp2;
4574 struct cvmx_gmxx_rxx_jabber_s cn50xx;
4575 struct cvmx_gmxx_rxx_jabber_s cn52xx;
4576 struct cvmx_gmxx_rxx_jabber_s cn52xxp1;
4577 struct cvmx_gmxx_rxx_jabber_s cn56xx;
4578 struct cvmx_gmxx_rxx_jabber_s cn56xxp1;
4579 struct cvmx_gmxx_rxx_jabber_s cn58xx;
4580 struct cvmx_gmxx_rxx_jabber_s cn58xxp1;
4581 struct cvmx_gmxx_rxx_jabber_s cn63xx;
4582 struct cvmx_gmxx_rxx_jabber_s cn63xxp1;
4584 typedef union cvmx_gmxx_rxx_jabber cvmx_gmxx_rxx_jabber_t;
4587 * cvmx_gmx#_rx#_pause_drop_time
4589 * GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
4592 union cvmx_gmxx_rxx_pause_drop_time
4595 struct cvmx_gmxx_rxx_pause_drop_time_s
4597 #if __BYTE_ORDER == __BIG_ENDIAN
4598 uint64_t reserved_16_63 : 48;
4599 uint64_t status : 16; /**< Time extracted from the dropped PAUSE packet */
/* Little-endian mirror of the field order above. */
4601 uint64_t status : 16;
4602 uint64_t reserved_16_63 : 48;
/* Per-model aliases: all listed models use the common layout above. */
4605 struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx;
4606 struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx;
4607 struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1;
4608 struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx;
4609 struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1;
4610 struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx;
4611 struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1;
4612 struct cvmx_gmxx_rxx_pause_drop_time_s cn63xx;
4613 struct cvmx_gmxx_rxx_pause_drop_time_s cn63xxp1;
4615 typedef union cvmx_gmxx_rxx_pause_drop_time cvmx_gmxx_rxx_pause_drop_time_t;
4618 * cvmx_gmx#_rx#_rx_inbnd
4620 * GMX_RX_INBND = RGMII InBand Link Status
4624 * These fields are only valid if the attached PHY is operating in RGMII mode
4625 * and supports the optional in-band status (see section 3.4.1 of the RGMII
4626 * specification, version 1.3 for more information).
4628 union cvmx_gmxx_rxx_rx_inbnd
4631 struct cvmx_gmxx_rxx_rx_inbnd_s
4633 #if __BYTE_ORDER == __BIG_ENDIAN
4634 uint64_t reserved_4_63 : 60;
4635 uint64_t duplex : 1; /**< RGMII Inbound LinkDuplex
4638 uint64_t speed : 2; /**< RGMII Inbound LinkSpeed
4643 uint64_t status : 1; /**< RGMII Inbound LinkStatus
/* Little-endian mirror of the field order above. */
4647 uint64_t status : 1;
4649 uint64_t duplex : 1;
4650 uint64_t reserved_4_63 : 60;
/* NOTE(review): alias list omits cn52xx/cn56xx/cn63xx — in-band RGMII
   status appears limited to the older models; confirm against the HRM. */
4653 struct cvmx_gmxx_rxx_rx_inbnd_s cn30xx;
4654 struct cvmx_gmxx_rxx_rx_inbnd_s cn31xx;
4655 struct cvmx_gmxx_rxx_rx_inbnd_s cn38xx;
4656 struct cvmx_gmxx_rxx_rx_inbnd_s cn38xxp2;
4657 struct cvmx_gmxx_rxx_rx_inbnd_s cn50xx;
4658 struct cvmx_gmxx_rxx_rx_inbnd_s cn58xx;
4659 struct cvmx_gmxx_rxx_rx_inbnd_s cn58xxp1;
4661 typedef union cvmx_gmxx_rxx_rx_inbnd cvmx_gmxx_rxx_rx_inbnd_t;
4664 * cvmx_gmx#_rx#_stats_ctl
4666 * GMX_RX_STATS_CTL = RX Stats Control register
4669 union cvmx_gmxx_rxx_stats_ctl
4672 struct cvmx_gmxx_rxx_stats_ctl_s
4674 #if __BYTE_ORDER == __BIG_ENDIAN
4675 uint64_t reserved_1_63 : 63;
4676 uint64_t rd_clr : 1; /**< RX Stats registers will clear on reads */
/* Little-endian mirror of the field order above. */
4678 uint64_t rd_clr : 1;
4679 uint64_t reserved_1_63 : 63;
/* Per-model aliases: all listed models use the common layout above. */
4682 struct cvmx_gmxx_rxx_stats_ctl_s cn30xx;
4683 struct cvmx_gmxx_rxx_stats_ctl_s cn31xx;
4684 struct cvmx_gmxx_rxx_stats_ctl_s cn38xx;
4685 struct cvmx_gmxx_rxx_stats_ctl_s cn38xxp2;
4686 struct cvmx_gmxx_rxx_stats_ctl_s cn50xx;
4687 struct cvmx_gmxx_rxx_stats_ctl_s cn52xx;
4688 struct cvmx_gmxx_rxx_stats_ctl_s cn52xxp1;
4689 struct cvmx_gmxx_rxx_stats_ctl_s cn56xx;
4690 struct cvmx_gmxx_rxx_stats_ctl_s cn56xxp1;
4691 struct cvmx_gmxx_rxx_stats_ctl_s cn58xx;
4692 struct cvmx_gmxx_rxx_stats_ctl_s cn58xxp1;
4693 struct cvmx_gmxx_rxx_stats_ctl_s cn63xx;
4694 struct cvmx_gmxx_rxx_stats_ctl_s cn63xxp1;
4696 typedef union cvmx_gmxx_rxx_stats_ctl cvmx_gmxx_rxx_stats_ctl_t;
4699 * cvmx_gmx#_rx#_stats_octs
4702 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4703 * - Counters will wrap
4705 union cvmx_gmxx_rxx_stats_octs
4708 struct cvmx_gmxx_rxx_stats_octs_s
4710 #if __BYTE_ORDER == __BIG_ENDIAN
4711 uint64_t reserved_48_63 : 16;
4712 uint64_t cnt : 48; /**< Octet count of received good packets */
4715 uint64_t reserved_48_63 : 16;
/* Per-model aliases: all listed models use the common layout above. */
4718 struct cvmx_gmxx_rxx_stats_octs_s cn30xx;
4719 struct cvmx_gmxx_rxx_stats_octs_s cn31xx;
4720 struct cvmx_gmxx_rxx_stats_octs_s cn38xx;
4721 struct cvmx_gmxx_rxx_stats_octs_s cn38xxp2;
4722 struct cvmx_gmxx_rxx_stats_octs_s cn50xx;
4723 struct cvmx_gmxx_rxx_stats_octs_s cn52xx;
4724 struct cvmx_gmxx_rxx_stats_octs_s cn52xxp1;
4725 struct cvmx_gmxx_rxx_stats_octs_s cn56xx;
4726 struct cvmx_gmxx_rxx_stats_octs_s cn56xxp1;
4727 struct cvmx_gmxx_rxx_stats_octs_s cn58xx;
4728 struct cvmx_gmxx_rxx_stats_octs_s cn58xxp1;
4729 struct cvmx_gmxx_rxx_stats_octs_s cn63xx;
4730 struct cvmx_gmxx_rxx_stats_octs_s cn63xxp1;
4732 typedef union cvmx_gmxx_rxx_stats_octs cvmx_gmxx_rxx_stats_octs_t;
4735 * cvmx_gmx#_rx#_stats_octs_ctl
4738 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4739 * - Counters will wrap
4741 union cvmx_gmxx_rxx_stats_octs_ctl
4744 struct cvmx_gmxx_rxx_stats_octs_ctl_s
4746 #if __BYTE_ORDER == __BIG_ENDIAN
4747 uint64_t reserved_48_63 : 16;
4748 uint64_t cnt : 48; /**< Octet count of received pause packets */
4751 uint64_t reserved_48_63 : 16;
/* Per-model aliases: all listed models use the common layout above. */
4754 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx;
4755 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx;
4756 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx;
4757 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2;
4758 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx;
4759 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx;
4760 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1;
4761 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx;
4762 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1;
4763 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx;
4764 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1;
4765 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xx;
4766 struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xxp1;
4768 typedef union cvmx_gmxx_rxx_stats_octs_ctl cvmx_gmxx_rxx_stats_octs_ctl_t;
4771 * cvmx_gmx#_rx#_stats_octs_dmac
4774 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4775 * - Counters will wrap
4777 union cvmx_gmxx_rxx_stats_octs_dmac
4780 struct cvmx_gmxx_rxx_stats_octs_dmac_s
4782 #if __BYTE_ORDER == __BIG_ENDIAN
4783 uint64_t reserved_48_63 : 16;
4784 uint64_t cnt : 48; /**< Octet count of filtered dmac packets */
4787 uint64_t reserved_48_63 : 16;
/* Per-model aliases: all listed models use the common layout above. */
4790 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx;
4791 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx;
4792 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx;
4793 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2;
4794 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx;
4795 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx;
4796 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1;
4797 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx;
4798 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1;
4799 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx;
4800 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1;
4801 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xx;
4802 struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xxp1;
4804 typedef union cvmx_gmxx_rxx_stats_octs_dmac cvmx_gmxx_rxx_stats_octs_dmac_t;
4807 * cvmx_gmx#_rx#_stats_octs_drp
4810 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4811 * - Counters will wrap
4813 union cvmx_gmxx_rxx_stats_octs_drp
4816 struct cvmx_gmxx_rxx_stats_octs_drp_s
4818 #if __BYTE_ORDER == __BIG_ENDIAN
4819 uint64_t reserved_48_63 : 16;
4820 uint64_t cnt : 48; /**< Octet count of dropped packets */
4823 uint64_t reserved_48_63 : 16;
/* Per-model aliases: all listed models use the common layout above. */
4826 struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx;
4827 struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx;
4828 struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx;
4829 struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2;
4830 struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx;
4831 struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx;
4832 struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1;
4833 struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx;
4834 struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1;
4835 struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx;
4836 struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1;
4837 struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xx;
4838 struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xxp1;
4840 typedef union cvmx_gmxx_rxx_stats_octs_drp cvmx_gmxx_rxx_stats_octs_drp_t;
4843 * cvmx_gmx#_rx#_stats_pkts
4847 * Count of good received packets - packets that are not recognized as PAUSE
4848 * packets, dropped due the DMAC filter, dropped due FIFO full status, or
4849 * have any other OPCODE (FCS, Length, etc).
4852 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4853 * - Counters will wrap
4855 union cvmx_gmxx_rxx_stats_pkts
4858 struct cvmx_gmxx_rxx_stats_pkts_s
4860 #if __BYTE_ORDER == __BIG_ENDIAN
4861 uint64_t reserved_32_63 : 32;
4862 uint64_t cnt : 32; /**< Count of received good packets */
4865 uint64_t reserved_32_63 : 32;
/* Per-model aliases: all listed models use the common layout above. */
4868 struct cvmx_gmxx_rxx_stats_pkts_s cn30xx;
4869 struct cvmx_gmxx_rxx_stats_pkts_s cn31xx;
4870 struct cvmx_gmxx_rxx_stats_pkts_s cn38xx;
4871 struct cvmx_gmxx_rxx_stats_pkts_s cn38xxp2;
4872 struct cvmx_gmxx_rxx_stats_pkts_s cn50xx;
4873 struct cvmx_gmxx_rxx_stats_pkts_s cn52xx;
4874 struct cvmx_gmxx_rxx_stats_pkts_s cn52xxp1;
4875 struct cvmx_gmxx_rxx_stats_pkts_s cn56xx;
4876 struct cvmx_gmxx_rxx_stats_pkts_s cn56xxp1;
4877 struct cvmx_gmxx_rxx_stats_pkts_s cn58xx;
4878 struct cvmx_gmxx_rxx_stats_pkts_s cn58xxp1;
4879 struct cvmx_gmxx_rxx_stats_pkts_s cn63xx;
4880 struct cvmx_gmxx_rxx_stats_pkts_s cn63xxp1;
4882 typedef union cvmx_gmxx_rxx_stats_pkts cvmx_gmxx_rxx_stats_pkts_t;
4885 * cvmx_gmx#_rx#_stats_pkts_bad
4887 * GMX_RX_STATS_PKTS_BAD
4889 * Count of all packets received with some error that were not dropped
4890 * either due to the dmac filter or lack of room in the receive FIFO.
4893 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4894 * - Counters will wrap
4896 union cvmx_gmxx_rxx_stats_pkts_bad
4899 struct cvmx_gmxx_rxx_stats_pkts_bad_s
4901 #if __BYTE_ORDER == __BIG_ENDIAN
4902 uint64_t reserved_32_63 : 32;
4903 uint64_t cnt : 32; /**< Count of bad packets */
4906 uint64_t reserved_32_63 : 32;
/* Per-model aliases: all listed models use the common layout above. */
4909 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx;
4910 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx;
4911 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx;
4912 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2;
4913 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx;
4914 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx;
4915 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1;
4916 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx;
4917 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1;
4918 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx;
4919 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1;
4920 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xx;
4921 struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xxp1;
4923 typedef union cvmx_gmxx_rxx_stats_pkts_bad cvmx_gmxx_rxx_stats_pkts_bad_t;
4926 * cvmx_gmx#_rx#_stats_pkts_ctl
4928 * GMX_RX_STATS_PKTS_CTL
4930 * Count of all packets received that were recognized as Flow Control or
4931 * PAUSE packets. PAUSE packets with any kind of error are counted in
4932 * GMX_RX_STATS_PKTS_BAD. Pause packets can be optionally dropped or
4933 * forwarded based on the GMX_RX_FRM_CTL[CTL_DRP] bit. This count
4934 * increments regardless of whether the packet is dropped. Pause packets
4935 * will never be counted in GMX_RX_STATS_PKTS. Packets dropped due the dmac
4936 * filter will be counted in GMX_RX_STATS_PKTS_DMAC and not here.
4939 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4940 * - Counters will wrap
4942 union cvmx_gmxx_rxx_stats_pkts_ctl
4945 struct cvmx_gmxx_rxx_stats_pkts_ctl_s
4947 #if __BYTE_ORDER == __BIG_ENDIAN
4948 uint64_t reserved_32_63 : 32;
4949 uint64_t cnt : 32; /**< Count of received pause packets */
4952 uint64_t reserved_32_63 : 32;
/* Per-model aliases: all listed models use the common layout above. */
4955 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx;
4956 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx;
4957 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx;
4958 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2;
4959 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx;
4960 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx;
4961 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1;
4962 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx;
4963 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1;
4964 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx;
4965 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1;
4966 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xx;
4967 struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xxp1;
4969 typedef union cvmx_gmxx_rxx_stats_pkts_ctl cvmx_gmxx_rxx_stats_pkts_ctl_t;
4972 * cvmx_gmx#_rx#_stats_pkts_dmac
4974 * GMX_RX_STATS_PKTS_DMAC
4976 * Count of all packets received that were dropped by the dmac filter.
4977 * Packets that match the DMAC will be dropped and counted here regardless
4978 * of if they were bad packets. These packets will never be counted in
4979 * GMX_RX_STATS_PKTS.
4981 * Some packets that were not able to satisfy the DECISION_CNT may not
4982 * actually be dropped by Octeon, but they will be counted here as if they
4986 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
4987 * - Counters will wrap
4989 union cvmx_gmxx_rxx_stats_pkts_dmac
4992 struct cvmx_gmxx_rxx_stats_pkts_dmac_s
4994 #if __BYTE_ORDER == __BIG_ENDIAN
4995 uint64_t reserved_32_63 : 32;
4996 uint64_t cnt : 32; /**< Count of filtered dmac packets */
4999 uint64_t reserved_32_63 : 32;
/* Per-model aliases: all listed models use the common layout above. */
5002 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx;
5003 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx;
5004 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx;
5005 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2;
5006 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx;
5007 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx;
5008 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1;
5009 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx;
5010 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1;
5011 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx;
5012 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1;
5013 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xx;
5014 struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xxp1;
5016 typedef union cvmx_gmxx_rxx_stats_pkts_dmac cvmx_gmxx_rxx_stats_pkts_dmac_t;
5019 * cvmx_gmx#_rx#_stats_pkts_drp
5021 * GMX_RX_STATS_PKTS_DRP
5023 * Count of all packets received that were dropped due to a full receive
5024 * FIFO. This counts good and bad packets received - all packets dropped by
5025 * the FIFO. It does not count packets dropped by the dmac or pause packet
5029 * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set
5030 * - Counters will wrap
5032 union cvmx_gmxx_rxx_stats_pkts_drp
5035 struct cvmx_gmxx_rxx_stats_pkts_drp_s
5037 #if __BYTE_ORDER == __BIG_ENDIAN
5038 uint64_t reserved_32_63 : 32;
5039 uint64_t cnt : 32; /**< Count of dropped packets */
5042 uint64_t reserved_32_63 : 32;
/* Per-model aliases: all listed models use the common layout above. */
5045 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx;
5046 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx;
5047 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx;
5048 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2;
5049 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx;
5050 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx;
5051 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1;
5052 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx;
5053 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1;
5054 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx;
5055 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1;
5056 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xx;
5057 struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xxp1;
5059 typedef union cvmx_gmxx_rxx_stats_pkts_drp cvmx_gmxx_rxx_stats_pkts_drp_t;
5062 * cvmx_gmx#_rx#_udd_skp
5064 * GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
5068 * (1) The skip bytes are part of the packet and will be sent down the NCB
5069 * packet interface and will be handled by PKI.
5071 * (2) The system can determine if the UDD bytes are included in the FCS check
5072 * by using the FCSSEL field - if the FCS check is enabled.
5074 * (3) Assume that the preamble/sfd is always at the start of the frame - even
5075 * before UDD bytes. In most cases, there will be no preamble in these
5076 * cases since it will be packet interface in direct communication to
5077 * another packet interface (MAC to MAC) without a PHY involved.
5079 * (4) We can still do address filtering and control packet filtering is the
5082 * (5) UDD_SKP must be 0 in half-duplex operation unless
5083 * GMX_RX_FRM_CTL[PRE_CHK] is clear. If GMX_RX_FRM_CTL[PRE_CHK] is clear,
5084 * then UDD_SKP will normally be 8.
5086 * (6) In all cases, the UDD bytes will be sent down the packet interface as
5087 * part of the packet. The UDD bytes are never stripped from the actual
5090 * (7) If LEN != 0, then GMX_RX_FRM_CHK[LENERR] will be disabled and GMX_RX_INT_REG[LENERR] will be zero
5092 union cvmx_gmxx_rxx_udd_skp
5095 struct cvmx_gmxx_rxx_udd_skp_s
5097 #if __BYTE_ORDER == __BIG_ENDIAN
5098 uint64_t reserved_9_63 : 55;
5099 uint64_t fcssel : 1; /**< Include the skip bytes in the FCS calculation
5100 0 = all skip bytes are included in FCS
5101 1 = the skip bytes are not included in FCS
5102 When GMX_TX_XAUI_CTL[HG_EN] is set, FCSSEL must
5104 uint64_t reserved_7_7 : 1;
5105 uint64_t len : 7; /**< Amount of User-defined data before the start of
5106 the L2 data. Zero means L2 comes first.
5108 When GMX_TX_XAUI_CTL[HG_EN] is set, LEN must be
5109 set to 12 or 16 (depending on HiGig header size)
5110 to account for the HiGig header. LEN=12 selects
5111 HiGig/HiGig+, and LEN=16 selects HiGig2. */
5114 uint64_t reserved_7_7 : 1;
5115 uint64_t fcssel : 1;
5116 uint64_t reserved_9_63 : 55;
/* Per-model aliases: all listed models use the common layout above. */
5119 struct cvmx_gmxx_rxx_udd_skp_s cn30xx;
5120 struct cvmx_gmxx_rxx_udd_skp_s cn31xx;
5121 struct cvmx_gmxx_rxx_udd_skp_s cn38xx;
5122 struct cvmx_gmxx_rxx_udd_skp_s cn38xxp2;
5123 struct cvmx_gmxx_rxx_udd_skp_s cn50xx;
5124 struct cvmx_gmxx_rxx_udd_skp_s cn52xx;
5125 struct cvmx_gmxx_rxx_udd_skp_s cn52xxp1;
5126 struct cvmx_gmxx_rxx_udd_skp_s cn56xx;
5127 struct cvmx_gmxx_rxx_udd_skp_s cn56xxp1;
5128 struct cvmx_gmxx_rxx_udd_skp_s cn58xx;
5129 struct cvmx_gmxx_rxx_udd_skp_s cn58xxp1;
5130 struct cvmx_gmxx_rxx_udd_skp_s cn63xx;
5131 struct cvmx_gmxx_rxx_udd_skp_s cn63xxp1;
5133 typedef union cvmx_gmxx_rxx_udd_skp cvmx_gmxx_rxx_udd_skp_t;
5136 * cvmx_gmx#_rx_bp_drop#
5138 * GMX_RX_BP_DROP = FIFO mark for packet drop
5142 * The actual watermark is dynamic with respect to the GMX_RX_PRTS
5143 * register. The GMX_RX_PRTS controls the depth of the port's
5144 * FIFO so as ports are added or removed, the drop point may change.
5146 * In XAUI mode prt0 is used for checking.
5148 union cvmx_gmxx_rx_bp_dropx
5151 struct cvmx_gmxx_rx_bp_dropx_s
5153 #if __BYTE_ORDER == __BIG_ENDIAN
5154 uint64_t reserved_6_63 : 58;
5155 uint64_t mark : 6; /**< Number of 8B ticks to reserve in the RX FIFO.
5156 When the FIFO exceeds this count, packets will
5157 be dropped and not buffered.
5158 MARK should typically be programmed to ports+1.
5159 Failure to program correctly can lead to system
5163 uint64_t reserved_6_63 : 58;
/* Per-model aliases: all listed models use the common layout above. */
5166 struct cvmx_gmxx_rx_bp_dropx_s cn30xx;
5167 struct cvmx_gmxx_rx_bp_dropx_s cn31xx;
5168 struct cvmx_gmxx_rx_bp_dropx_s cn38xx;
5169 struct cvmx_gmxx_rx_bp_dropx_s cn38xxp2;
5170 struct cvmx_gmxx_rx_bp_dropx_s cn50xx;
5171 struct cvmx_gmxx_rx_bp_dropx_s cn52xx;
5172 struct cvmx_gmxx_rx_bp_dropx_s cn52xxp1;
5173 struct cvmx_gmxx_rx_bp_dropx_s cn56xx;
5174 struct cvmx_gmxx_rx_bp_dropx_s cn56xxp1;
5175 struct cvmx_gmxx_rx_bp_dropx_s cn58xx;
5176 struct cvmx_gmxx_rx_bp_dropx_s cn58xxp1;
5177 struct cvmx_gmxx_rx_bp_dropx_s cn63xx;
5178 struct cvmx_gmxx_rx_bp_dropx_s cn63xxp1;
5180 typedef union cvmx_gmxx_rx_bp_dropx cvmx_gmxx_rx_bp_dropx_t;
5183 * cvmx_gmx#_rx_bp_off#
5185 * GMX_RX_BP_OFF = Lowater mark for packet drop
5189 * In XAUI mode, prt0 is used for checking.
5192 union cvmx_gmxx_rx_bp_offx
5195 struct cvmx_gmxx_rx_bp_offx_s
5197 #if __BYTE_ORDER == __BIG_ENDIAN
5198 uint64_t reserved_6_63 : 58;
5199 uint64_t mark : 6; /**< Water mark (8B ticks) to deassert backpressure */
5202 uint64_t reserved_6_63 : 58;
/* Per-model aliases: all listed models use the common layout above. */
5205 struct cvmx_gmxx_rx_bp_offx_s cn30xx;
5206 struct cvmx_gmxx_rx_bp_offx_s cn31xx;
5207 struct cvmx_gmxx_rx_bp_offx_s cn38xx;
5208 struct cvmx_gmxx_rx_bp_offx_s cn38xxp2;
5209 struct cvmx_gmxx_rx_bp_offx_s cn50xx;
5210 struct cvmx_gmxx_rx_bp_offx_s cn52xx;
5211 struct cvmx_gmxx_rx_bp_offx_s cn52xxp1;
5212 struct cvmx_gmxx_rx_bp_offx_s cn56xx;
5213 struct cvmx_gmxx_rx_bp_offx_s cn56xxp1;
5214 struct cvmx_gmxx_rx_bp_offx_s cn58xx;
5215 struct cvmx_gmxx_rx_bp_offx_s cn58xxp1;
5216 struct cvmx_gmxx_rx_bp_offx_s cn63xx;
5217 struct cvmx_gmxx_rx_bp_offx_s cn63xxp1;
5219 typedef union cvmx_gmxx_rx_bp_offx cvmx_gmxx_rx_bp_offx_t;
5222 * cvmx_gmx#_rx_bp_on#
5224 * GMX_RX_BP_ON = Hiwater mark for port/interface backpressure
5228 * In XAUI mode, prt0 is used for checking.
5231 union cvmx_gmxx_rx_bp_onx
5234 struct cvmx_gmxx_rx_bp_onx_s
5236 #if __BYTE_ORDER == __BIG_ENDIAN
5237 uint64_t reserved_9_63 : 55;
5238 uint64_t mark : 9; /**< Hiwater mark (8B ticks) for backpressure.
5239 Each register is for an individual port. In XAUI
5240 mode, prt0 is used for the unified RX FIFO
5241 GMX_RX_BP_ON must satisfy
5242 BP_OFF <= BP_ON < (FIFO_SIZE - BP_DROP)
5243 A value of zero will immediately assert back
5247 uint64_t reserved_9_63 : 55;
/* Per-model aliases: all listed models use the common layout above. */
5250 struct cvmx_gmxx_rx_bp_onx_s cn30xx;
5251 struct cvmx_gmxx_rx_bp_onx_s cn31xx;
5252 struct cvmx_gmxx_rx_bp_onx_s cn38xx;
5253 struct cvmx_gmxx_rx_bp_onx_s cn38xxp2;
5254 struct cvmx_gmxx_rx_bp_onx_s cn50xx;
5255 struct cvmx_gmxx_rx_bp_onx_s cn52xx;
5256 struct cvmx_gmxx_rx_bp_onx_s cn52xxp1;
5257 struct cvmx_gmxx_rx_bp_onx_s cn56xx;
5258 struct cvmx_gmxx_rx_bp_onx_s cn56xxp1;
5259 struct cvmx_gmxx_rx_bp_onx_s cn58xx;
5260 struct cvmx_gmxx_rx_bp_onx_s cn58xxp1;
5261 struct cvmx_gmxx_rx_bp_onx_s cn63xx;
5262 struct cvmx_gmxx_rx_bp_onx_s cn63xxp1;
5264 typedef union cvmx_gmxx_rx_bp_onx cvmx_gmxx_rx_bp_onx_t;
5267 * cvmx_gmx#_rx_hg2_status
5269 * ** HG2 message CSRs
5272 union cvmx_gmxx_rx_hg2_status
5275 struct cvmx_gmxx_rx_hg2_status_s
5277 #if __BYTE_ORDER == __BIG_ENDIAN
5278 uint64_t reserved_48_63 : 16;
5279 uint64_t phtim2go : 16; /**< Physical time to go for removal of physical link
5280 pause. Initial value from received HiGig2 msg pkt
5281 Non-zero only when physical back pressure active */
5282 uint64_t xof : 16; /**< 16 bit xof back pressure vector from HiGig2 msg pkt
5283 or from CBFC packets.
5284 Non-zero only when logical back pressure is active
5285 All bits will be 0 when LGTIM2GO=0 */
5286 uint64_t lgtim2go : 16; /**< Logical packet flow back pressure time remaining
5287 Initial value set from xof time field of HiGig2
5288 message packet received or a function of the
5289 enabled and current timers for CBFC packets.
5290 Non-zero only when logical back pressure is active */
/* Little-endian mirror of the field order above. */
5292 uint64_t lgtim2go : 16;
5294 uint64_t phtim2go : 16;
5295 uint64_t reserved_48_63 : 16;
/* NOTE(review): alias list covers HiGig2-capable models only. */
5298 struct cvmx_gmxx_rx_hg2_status_s cn52xx;
5299 struct cvmx_gmxx_rx_hg2_status_s cn52xxp1;
5300 struct cvmx_gmxx_rx_hg2_status_s cn56xx;
5301 struct cvmx_gmxx_rx_hg2_status_s cn63xx;
5302 struct cvmx_gmxx_rx_hg2_status_s cn63xxp1;
5304 typedef union cvmx_gmxx_rx_hg2_status cvmx_gmxx_rx_hg2_status_t;
5307 * cvmx_gmx#_rx_pass_en
5309 * GMX_RX_PASS_EN = Packet pass through mode enable
5311 * When both Octane ports are running in Spi4 mode, packets can be directly
5312 * passed from one SPX interface to the other without being processed by the
5313 * core or PP's. The register has one bit for each port to enable the pass
5317 * (1) Can only be used in dual Spi4 configs
5319 * (2) The mapped pass through output port cannot be the destination port for
5320 * any Octane core traffic.
5322 union cvmx_gmxx_rx_pass_en
5325 struct cvmx_gmxx_rx_pass_en_s
5327 #if __BYTE_ORDER == __BIG_ENDIAN
5328 uint64_t reserved_16_63 : 48;
5329 uint64_t en : 16; /**< Which ports to configure in pass through mode */
5332 uint64_t reserved_16_63 : 48;
/* NOTE(review): only Spi4-capable models listed (pass-through is Spi4-only). */
5335 struct cvmx_gmxx_rx_pass_en_s cn38xx;
5336 struct cvmx_gmxx_rx_pass_en_s cn38xxp2;
5337 struct cvmx_gmxx_rx_pass_en_s cn58xx;
5338 struct cvmx_gmxx_rx_pass_en_s cn58xxp1;
5340 typedef union cvmx_gmxx_rx_pass_en cvmx_gmxx_rx_pass_en_t;
5343 * cvmx_gmx#_rx_pass_map#
5345 * GMX_RX_PASS_MAP = Packet pass through port map
5348 union cvmx_gmxx_rx_pass_mapx
5351 struct cvmx_gmxx_rx_pass_mapx_s
5353 #if __BYTE_ORDER == __BIG_ENDIAN
5354 uint64_t reserved_4_63 : 60;
5355 uint64_t dprt : 4; /**< Destination port to map Spi pass through traffic */
5358 uint64_t reserved_4_63 : 60;
/* NOTE(review): only Spi4-capable models listed (pass-through is Spi4-only). */
5361 struct cvmx_gmxx_rx_pass_mapx_s cn38xx;
5362 struct cvmx_gmxx_rx_pass_mapx_s cn38xxp2;
5363 struct cvmx_gmxx_rx_pass_mapx_s cn58xx;
5364 struct cvmx_gmxx_rx_pass_mapx_s cn58xxp1;
5366 typedef union cvmx_gmxx_rx_pass_mapx cvmx_gmxx_rx_pass_mapx_t;
5369 * cvmx_gmx#_rx_prt_info
5371 * GMX_RX_PRT_INFO = Report the RX status for port
5375 * In XAUI mode, only the lsb (corresponding to port0) of DROP and COMMIT are used.
5378 union cvmx_gmxx_rx_prt_info
5381 struct cvmx_gmxx_rx_prt_info_s
5383 #if __BYTE_ORDER == __BIG_ENDIAN
5384 uint64_t reserved_32_63 : 32;
5385 uint64_t drop : 16; /**< Per port indication that data was dropped */
5386 uint64_t commit : 16; /**< Per port indication that SOP was accepted */
5388 uint64_t commit : 16;
5390 uint64_t reserved_32_63 : 32;
/* Narrower per-model layouts below differ only in the number of port bits. */
5393 struct cvmx_gmxx_rx_prt_info_cn30xx
5395 #if __BYTE_ORDER == __BIG_ENDIAN
5396 uint64_t reserved_19_63 : 45;
5397 uint64_t drop : 3; /**< Per port indication that data was dropped */
5398 uint64_t reserved_3_15 : 13;
5399 uint64_t commit : 3; /**< Per port indication that SOP was accepted */
5401 uint64_t commit : 3;
5402 uint64_t reserved_3_15 : 13;
5404 uint64_t reserved_19_63 : 45;
5407 struct cvmx_gmxx_rx_prt_info_cn30xx cn31xx;
5408 struct cvmx_gmxx_rx_prt_info_s cn38xx;
5409 struct cvmx_gmxx_rx_prt_info_cn30xx cn50xx;
5410 struct cvmx_gmxx_rx_prt_info_cn52xx
5412 #if __BYTE_ORDER == __BIG_ENDIAN
5413 uint64_t reserved_20_63 : 44;
5414 uint64_t drop : 4; /**< Per port indication that data was dropped */
5415 uint64_t reserved_4_15 : 12;
5416 uint64_t commit : 4; /**< Per port indication that SOP was accepted */
5418 uint64_t commit : 4;
5419 uint64_t reserved_4_15 : 12;
5421 uint64_t reserved_20_63 : 44;
5424 struct cvmx_gmxx_rx_prt_info_cn52xx cn52xxp1;
5425 struct cvmx_gmxx_rx_prt_info_cn52xx cn56xx;
5426 struct cvmx_gmxx_rx_prt_info_cn52xx cn56xxp1;
5427 struct cvmx_gmxx_rx_prt_info_s cn58xx;
5428 struct cvmx_gmxx_rx_prt_info_s cn58xxp1;
5429 struct cvmx_gmxx_rx_prt_info_cn52xx cn63xx;
5430 struct cvmx_gmxx_rx_prt_info_cn52xx cn63xxp1;
5432 typedef union cvmx_gmxx_rx_prt_info cvmx_gmxx_rx_prt_info_t;
5437 * GMX_RX_PRTS = Number of FIFOs to carve the RX buffer into
5441 * GMX_RX_PRTS[PRTS] must be set to '1' in XAUI mode.
5444 union cvmx_gmxx_rx_prts
5447 struct cvmx_gmxx_rx_prts_s
5449 #if __BYTE_ORDER == __BIG_ENDIAN
5450 uint64_t reserved_3_63 : 61;
5451 uint64_t prts : 3; /**< In SGMII/1000Base-X mode, the RX buffer can be
5452 carved into several logical buffers depending on
5453 the number or implemented ports.
5454 0 or 1 port = 512ticks / 4096bytes
5455 2 ports = 256ticks / 2048bytes
5456 3 or 4 ports = 128ticks / 1024bytes */
5459 uint64_t reserved_3_63 : 61;
/* Per-model aliases: all listed models use the common layout above. */
5462 struct cvmx_gmxx_rx_prts_s cn30xx;
5463 struct cvmx_gmxx_rx_prts_s cn31xx;
5464 struct cvmx_gmxx_rx_prts_s cn38xx;
5465 struct cvmx_gmxx_rx_prts_s cn38xxp2;
5466 struct cvmx_gmxx_rx_prts_s cn50xx;
5467 struct cvmx_gmxx_rx_prts_s cn52xx;
5468 struct cvmx_gmxx_rx_prts_s cn52xxp1;
5469 struct cvmx_gmxx_rx_prts_s cn56xx;
5470 struct cvmx_gmxx_rx_prts_s cn56xxp1;
5471 struct cvmx_gmxx_rx_prts_s cn58xx;
5472 struct cvmx_gmxx_rx_prts_s cn58xxp1;
5473 struct cvmx_gmxx_rx_prts_s cn63xx;
5474 struct cvmx_gmxx_rx_prts_s cn63xxp1;
5476 typedef union cvmx_gmxx_rx_prts cvmx_gmxx_rx_prts_t;
/**
 * cvmx_gmx#_rx_tx_status
 *
 * GMX_RX_TX_STATUS = GMX RX/TX Status
 */
union cvmx_gmxx_rx_tx_status {
	uint64_t u64;
	struct cvmx_gmxx_rx_tx_status_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_7_63 : 57;
		uint64_t tx : 3; /**< Transmit data since last read */
		uint64_t reserved_3_3 : 1;
		uint64_t rx : 3; /**< Receive data since last read */
#else
		uint64_t rx : 3;
		uint64_t reserved_3_3 : 1;
		uint64_t tx : 3;
		uint64_t reserved_7_63 : 57;
#endif
	} s;
	struct cvmx_gmxx_rx_tx_status_s cn30xx;
	struct cvmx_gmxx_rx_tx_status_s cn31xx;
	struct cvmx_gmxx_rx_tx_status_s cn50xx;
};
typedef union cvmx_gmxx_rx_tx_status cvmx_gmxx_rx_tx_status_t;
/**
 * cvmx_gmx#_rx_xaui_bad_col
 *
 * Captures the XAUI column (data + control) and receive state at the
 * time GMX_RX_INT_REG[PCTERR] is set.
 */
union cvmx_gmxx_rx_xaui_bad_col {
	uint64_t u64;
	struct cvmx_gmxx_rx_xaui_bad_col_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_40_63 : 24;
		uint64_t val : 1;       /**< Set when GMX_RX_INT_REG[PCTERR] is set */
		uint64_t state : 3;     /**< Receive state at the time of the error
		                             (valid when GMX_RX_INT_REG[PCTERR] is set) */
		uint64_t lane_rxc : 4;  /**< XAUI control column at the time of the error
		                             (valid when GMX_RX_INT_REG[PCTERR] is set) */
		uint64_t lane_rxd : 32; /**< XAUI data column at the time of the error
		                             (valid when GMX_RX_INT_REG[PCTERR] is set) */
#else
		uint64_t lane_rxd : 32;
		uint64_t lane_rxc : 4;
		uint64_t state : 3;
		uint64_t val : 1;
		uint64_t reserved_40_63 : 24;
#endif
	} s;
	struct cvmx_gmxx_rx_xaui_bad_col_s cn52xx;
	struct cvmx_gmxx_rx_xaui_bad_col_s cn52xxp1;
	struct cvmx_gmxx_rx_xaui_bad_col_s cn56xx;
	struct cvmx_gmxx_rx_xaui_bad_col_s cn56xxp1;
	struct cvmx_gmxx_rx_xaui_bad_col_s cn63xx;
	struct cvmx_gmxx_rx_xaui_bad_col_s cn63xxp1;
};
typedef union cvmx_gmxx_rx_xaui_bad_col cvmx_gmxx_rx_xaui_bad_col_t;
/**
 * cvmx_gmx#_rx_xaui_ctl
 */
union cvmx_gmxx_rx_xaui_ctl {
	uint64_t u64;
	struct cvmx_gmxx_rx_xaui_ctl_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_2_63 : 62;
		uint64_t status : 2; /**< Link Status */
#else
		uint64_t status : 2;
		uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_gmxx_rx_xaui_ctl_s cn52xx;
	struct cvmx_gmxx_rx_xaui_ctl_s cn52xxp1;
	struct cvmx_gmxx_rx_xaui_ctl_s cn56xx;
	struct cvmx_gmxx_rx_xaui_ctl_s cn56xxp1;
	struct cvmx_gmxx_rx_xaui_ctl_s cn63xx;
	struct cvmx_gmxx_rx_xaui_ctl_s cn63xxp1;
};
typedef union cvmx_gmxx_rx_xaui_ctl cvmx_gmxx_rx_xaui_ctl_t;
/**
 * cvmx_gmx#_smac#
 *
 * GMX_SMAC = Packet SMAC
 */
union cvmx_gmxx_smacx {
	uint64_t u64;
	struct cvmx_gmxx_smacx_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_48_63 : 16;
		uint64_t smac : 48; /**< The SMAC field is used for generating and
		                         accepting Control Pause packets */
#else
		uint64_t smac : 48;
		uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_gmxx_smacx_s cn30xx;
	struct cvmx_gmxx_smacx_s cn31xx;
	struct cvmx_gmxx_smacx_s cn38xx;
	struct cvmx_gmxx_smacx_s cn38xxp2;
	struct cvmx_gmxx_smacx_s cn50xx;
	struct cvmx_gmxx_smacx_s cn52xx;
	struct cvmx_gmxx_smacx_s cn52xxp1;
	struct cvmx_gmxx_smacx_s cn56xx;
	struct cvmx_gmxx_smacx_s cn56xxp1;
	struct cvmx_gmxx_smacx_s cn58xx;
	struct cvmx_gmxx_smacx_s cn58xxp1;
	struct cvmx_gmxx_smacx_s cn63xx;
	struct cvmx_gmxx_smacx_s cn63xxp1;
};
typedef union cvmx_gmxx_smacx cvmx_gmxx_smacx_t;
/**
 * cvmx_gmx#_soft_bist
 *
 * GMX_SOFT_BIST = Software BIST Control
 */
union cvmx_gmxx_soft_bist {
	uint64_t u64;
	struct cvmx_gmxx_soft_bist_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_2_63 : 62;
		uint64_t start_bist : 1; /**< Run BIST on all memories in the XAUI CLK domain */
		uint64_t clear_bist : 1; /**< Choose between full BIST and CLEAR BIST
		                              1=Only run clear BIST */
#else
		uint64_t clear_bist : 1;
		uint64_t start_bist : 1;
		uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_gmxx_soft_bist_s cn63xx;
	struct cvmx_gmxx_soft_bist_s cn63xxp1;
};
typedef union cvmx_gmxx_soft_bist cvmx_gmxx_soft_bist_t;
/**
 * cvmx_gmx#_stat_bp
 *
 * GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation
 */
union cvmx_gmxx_stat_bp {
	uint64_t u64;
	struct cvmx_gmxx_stat_bp_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_17_63 : 47;
		uint64_t bp : 1;   /**< Current BP state */
		uint64_t cnt : 16; /**< Number of cycles that BP has been asserted
		                        Saturating counter */
#else
		uint64_t cnt : 16;
		uint64_t bp : 1;
		uint64_t reserved_17_63 : 47;
#endif
	} s;
	struct cvmx_gmxx_stat_bp_s cn30xx;
	struct cvmx_gmxx_stat_bp_s cn31xx;
	struct cvmx_gmxx_stat_bp_s cn38xx;
	struct cvmx_gmxx_stat_bp_s cn38xxp2;
	struct cvmx_gmxx_stat_bp_s cn50xx;
	struct cvmx_gmxx_stat_bp_s cn52xx;
	struct cvmx_gmxx_stat_bp_s cn52xxp1;
	struct cvmx_gmxx_stat_bp_s cn56xx;
	struct cvmx_gmxx_stat_bp_s cn56xxp1;
	struct cvmx_gmxx_stat_bp_s cn58xx;
	struct cvmx_gmxx_stat_bp_s cn58xxp1;
	struct cvmx_gmxx_stat_bp_s cn63xx;
	struct cvmx_gmxx_stat_bp_s cn63xxp1;
};
typedef union cvmx_gmxx_stat_bp cvmx_gmxx_stat_bp_t;
/**
 * cvmx_gmx#_tx#_append
 *
 * GMX_TX_APPEND = Packet TX Append Control
 */
union cvmx_gmxx_txx_append {
	uint64_t u64;
	struct cvmx_gmxx_txx_append_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_4_63 : 60;
		uint64_t force_fcs : 1; /**< Append the Ethernet FCS on each pause packet
		                             when FCS is clear. Pause packets are normally
		                             padded to 60 bytes. If GMX_TX_MIN_PKT[MIN_SIZE]
		                             exceeds 59, then FORCE_FCS will not be used. */
		uint64_t fcs : 1;       /**< Append the Ethernet FCS on each packet */
		uint64_t pad : 1;       /**< Append PAD bytes such that min sized */
		uint64_t preamble : 1;  /**< Prepend the Ethernet preamble on each transfer
		                             When GMX_TX_XAUI_CTL[HG_EN] is set, PREAMBLE
		                             must be zero. */
#else
		uint64_t preamble : 1;
		uint64_t pad : 1;
		uint64_t fcs : 1;
		uint64_t force_fcs : 1;
		uint64_t reserved_4_63 : 60;
#endif
	} s;
	struct cvmx_gmxx_txx_append_s cn30xx;
	struct cvmx_gmxx_txx_append_s cn31xx;
	struct cvmx_gmxx_txx_append_s cn38xx;
	struct cvmx_gmxx_txx_append_s cn38xxp2;
	struct cvmx_gmxx_txx_append_s cn50xx;
	struct cvmx_gmxx_txx_append_s cn52xx;
	struct cvmx_gmxx_txx_append_s cn52xxp1;
	struct cvmx_gmxx_txx_append_s cn56xx;
	struct cvmx_gmxx_txx_append_s cn56xxp1;
	struct cvmx_gmxx_txx_append_s cn58xx;
	struct cvmx_gmxx_txx_append_s cn58xxp1;
	struct cvmx_gmxx_txx_append_s cn63xx;
	struct cvmx_gmxx_txx_append_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_append cvmx_gmxx_txx_append_t;
/**
 * cvmx_gmx#_tx#_burst
 *
 * GMX_TX_BURST = Packet TX Burst Counter
 */
union cvmx_gmxx_txx_burst {
	uint64_t u64;
	struct cvmx_gmxx_txx_burst_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_16_63 : 48;
		uint64_t burst : 16; /**< Burst (refer to 802.3 to set correctly)
		                          Only valid for 1000Mbs half-duplex operation
		                          halfdup / 1000Mbs: 0x2000
		                          all other modes:   0x0
		                          (SGMII/1000Base-X only) */
#else
		uint64_t burst : 16;
		uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_burst_s cn30xx;
	struct cvmx_gmxx_txx_burst_s cn31xx;
	struct cvmx_gmxx_txx_burst_s cn38xx;
	struct cvmx_gmxx_txx_burst_s cn38xxp2;
	struct cvmx_gmxx_txx_burst_s cn50xx;
	struct cvmx_gmxx_txx_burst_s cn52xx;
	struct cvmx_gmxx_txx_burst_s cn52xxp1;
	struct cvmx_gmxx_txx_burst_s cn56xx;
	struct cvmx_gmxx_txx_burst_s cn56xxp1;
	struct cvmx_gmxx_txx_burst_s cn58xx;
	struct cvmx_gmxx_txx_burst_s cn58xxp1;
	struct cvmx_gmxx_txx_burst_s cn63xx;
	struct cvmx_gmxx_txx_burst_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_burst cvmx_gmxx_txx_burst_t;
/**
 * cvmx_gmx#_tx#_cbfc_xoff
 */
union cvmx_gmxx_txx_cbfc_xoff {
	uint64_t u64;
	struct cvmx_gmxx_txx_cbfc_xoff_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_16_63 : 48;
		uint64_t xoff : 16; /**< Which ports to backpressure
		                         Do not write in HiGig2 mode i.e. when
		                         GMX_TX_XAUI_CTL[HG_EN]=1 and
		                         GMX_RX_UDD_SKP[SKIP]=16. */
#else
		uint64_t xoff : 16;
		uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_cbfc_xoff_s cn52xx;
	struct cvmx_gmxx_txx_cbfc_xoff_s cn56xx;
	struct cvmx_gmxx_txx_cbfc_xoff_s cn63xx;
	struct cvmx_gmxx_txx_cbfc_xoff_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_cbfc_xoff cvmx_gmxx_txx_cbfc_xoff_t;
/**
 * cvmx_gmx#_tx#_cbfc_xon
 */
union cvmx_gmxx_txx_cbfc_xon {
	uint64_t u64;
	struct cvmx_gmxx_txx_cbfc_xon_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_16_63 : 48;
		uint64_t xon : 16; /**< Which ports to stop backpressure
		                        Do not write in HiGig2 mode i.e. when
		                        GMX_TX_XAUI_CTL[HG_EN]=1 and
		                        GMX_RX_UDD_SKP[SKIP]=16. */
#else
		uint64_t xon : 16;
		uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_cbfc_xon_s cn52xx;
	struct cvmx_gmxx_txx_cbfc_xon_s cn56xx;
	struct cvmx_gmxx_txx_cbfc_xon_s cn63xx;
	struct cvmx_gmxx_txx_cbfc_xon_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_cbfc_xon cvmx_gmxx_txx_cbfc_xon_t;
/**
 * cvmx_gmx#_tx#_clk
 *
 * GMX_TX_CLK = RGMII TX Clock Generation Register
 *
 * Programming Restrictions:
 *  (1) In RGMII mode, if GMX_PRT_CFG[SPEED]==0, then CLK_CNT must be > 1.
 *  (2) In MII mode, CLK_CNT == 1
 *  (3) In RGMII or GMII mode, if CLK_CNT==0, Octeon will not generate a tx clock.
 *
 * Given a 125MHz PLL reference clock...
 *  CLK_CNT ==  1 ==> 125.0MHz TXC clock period (8ns* 1)
 *  CLK_CNT ==  5 ==>  25.0MHz TXC clock period (8ns* 5)
 *  CLK_CNT == 50 ==>   2.5MHz TXC clock period (8ns*50)
 */
union cvmx_gmxx_txx_clk {
	uint64_t u64;
	struct cvmx_gmxx_txx_clk_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_6_63 : 58;
		uint64_t clk_cnt : 6; /**< Controls the RGMII TXC frequency
		                           When PLL is used, TXC(phase) =
		                            spi4_tx_pll_ref_clk(period)/2*CLK_CNT
		                           When PLL bypass is used, TXC(phase) =
		                            spi4_tx_pll_ref_clk(period)*2*CLK_CNT
		                           NOTE: CLK_CNT==0 will not generate any clock
		                           if CLK_CNT > 1 if GMX_PRT_CFG[SPEED]==0 */
#else
		uint64_t clk_cnt : 6;
		uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_gmxx_txx_clk_s cn30xx;
	struct cvmx_gmxx_txx_clk_s cn31xx;
	struct cvmx_gmxx_txx_clk_s cn38xx;
	struct cvmx_gmxx_txx_clk_s cn38xxp2;
	struct cvmx_gmxx_txx_clk_s cn50xx;
	struct cvmx_gmxx_txx_clk_s cn58xx;
	struct cvmx_gmxx_txx_clk_s cn58xxp1;
};
typedef union cvmx_gmxx_txx_clk cvmx_gmxx_txx_clk_t;
/**
 * cvmx_gmx#_tx#_ctl
 *
 * GMX_TX_CTL = TX Control register
 */
union cvmx_gmxx_txx_ctl {
	uint64_t u64;
	struct cvmx_gmxx_txx_ctl_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_2_63 : 62;
		uint64_t xsdef_en : 1; /**< Enables the excessive deferral check for stats
		                            (SGMII/1000Base-X half-duplex only) */
		uint64_t xscol_en : 1; /**< Enables the excessive collision check for stats
		                            (SGMII/1000Base-X half-duplex only) */
#else
		uint64_t xscol_en : 1;
		uint64_t xsdef_en : 1;
		uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_gmxx_txx_ctl_s cn30xx;
	struct cvmx_gmxx_txx_ctl_s cn31xx;
	struct cvmx_gmxx_txx_ctl_s cn38xx;
	struct cvmx_gmxx_txx_ctl_s cn38xxp2;
	struct cvmx_gmxx_txx_ctl_s cn50xx;
	struct cvmx_gmxx_txx_ctl_s cn52xx;
	struct cvmx_gmxx_txx_ctl_s cn52xxp1;
	struct cvmx_gmxx_txx_ctl_s cn56xx;
	struct cvmx_gmxx_txx_ctl_s cn56xxp1;
	struct cvmx_gmxx_txx_ctl_s cn58xx;
	struct cvmx_gmxx_txx_ctl_s cn58xxp1;
	struct cvmx_gmxx_txx_ctl_s cn63xx;
	struct cvmx_gmxx_txx_ctl_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_ctl cvmx_gmxx_txx_ctl_t;
/**
 * cvmx_gmx#_tx#_min_pkt
 *
 * GMX_TX_MIN_PKT = Packet TX Min Size Packet (PAD upto min size)
 */
union cvmx_gmxx_txx_min_pkt {
	uint64_t u64;
	struct cvmx_gmxx_txx_min_pkt_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_8_63 : 56;
		uint64_t min_size : 8; /**< Min frame in bytes before the FCS is applied
		                            Padding is only appened when GMX_TX_APPEND[PAD]
		                            for the coresponding port is set.
		                            In SGMII mode, packets will be padded to
		                            MIN_SIZE+1. The reset value will pad to 60 bytes.
		                            In XAUI mode, packets will be padded to
		                            MIN(252,(MIN_SIZE+1 & ~0x3))
		                            When GMX_TX_XAUI_CTL[HG_EN] is set, the HiGig
		                            header (12B or 16B) is normally added to the
		                            packet, so MIN_SIZE should be 59+12=71B for
		                            HiGig or 59+16=75B for HiGig2. */
#else
		uint64_t min_size : 8;
		uint64_t reserved_8_63 : 56;
#endif
	} s;
	struct cvmx_gmxx_txx_min_pkt_s cn30xx;
	struct cvmx_gmxx_txx_min_pkt_s cn31xx;
	struct cvmx_gmxx_txx_min_pkt_s cn38xx;
	struct cvmx_gmxx_txx_min_pkt_s cn38xxp2;
	struct cvmx_gmxx_txx_min_pkt_s cn50xx;
	struct cvmx_gmxx_txx_min_pkt_s cn52xx;
	struct cvmx_gmxx_txx_min_pkt_s cn52xxp1;
	struct cvmx_gmxx_txx_min_pkt_s cn56xx;
	struct cvmx_gmxx_txx_min_pkt_s cn56xxp1;
	struct cvmx_gmxx_txx_min_pkt_s cn58xx;
	struct cvmx_gmxx_txx_min_pkt_s cn58xxp1;
	struct cvmx_gmxx_txx_min_pkt_s cn63xx;
	struct cvmx_gmxx_txx_min_pkt_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_min_pkt cvmx_gmxx_txx_min_pkt_t;
/**
 * cvmx_gmx#_tx#_pause_pkt_interval
 *
 * GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets will be sent
 *
 * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and
 * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
 * designer.  It is suggested that TIME be much greater than INTERVAL and
 * GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octane is ready for additional
 * data.
 *
 * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisify the
 * following rule:
 *
 *    INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
 *
 * where largest_pkt_size is that largest packet that the system can send
 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
 * of the PAUSE packet (normally 64B).
 */
union cvmx_gmxx_txx_pause_pkt_interval {
	uint64_t u64;
	struct cvmx_gmxx_txx_pause_pkt_interval_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_16_63 : 48;
		uint64_t interval : 16; /**< Arbitrate for a 802.3 pause packet, HiGig2 message,
		                             or CBFC pause packet every (INTERVAL*512)
		                             bit-times.
		                             Normally, 0 < INTERVAL < GMX_TX_PAUSE_PKT_TIME
		                             INTERVAL=0, will only send a single PAUSE packet
		                             for each backpressure event */
#else
		uint64_t interval : 16;
		uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xx;
	struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_pause_pkt_interval cvmx_gmxx_txx_pause_pkt_interval_t;
/**
 * cvmx_gmx#_tx#_pause_pkt_time
 *
 * GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field
 *
 * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and
 * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
 * designer.  It is suggested that TIME be much greater than INTERVAL and
 * GMX_TX_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octane is ready for additional
 * data.
 *
 * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisify the
 * following rule:
 *
 *    INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
 *
 * where largest_pkt_size is that largest packet that the system can send
 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
 * of the PAUSE packet (normally 64B).
 */
union cvmx_gmxx_txx_pause_pkt_time {
	uint64_t u64;
	struct cvmx_gmxx_txx_pause_pkt_time_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_16_63 : 48;
		uint64_t time : 16; /**< The pause_time field placed in outbnd 802.3 pause
		                         packets, HiGig2 messages, or CBFC pause packets.
		                         pause_time is in 512 bit-times
		                         Normally, TIME > GMX_TX_PAUSE_PKT_INTERVAL */
#else
		uint64_t time : 16;
		uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn63xx;
	struct cvmx_gmxx_txx_pause_pkt_time_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_pause_pkt_time cvmx_gmxx_txx_pause_pkt_time_t;
/**
 * cvmx_gmx#_tx#_pause_togo
 *
 * GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure
 */
union cvmx_gmxx_txx_pause_togo {
	uint64_t u64;
	struct cvmx_gmxx_txx_pause_togo_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_32_63 : 32;
		uint64_t msg_time : 16; /**< Amount of time remaining to backpressure
		                             From the higig2 physical message pause timer
		                             (only valid on port0) */
		uint64_t time : 16;     /**< Amount of time remaining to backpressure
		                             From the standard 802.3 pause timer */
#else
		uint64_t time : 16;
		uint64_t msg_time : 16;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_gmxx_txx_pause_togo_cn30xx {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_16_63 : 48;
		uint64_t time : 16; /**< Amount of time remaining to backpressure */
#else
		uint64_t time : 16;
		uint64_t reserved_16_63 : 48;
#endif
	} cn30xx;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx;
	struct cvmx_gmxx_txx_pause_togo_s cn52xx;
	struct cvmx_gmxx_txx_pause_togo_s cn52xxp1;
	struct cvmx_gmxx_txx_pause_togo_s cn56xx;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx;
	struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1;
	struct cvmx_gmxx_txx_pause_togo_s cn63xx;
	struct cvmx_gmxx_txx_pause_togo_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_pause_togo cvmx_gmxx_txx_pause_togo_t;
/**
 * cvmx_gmx#_tx#_pause_zero
 *
 * GMX_TX_PAUSE_ZERO = Packet TX Amount of time remaining to backpressure
 */
union cvmx_gmxx_txx_pause_zero {
	uint64_t u64;
	struct cvmx_gmxx_txx_pause_zero_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_1_63 : 63;
		uint64_t send : 1; /**< When backpressure condition clear, send PAUSE
		                        packet with pause_time of zero to enable the
		                        link partner to resume transmission */
#else
		uint64_t send : 1;
		uint64_t reserved_1_63 : 63;
#endif
	} s;
	struct cvmx_gmxx_txx_pause_zero_s cn30xx;
	struct cvmx_gmxx_txx_pause_zero_s cn31xx;
	struct cvmx_gmxx_txx_pause_zero_s cn38xx;
	struct cvmx_gmxx_txx_pause_zero_s cn38xxp2;
	struct cvmx_gmxx_txx_pause_zero_s cn50xx;
	struct cvmx_gmxx_txx_pause_zero_s cn52xx;
	struct cvmx_gmxx_txx_pause_zero_s cn52xxp1;
	struct cvmx_gmxx_txx_pause_zero_s cn56xx;
	struct cvmx_gmxx_txx_pause_zero_s cn56xxp1;
	struct cvmx_gmxx_txx_pause_zero_s cn58xx;
	struct cvmx_gmxx_txx_pause_zero_s cn58xxp1;
	struct cvmx_gmxx_txx_pause_zero_s cn63xx;
	struct cvmx_gmxx_txx_pause_zero_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_pause_zero cvmx_gmxx_txx_pause_zero_t;
/**
 * cvmx_gmx#_tx#_sgmii_ctl
 */
union cvmx_gmxx_txx_sgmii_ctl {
	uint64_t u64;
	struct cvmx_gmxx_txx_sgmii_ctl_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_1_63 : 63;
		uint64_t align : 1; /**< Align the transmission to even cycles
		                         0 = Data can be sent on any cycle
		                             Possible to for the TX PCS machine to drop
		                             first byte of preamble
		                         1 = Data will only be sent on even cycles
		                             There will be no loss of data
		                         (SGMII/1000Base-X only) */
#else
		uint64_t align : 1;
		uint64_t reserved_1_63 : 63;
#endif
	} s;
	struct cvmx_gmxx_txx_sgmii_ctl_s cn52xx;
	struct cvmx_gmxx_txx_sgmii_ctl_s cn52xxp1;
	struct cvmx_gmxx_txx_sgmii_ctl_s cn56xx;
	struct cvmx_gmxx_txx_sgmii_ctl_s cn56xxp1;
	struct cvmx_gmxx_txx_sgmii_ctl_s cn63xx;
	struct cvmx_gmxx_txx_sgmii_ctl_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_sgmii_ctl cvmx_gmxx_txx_sgmii_ctl_t;
/**
 * cvmx_gmx#_tx#_slot
 *
 * GMX_TX_SLOT = Packet TX Slottime Counter
 */
union cvmx_gmxx_txx_slot {
	uint64_t u64;
	struct cvmx_gmxx_txx_slot_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_10_63 : 54;
		uint64_t slot : 10; /**< Slottime (refer to 802.3 to set correctly)
		                         (SGMII/1000Base-X only) */
#else
		uint64_t slot : 10;
		uint64_t reserved_10_63 : 54;
#endif
	} s;
	struct cvmx_gmxx_txx_slot_s cn30xx;
	struct cvmx_gmxx_txx_slot_s cn31xx;
	struct cvmx_gmxx_txx_slot_s cn38xx;
	struct cvmx_gmxx_txx_slot_s cn38xxp2;
	struct cvmx_gmxx_txx_slot_s cn50xx;
	struct cvmx_gmxx_txx_slot_s cn52xx;
	struct cvmx_gmxx_txx_slot_s cn52xxp1;
	struct cvmx_gmxx_txx_slot_s cn56xx;
	struct cvmx_gmxx_txx_slot_s cn56xxp1;
	struct cvmx_gmxx_txx_slot_s cn58xx;
	struct cvmx_gmxx_txx_slot_s cn58xxp1;
	struct cvmx_gmxx_txx_slot_s cn63xx;
	struct cvmx_gmxx_txx_slot_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_slot cvmx_gmxx_txx_slot_t;
/**
 * cvmx_gmx#_tx#_soft_pause
 *
 * GMX_TX_SOFT_PAUSE = Packet TX Software Pause
 */
union cvmx_gmxx_txx_soft_pause {
	uint64_t u64;
	struct cvmx_gmxx_txx_soft_pause_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_16_63 : 48;
		uint64_t time : 16; /**< Back off the TX bus for (TIME*512) bit-times */
#else
		uint64_t time : 16;
		uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_gmxx_txx_soft_pause_s cn30xx;
	struct cvmx_gmxx_txx_soft_pause_s cn31xx;
	struct cvmx_gmxx_txx_soft_pause_s cn38xx;
	struct cvmx_gmxx_txx_soft_pause_s cn38xxp2;
	struct cvmx_gmxx_txx_soft_pause_s cn50xx;
	struct cvmx_gmxx_txx_soft_pause_s cn52xx;
	struct cvmx_gmxx_txx_soft_pause_s cn52xxp1;
	struct cvmx_gmxx_txx_soft_pause_s cn56xx;
	struct cvmx_gmxx_txx_soft_pause_s cn56xxp1;
	struct cvmx_gmxx_txx_soft_pause_s cn58xx;
	struct cvmx_gmxx_txx_soft_pause_s cn58xxp1;
	struct cvmx_gmxx_txx_soft_pause_s cn63xx;
	struct cvmx_gmxx_txx_soft_pause_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_soft_pause cvmx_gmxx_txx_soft_pause_t;
/**
 * cvmx_gmx#_tx#_stat0
 *
 * GMX_TX_STAT0 = GMX_TX_STATS_XSDEF / GMX_TX_STATS_XSCOL
 *
 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_txx_stat0 {
	uint64_t u64;
	struct cvmx_gmxx_txx_stat0_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t xsdef : 32; /**< Number of packets dropped (never successfully
		                          sent) due to excessive deferal
		                          (SGMII/1000Base-X half-duplex only) */
		uint64_t xscol : 32; /**< Number of packets dropped (never successfully
		                          sent) due to excessive collision. Defined by
		                          GMX_TX_COL_ATTEMPT[LIMIT].
		                          (SGMII/1000Base-X half-duplex only) */
#else
		uint64_t xscol : 32;
		uint64_t xsdef : 32;
#endif
	} s;
	struct cvmx_gmxx_txx_stat0_s cn30xx;
	struct cvmx_gmxx_txx_stat0_s cn31xx;
	struct cvmx_gmxx_txx_stat0_s cn38xx;
	struct cvmx_gmxx_txx_stat0_s cn38xxp2;
	struct cvmx_gmxx_txx_stat0_s cn50xx;
	struct cvmx_gmxx_txx_stat0_s cn52xx;
	struct cvmx_gmxx_txx_stat0_s cn52xxp1;
	struct cvmx_gmxx_txx_stat0_s cn56xx;
	struct cvmx_gmxx_txx_stat0_s cn56xxp1;
	struct cvmx_gmxx_txx_stat0_s cn58xx;
	struct cvmx_gmxx_txx_stat0_s cn58xxp1;
	struct cvmx_gmxx_txx_stat0_s cn63xx;
	struct cvmx_gmxx_txx_stat0_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat0 cvmx_gmxx_txx_stat0_t;
/**
 * cvmx_gmx#_tx#_stat1
 *
 * GMX_TX_STAT1 = GMX_TX_STATS_SCOL / GMX_TX_STATS_MCOL
 *
 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_txx_stat1 {
	uint64_t u64;
	struct cvmx_gmxx_txx_stat1_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t scol : 32; /**< Number of packets sent with a single collision
		                         (SGMII/1000Base-X half-duplex only) */
		uint64_t mcol : 32; /**< Number of packets sent with multiple collisions
		                         but < GMX_TX_COL_ATTEMPT[LIMIT].
		                         (SGMII/1000Base-X half-duplex only) */
#else
		uint64_t mcol : 32;
		uint64_t scol : 32;
#endif
	} s;
	struct cvmx_gmxx_txx_stat1_s cn30xx;
	struct cvmx_gmxx_txx_stat1_s cn31xx;
	struct cvmx_gmxx_txx_stat1_s cn38xx;
	struct cvmx_gmxx_txx_stat1_s cn38xxp2;
	struct cvmx_gmxx_txx_stat1_s cn50xx;
	struct cvmx_gmxx_txx_stat1_s cn52xx;
	struct cvmx_gmxx_txx_stat1_s cn52xxp1;
	struct cvmx_gmxx_txx_stat1_s cn56xx;
	struct cvmx_gmxx_txx_stat1_s cn56xxp1;
	struct cvmx_gmxx_txx_stat1_s cn58xx;
	struct cvmx_gmxx_txx_stat1_s cn58xxp1;
	struct cvmx_gmxx_txx_stat1_s cn63xx;
	struct cvmx_gmxx_txx_stat1_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat1 cvmx_gmxx_txx_stat1_t;
/**
 * cvmx_gmx#_tx#_stat2
 *
 * GMX_TX_STAT2 = GMX_TX_STATS_OCTS
 *
 * - Octect counts are the sum of all data transmitted on the wire including
 *   packet data, pad bytes, fcs bytes, pause bytes, and jam bytes.  The octect
 *   counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_txx_stat2 {
	uint64_t u64;
	struct cvmx_gmxx_txx_stat2_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_48_63 : 16;
		uint64_t octs : 48; /**< Number of total octets sent on the interface.
		                         Does not count octets from frames that were
		                         truncated due to collisions in halfdup mode. */
#else
		uint64_t octs : 48;
		uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_gmxx_txx_stat2_s cn30xx;
	struct cvmx_gmxx_txx_stat2_s cn31xx;
	struct cvmx_gmxx_txx_stat2_s cn38xx;
	struct cvmx_gmxx_txx_stat2_s cn38xxp2;
	struct cvmx_gmxx_txx_stat2_s cn50xx;
	struct cvmx_gmxx_txx_stat2_s cn52xx;
	struct cvmx_gmxx_txx_stat2_s cn52xxp1;
	struct cvmx_gmxx_txx_stat2_s cn56xx;
	struct cvmx_gmxx_txx_stat2_s cn56xxp1;
	struct cvmx_gmxx_txx_stat2_s cn58xx;
	struct cvmx_gmxx_txx_stat2_s cn58xxp1;
	struct cvmx_gmxx_txx_stat2_s cn63xx;
	struct cvmx_gmxx_txx_stat2_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat2 cvmx_gmxx_txx_stat2_t;
/**
 * cvmx_gmx#_tx#_stat3
 *
 * GMX_TX_STAT3 = GMX_TX_STATS_PKTS
 *
 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_txx_stat3 {
	uint64_t u64;
	struct cvmx_gmxx_txx_stat3_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t reserved_32_63 : 32;
		uint64_t pkts : 32; /**< Number of total frames sent on the interface.
		                         Does not count frames that were truncated due to
		                         collisions in halfdup mode. */
#else
		uint64_t pkts : 32;
		uint64_t reserved_32_63 : 32;
#endif
	} s;
	struct cvmx_gmxx_txx_stat3_s cn30xx;
	struct cvmx_gmxx_txx_stat3_s cn31xx;
	struct cvmx_gmxx_txx_stat3_s cn38xx;
	struct cvmx_gmxx_txx_stat3_s cn38xxp2;
	struct cvmx_gmxx_txx_stat3_s cn50xx;
	struct cvmx_gmxx_txx_stat3_s cn52xx;
	struct cvmx_gmxx_txx_stat3_s cn52xxp1;
	struct cvmx_gmxx_txx_stat3_s cn56xx;
	struct cvmx_gmxx_txx_stat3_s cn56xxp1;
	struct cvmx_gmxx_txx_stat3_s cn58xx;
	struct cvmx_gmxx_txx_stat3_s cn58xxp1;
	struct cvmx_gmxx_txx_stat3_s cn63xx;
	struct cvmx_gmxx_txx_stat3_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat3 cvmx_gmxx_txx_stat3_t;
/**
 * cvmx_gmx#_tx#_stat4
 *
 * GMX_TX_STAT4 = GMX_TX_STATS_HIST1 (64) / GMX_TX_STATS_HIST0 (<64)
 *
 * - Packet length is the sum of all data transmitted on the wire for the given
 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octect counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_txx_stat4 {
	uint64_t u64;
	struct cvmx_gmxx_txx_stat4_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t hist1 : 32; /**< Number of packets sent with an octet count of 64. */
		uint64_t hist0 : 32; /**< Number of packets sent with an octet count
		                          of < 64. */
#else
		uint64_t hist0 : 32;
		uint64_t hist1 : 32;
#endif
	} s;
	struct cvmx_gmxx_txx_stat4_s cn30xx;
	struct cvmx_gmxx_txx_stat4_s cn31xx;
	struct cvmx_gmxx_txx_stat4_s cn38xx;
	struct cvmx_gmxx_txx_stat4_s cn38xxp2;
	struct cvmx_gmxx_txx_stat4_s cn50xx;
	struct cvmx_gmxx_txx_stat4_s cn52xx;
	struct cvmx_gmxx_txx_stat4_s cn52xxp1;
	struct cvmx_gmxx_txx_stat4_s cn56xx;
	struct cvmx_gmxx_txx_stat4_s cn56xxp1;
	struct cvmx_gmxx_txx_stat4_s cn58xx;
	struct cvmx_gmxx_txx_stat4_s cn58xxp1;
	struct cvmx_gmxx_txx_stat4_s cn63xx;
	struct cvmx_gmxx_txx_stat4_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat4 cvmx_gmxx_txx_stat4_t;
/**
 * cvmx_gmx#_tx#_stat5
 *
 * GMX_TX_STAT5 = GMX_TX_STATS_HIST3 (128- 255) / GMX_TX_STATS_HIST2 (65- 127)
 *
 * - Packet length is the sum of all data transmitted on the wire for the given
 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octect counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_txx_stat5 {
	uint64_t u64;
	struct cvmx_gmxx_txx_stat5_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t hist3 : 32; /**< Number of packets sent with an octet count of
		                          128 - 255. */
		uint64_t hist2 : 32; /**< Number of packets sent with an octet count of
		                          65 - 127. */
#else
		uint64_t hist2 : 32;
		uint64_t hist3 : 32;
#endif
	} s;
	struct cvmx_gmxx_txx_stat5_s cn30xx;
	struct cvmx_gmxx_txx_stat5_s cn31xx;
	struct cvmx_gmxx_txx_stat5_s cn38xx;
	struct cvmx_gmxx_txx_stat5_s cn38xxp2;
	struct cvmx_gmxx_txx_stat5_s cn50xx;
	struct cvmx_gmxx_txx_stat5_s cn52xx;
	struct cvmx_gmxx_txx_stat5_s cn52xxp1;
	struct cvmx_gmxx_txx_stat5_s cn56xx;
	struct cvmx_gmxx_txx_stat5_s cn56xxp1;
	struct cvmx_gmxx_txx_stat5_s cn58xx;
	struct cvmx_gmxx_txx_stat5_s cn58xxp1;
	struct cvmx_gmxx_txx_stat5_s cn63xx;
	struct cvmx_gmxx_txx_stat5_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat5 cvmx_gmxx_txx_stat5_t;
/**
 * cvmx_gmx#_tx#_stat6
 *
 * GMX_TX_STAT6 = GMX_TX_STATS_HIST5 (512-1023) / GMX_TX_STATS_HIST4 (256-511)
 *
 * - Packet length is the sum of all data transmitted on the wire for the given
 *   packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
 *   bytes.  The octect counts do not include PREAMBLE byte or EXTEND cycles.
 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 */
union cvmx_gmxx_txx_stat6 {
	uint64_t u64;
	struct cvmx_gmxx_txx_stat6_s {
#if __BYTE_ORDER == __BIG_ENDIAN
		uint64_t hist5 : 32; /**< Number of packets sent with an octet count of
		                          512 - 1023. */
		uint64_t hist4 : 32; /**< Number of packets sent with an octet count of
		                          256 - 511. */
#else
		uint64_t hist4 : 32;
		uint64_t hist5 : 32;
#endif
	} s;
	struct cvmx_gmxx_txx_stat6_s cn30xx;
	struct cvmx_gmxx_txx_stat6_s cn31xx;
	struct cvmx_gmxx_txx_stat6_s cn38xx;
	struct cvmx_gmxx_txx_stat6_s cn38xxp2;
	struct cvmx_gmxx_txx_stat6_s cn50xx;
	struct cvmx_gmxx_txx_stat6_s cn52xx;
	struct cvmx_gmxx_txx_stat6_s cn52xxp1;
	struct cvmx_gmxx_txx_stat6_s cn56xx;
	struct cvmx_gmxx_txx_stat6_s cn56xxp1;
	struct cvmx_gmxx_txx_stat6_s cn58xx;
	struct cvmx_gmxx_txx_stat6_s cn58xxp1;
	struct cvmx_gmxx_txx_stat6_s cn63xx;
	struct cvmx_gmxx_txx_stat6_s cn63xxp1;
};
typedef union cvmx_gmxx_txx_stat6 cvmx_gmxx_txx_stat6_t;
6559 * cvmx_gmx#_tx#_stat7
6561 * GMX_TX_STAT7 = GMX_TX_STATS_HIST7 (1024-1518) / GMX_TX_STATS_HIST6 (>1518)
6565 * - Packet length is the sum of all data transmitted on the wire for the given
6566 * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam
6567 * bytes. The octect counts do not include PREAMBLE byte or EXTEND cycles.
6568 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6569 * - Counters will wrap
6571 union cvmx_gmxx_txx_stat7
6574 struct cvmx_gmxx_txx_stat7_s
6576 #if __BYTE_ORDER == __BIG_ENDIAN
6577 uint64_t hist7 : 32; /**< Number of packets sent with an octet count
6579 uint64_t hist6 : 32; /**< Number of packets sent with an octet count of
6582 uint64_t hist6 : 32;
6583 uint64_t hist7 : 32;
6586 struct cvmx_gmxx_txx_stat7_s cn30xx;
6587 struct cvmx_gmxx_txx_stat7_s cn31xx;
6588 struct cvmx_gmxx_txx_stat7_s cn38xx;
6589 struct cvmx_gmxx_txx_stat7_s cn38xxp2;
6590 struct cvmx_gmxx_txx_stat7_s cn50xx;
6591 struct cvmx_gmxx_txx_stat7_s cn52xx;
6592 struct cvmx_gmxx_txx_stat7_s cn52xxp1;
6593 struct cvmx_gmxx_txx_stat7_s cn56xx;
6594 struct cvmx_gmxx_txx_stat7_s cn56xxp1;
6595 struct cvmx_gmxx_txx_stat7_s cn58xx;
6596 struct cvmx_gmxx_txx_stat7_s cn58xxp1;
6597 struct cvmx_gmxx_txx_stat7_s cn63xx;
6598 struct cvmx_gmxx_txx_stat7_s cn63xxp1;
6600 typedef union cvmx_gmxx_txx_stat7 cvmx_gmxx_txx_stat7_t;
6603 * cvmx_gmx#_tx#_stat8
6605 * GMX_TX_STAT8 = GMX_TX_STATS_MCST / GMX_TX_STATS_BCST
6609 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6610 * - Counters will wrap
6611 * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the
6612 * packet. GMX assumes that the DMAC lies in the first 6 bytes of the packet
6613 * as per the 802.3 frame definition. If the system requires additional data
6614 * before the L2 header, then the MCST and BCST counters may not reflect
6615 * reality and should be ignored by software.
6617 union cvmx_gmxx_txx_stat8
6620 struct cvmx_gmxx_txx_stat8_s
6622 #if __BYTE_ORDER == __BIG_ENDIAN
6623 uint64_t mcst : 32; /**< Number of packets sent to multicast DMAC.
6624 Does not include BCST packets. */
6625 uint64_t bcst : 32; /**< Number of packets sent to broadcast DMAC.
6626 Does not include MCST packets. */
6632 struct cvmx_gmxx_txx_stat8_s cn30xx;
6633 struct cvmx_gmxx_txx_stat8_s cn31xx;
6634 struct cvmx_gmxx_txx_stat8_s cn38xx;
6635 struct cvmx_gmxx_txx_stat8_s cn38xxp2;
6636 struct cvmx_gmxx_txx_stat8_s cn50xx;
6637 struct cvmx_gmxx_txx_stat8_s cn52xx;
6638 struct cvmx_gmxx_txx_stat8_s cn52xxp1;
6639 struct cvmx_gmxx_txx_stat8_s cn56xx;
6640 struct cvmx_gmxx_txx_stat8_s cn56xxp1;
6641 struct cvmx_gmxx_txx_stat8_s cn58xx;
6642 struct cvmx_gmxx_txx_stat8_s cn58xxp1;
6643 struct cvmx_gmxx_txx_stat8_s cn63xx;
6644 struct cvmx_gmxx_txx_stat8_s cn63xxp1;
6646 typedef union cvmx_gmxx_txx_stat8 cvmx_gmxx_txx_stat8_t;
6649 * cvmx_gmx#_tx#_stat9
6651 * GMX_TX_STAT9 = GMX_TX_STATS_UNDFLW / GMX_TX_STATS_CTL
6655 * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set
6656 * - Counters will wrap
6658 union cvmx_gmxx_txx_stat9
6661 struct cvmx_gmxx_txx_stat9_s
6663 #if __BYTE_ORDER == __BIG_ENDIAN
6664 uint64_t undflw : 32; /**< Number of underflow packets */
6665 uint64_t ctl : 32; /**< Number of Control packets (PAUSE flow control)
6666 generated by GMX. It does not include control
6667 packets forwarded or generated by the PP's. */
6670 uint64_t undflw : 32;
6673 struct cvmx_gmxx_txx_stat9_s cn30xx;
6674 struct cvmx_gmxx_txx_stat9_s cn31xx;
6675 struct cvmx_gmxx_txx_stat9_s cn38xx;
6676 struct cvmx_gmxx_txx_stat9_s cn38xxp2;
6677 struct cvmx_gmxx_txx_stat9_s cn50xx;
6678 struct cvmx_gmxx_txx_stat9_s cn52xx;
6679 struct cvmx_gmxx_txx_stat9_s cn52xxp1;
6680 struct cvmx_gmxx_txx_stat9_s cn56xx;
6681 struct cvmx_gmxx_txx_stat9_s cn56xxp1;
6682 struct cvmx_gmxx_txx_stat9_s cn58xx;
6683 struct cvmx_gmxx_txx_stat9_s cn58xxp1;
6684 struct cvmx_gmxx_txx_stat9_s cn63xx;
6685 struct cvmx_gmxx_txx_stat9_s cn63xxp1;
6687 typedef union cvmx_gmxx_txx_stat9 cvmx_gmxx_txx_stat9_t;
6690 * cvmx_gmx#_tx#_stats_ctl
6692 * GMX_TX_STATS_CTL = TX Stats Control register
6695 union cvmx_gmxx_txx_stats_ctl
6698 struct cvmx_gmxx_txx_stats_ctl_s
6700 #if __BYTE_ORDER == __BIG_ENDIAN
6701 uint64_t reserved_1_63 : 63;
6702 uint64_t rd_clr : 1; /**< Stats registers will clear on reads */
6704 uint64_t rd_clr : 1;
6705 uint64_t reserved_1_63 : 63;
6708 struct cvmx_gmxx_txx_stats_ctl_s cn30xx;
6709 struct cvmx_gmxx_txx_stats_ctl_s cn31xx;
6710 struct cvmx_gmxx_txx_stats_ctl_s cn38xx;
6711 struct cvmx_gmxx_txx_stats_ctl_s cn38xxp2;
6712 struct cvmx_gmxx_txx_stats_ctl_s cn50xx;
6713 struct cvmx_gmxx_txx_stats_ctl_s cn52xx;
6714 struct cvmx_gmxx_txx_stats_ctl_s cn52xxp1;
6715 struct cvmx_gmxx_txx_stats_ctl_s cn56xx;
6716 struct cvmx_gmxx_txx_stats_ctl_s cn56xxp1;
6717 struct cvmx_gmxx_txx_stats_ctl_s cn58xx;
6718 struct cvmx_gmxx_txx_stats_ctl_s cn58xxp1;
6719 struct cvmx_gmxx_txx_stats_ctl_s cn63xx;
6720 struct cvmx_gmxx_txx_stats_ctl_s cn63xxp1;
6722 typedef union cvmx_gmxx_txx_stats_ctl cvmx_gmxx_txx_stats_ctl_t;
6725 * cvmx_gmx#_tx#_thresh
6730 * GMX_TX_THRESH = Packet TX Threshold
6733 * In XAUI mode, prt0 is used for checking. Since XAUI mode uses a single TX FIFO and is higher data rate, recommended value is 0x100.
6736 union cvmx_gmxx_txx_thresh
6739 struct cvmx_gmxx_txx_thresh_s
6741 #if __BYTE_ORDER == __BIG_ENDIAN
6742 uint64_t reserved_9_63 : 55;
6743 uint64_t cnt : 9; /**< Number of 16B ticks to accumulate in the TX FIFO
6744 before sending on the packet interface
6745 This register should be large enough to prevent
6746 underflow on the packet interface and must never
6747 be set to zero. This register cannot exceed the
6748 the TX FIFO depth which is...
6749 GMX_TX_PRTS==0,1: CNT MAX = 0x100
6750 GMX_TX_PRTS==2 : CNT MAX = 0x080
6751 GMX_TX_PRTS==3,4: CNT MAX = 0x040 */
6754 uint64_t reserved_9_63 : 55;
6757 struct cvmx_gmxx_txx_thresh_cn30xx
6759 #if __BYTE_ORDER == __BIG_ENDIAN
6760 uint64_t reserved_7_63 : 57;
6761 uint64_t cnt : 7; /**< Number of 16B ticks to accumulate in the TX FIFO
6762 before sending on the RGMII interface
6763 This register should be large enough to prevent
6764 underflow on the RGMII interface and must never
6765 be set below 4. This register cannot exceed the
6766 the TX FIFO depth which is 64 16B entries. */
6769 uint64_t reserved_7_63 : 57;
6772 struct cvmx_gmxx_txx_thresh_cn30xx cn31xx;
6773 struct cvmx_gmxx_txx_thresh_s cn38xx;
6774 struct cvmx_gmxx_txx_thresh_s cn38xxp2;
6775 struct cvmx_gmxx_txx_thresh_cn30xx cn50xx;
6776 struct cvmx_gmxx_txx_thresh_s cn52xx;
6777 struct cvmx_gmxx_txx_thresh_s cn52xxp1;
6778 struct cvmx_gmxx_txx_thresh_s cn56xx;
6779 struct cvmx_gmxx_txx_thresh_s cn56xxp1;
6780 struct cvmx_gmxx_txx_thresh_s cn58xx;
6781 struct cvmx_gmxx_txx_thresh_s cn58xxp1;
6782 struct cvmx_gmxx_txx_thresh_s cn63xx;
6783 struct cvmx_gmxx_txx_thresh_s cn63xxp1;
6785 typedef union cvmx_gmxx_txx_thresh cvmx_gmxx_txx_thresh_t;
6790 * GMX_TX_BP = Packet Interface TX BackPressure Register
6794 * In XAUI mode, only the lsb (corresponding to port0) of BP is used.
6797 union cvmx_gmxx_tx_bp
6800 struct cvmx_gmxx_tx_bp_s
6802 #if __BYTE_ORDER == __BIG_ENDIAN
6803 uint64_t reserved_4_63 : 60;
6804 uint64_t bp : 4; /**< Per port BackPressure status
6806 1=Port should be back pressured */
6809 uint64_t reserved_4_63 : 60;
6812 struct cvmx_gmxx_tx_bp_cn30xx
6814 #if __BYTE_ORDER == __BIG_ENDIAN
6815 uint64_t reserved_3_63 : 61;
6816 uint64_t bp : 3; /**< Per port BackPressure status
6818 1=Port should be back pressured */
6821 uint64_t reserved_3_63 : 61;
6824 struct cvmx_gmxx_tx_bp_cn30xx cn31xx;
6825 struct cvmx_gmxx_tx_bp_s cn38xx;
6826 struct cvmx_gmxx_tx_bp_s cn38xxp2;
6827 struct cvmx_gmxx_tx_bp_cn30xx cn50xx;
6828 struct cvmx_gmxx_tx_bp_s cn52xx;
6829 struct cvmx_gmxx_tx_bp_s cn52xxp1;
6830 struct cvmx_gmxx_tx_bp_s cn56xx;
6831 struct cvmx_gmxx_tx_bp_s cn56xxp1;
6832 struct cvmx_gmxx_tx_bp_s cn58xx;
6833 struct cvmx_gmxx_tx_bp_s cn58xxp1;
6834 struct cvmx_gmxx_tx_bp_s cn63xx;
6835 struct cvmx_gmxx_tx_bp_s cn63xxp1;
6837 typedef union cvmx_gmxx_tx_bp cvmx_gmxx_tx_bp_t;
6840 * cvmx_gmx#_tx_clk_msk#
6842 * GMX_TX_CLK_MSK = GMX Clock Select
6845 union cvmx_gmxx_tx_clk_mskx
6848 struct cvmx_gmxx_tx_clk_mskx_s
6850 #if __BYTE_ORDER == __BIG_ENDIAN
6851 uint64_t reserved_1_63 : 63;
6852 uint64_t msk : 1; /**< Write this bit to a 1 when switching clks */
6855 uint64_t reserved_1_63 : 63;
6858 struct cvmx_gmxx_tx_clk_mskx_s cn30xx;
6859 struct cvmx_gmxx_tx_clk_mskx_s cn50xx;
6861 typedef union cvmx_gmxx_tx_clk_mskx cvmx_gmxx_tx_clk_mskx_t;
6864 * cvmx_gmx#_tx_col_attempt
6866 * GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame
6869 union cvmx_gmxx_tx_col_attempt
6872 struct cvmx_gmxx_tx_col_attempt_s
6874 #if __BYTE_ORDER == __BIG_ENDIAN
6875 uint64_t reserved_5_63 : 59;
6876 uint64_t limit : 5; /**< Collision Attempts
6877 (SGMII/1000Base-X half-duplex only) */
6880 uint64_t reserved_5_63 : 59;
6883 struct cvmx_gmxx_tx_col_attempt_s cn30xx;
6884 struct cvmx_gmxx_tx_col_attempt_s cn31xx;
6885 struct cvmx_gmxx_tx_col_attempt_s cn38xx;
6886 struct cvmx_gmxx_tx_col_attempt_s cn38xxp2;
6887 struct cvmx_gmxx_tx_col_attempt_s cn50xx;
6888 struct cvmx_gmxx_tx_col_attempt_s cn52xx;
6889 struct cvmx_gmxx_tx_col_attempt_s cn52xxp1;
6890 struct cvmx_gmxx_tx_col_attempt_s cn56xx;
6891 struct cvmx_gmxx_tx_col_attempt_s cn56xxp1;
6892 struct cvmx_gmxx_tx_col_attempt_s cn58xx;
6893 struct cvmx_gmxx_tx_col_attempt_s cn58xxp1;
6894 struct cvmx_gmxx_tx_col_attempt_s cn63xx;
6895 struct cvmx_gmxx_tx_col_attempt_s cn63xxp1;
6897 typedef union cvmx_gmxx_tx_col_attempt cvmx_gmxx_tx_col_attempt_t;
6900 * cvmx_gmx#_tx_corrupt
6902 * GMX_TX_CORRUPT = TX - Corrupt TX packets with the ERR bit set
6906 * Packets sent from PKO with the ERR wire asserted will be corrupted by
6907 * the transmitter if CORRUPT[prt] is set (XAUI uses prt==0).
6909 * Corruption means that GMX will send a bad FCS value. If GMX_TX_APPEND[FCS]
6910 * is clear then no FCS is sent and the GMX cannot corrupt it. The corrupt FCS
6911 * value is 0xeeeeeeee for SGMII/1000Base-X and 4 bytes of the error
6912 * propagation code in XAUI mode.
6914 union cvmx_gmxx_tx_corrupt
6917 struct cvmx_gmxx_tx_corrupt_s
6919 #if __BYTE_ORDER == __BIG_ENDIAN
6920 uint64_t reserved_4_63 : 60;
6921 uint64_t corrupt : 4; /**< Per port error propagation
6922 0=Never corrupt packets
6923 1=Corrupt packets with ERR */
6925 uint64_t corrupt : 4;
6926 uint64_t reserved_4_63 : 60;
6929 struct cvmx_gmxx_tx_corrupt_cn30xx
6931 #if __BYTE_ORDER == __BIG_ENDIAN
6932 uint64_t reserved_3_63 : 61;
6933 uint64_t corrupt : 3; /**< Per port error propagation
6934 0=Never corrupt packets
6935 1=Corrupt packets with ERR */
6937 uint64_t corrupt : 3;
6938 uint64_t reserved_3_63 : 61;
6941 struct cvmx_gmxx_tx_corrupt_cn30xx cn31xx;
6942 struct cvmx_gmxx_tx_corrupt_s cn38xx;
6943 struct cvmx_gmxx_tx_corrupt_s cn38xxp2;
6944 struct cvmx_gmxx_tx_corrupt_cn30xx cn50xx;
6945 struct cvmx_gmxx_tx_corrupt_s cn52xx;
6946 struct cvmx_gmxx_tx_corrupt_s cn52xxp1;
6947 struct cvmx_gmxx_tx_corrupt_s cn56xx;
6948 struct cvmx_gmxx_tx_corrupt_s cn56xxp1;
6949 struct cvmx_gmxx_tx_corrupt_s cn58xx;
6950 struct cvmx_gmxx_tx_corrupt_s cn58xxp1;
6951 struct cvmx_gmxx_tx_corrupt_s cn63xx;
6952 struct cvmx_gmxx_tx_corrupt_s cn63xxp1;
6954 typedef union cvmx_gmxx_tx_corrupt cvmx_gmxx_tx_corrupt_t;
6957 * cvmx_gmx#_tx_hg2_reg1
6960 * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in
6961 * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address of
6962 * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of
6963 * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior.
6964 * For reads, either address will return the $GMX(0)_TX_HG2_REG1 values.
6966 union cvmx_gmxx_tx_hg2_reg1
6969 struct cvmx_gmxx_tx_hg2_reg1_s
6971 #if __BYTE_ORDER == __BIG_ENDIAN
6972 uint64_t reserved_16_63 : 48;
6973 uint64_t tx_xof : 16; /**< TX HiGig2 message for logical link pause when any
6975 Only write in HiGig2 mode i.e. when
6976 GMX_TX_XAUI_CTL[HG_EN]=1 and
6977 GMX_RX_UDD_SKP[SKIP]=16. */
6979 uint64_t tx_xof : 16;
6980 uint64_t reserved_16_63 : 48;
6983 struct cvmx_gmxx_tx_hg2_reg1_s cn52xx;
6984 struct cvmx_gmxx_tx_hg2_reg1_s cn52xxp1;
6985 struct cvmx_gmxx_tx_hg2_reg1_s cn56xx;
6986 struct cvmx_gmxx_tx_hg2_reg1_s cn63xx;
6987 struct cvmx_gmxx_tx_hg2_reg1_s cn63xxp1;
6989 typedef union cvmx_gmxx_tx_hg2_reg1 cvmx_gmxx_tx_hg2_reg1_t;
6992 * cvmx_gmx#_tx_hg2_reg2
6995 * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in
6996 * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address of
6997 * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of
6998 * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior.
6999 * For reads, either address will return the $GMX(0)_TX_HG2_REG1 values.
7001 union cvmx_gmxx_tx_hg2_reg2
7004 struct cvmx_gmxx_tx_hg2_reg2_s
7006 #if __BYTE_ORDER == __BIG_ENDIAN
7007 uint64_t reserved_16_63 : 48;
7008 uint64_t tx_xon : 16; /**< TX HiGig2 message for logical link pause when any
7010 Only write in HiGig2 mode i.e. when
7011 GMX_TX_XAUI_CTL[HG_EN]=1 and
7012 GMX_RX_UDD_SKP[SKIP]=16. */
7014 uint64_t tx_xon : 16;
7015 uint64_t reserved_16_63 : 48;
7018 struct cvmx_gmxx_tx_hg2_reg2_s cn52xx;
7019 struct cvmx_gmxx_tx_hg2_reg2_s cn52xxp1;
7020 struct cvmx_gmxx_tx_hg2_reg2_s cn56xx;
7021 struct cvmx_gmxx_tx_hg2_reg2_s cn63xx;
7022 struct cvmx_gmxx_tx_hg2_reg2_s cn63xxp1;
7024 typedef union cvmx_gmxx_tx_hg2_reg2 cvmx_gmxx_tx_hg2_reg2_t;
7029 * GMX_TX_IFG = Packet TX Interframe Gap
7033 * * Programming IFG1 and IFG2.
7035 * For 10/100/1000Mbs half-duplex systems that require IEEE 802.3
7036 * compatibility, IFG1 must be in the range of 1-8, IFG2 must be in the range
7037 * of 4-12, and the IFG1+IFG2 sum must be 12.
7039 * For 10/100/1000Mbs full-duplex systems that require IEEE 802.3
7040 * compatibility, IFG1 must be in the range of 1-11, IFG2 must be in the range
7041 * of 1-11, and the IFG1+IFG2 sum must be 12.
7043 * For XAUI/10Gbs systems that require IEEE 802.3 compatibility, the
7044 * IFG1+IFG2 sum must be 12. IFG1[1:0] and IFG2[1:0] must be zero.
7046 * For all other systems, IFG1 and IFG2 can be any value in the range of
7047 * 1-15. Allowing for a total possible IFG sum of 2-30.
7049 union cvmx_gmxx_tx_ifg
7052 struct cvmx_gmxx_tx_ifg_s
7054 #if __BYTE_ORDER == __BIG_ENDIAN
7055 uint64_t reserved_8_63 : 56;
7056 uint64_t ifg2 : 4; /**< 1/3 of the interframe gap timing (in IFG2*8 bits)
7057 If CRS is detected during IFG2, then the
7058 interFrameSpacing timer is not reset and a frame
7059 is transmited once the timer expires. */
7060 uint64_t ifg1 : 4; /**< 2/3 of the interframe gap timing (in IFG1*8 bits)
7061 If CRS is detected during IFG1, then the
7062 interFrameSpacing timer is reset and a frame is
7067 uint64_t reserved_8_63 : 56;
7070 struct cvmx_gmxx_tx_ifg_s cn30xx;
7071 struct cvmx_gmxx_tx_ifg_s cn31xx;
7072 struct cvmx_gmxx_tx_ifg_s cn38xx;
7073 struct cvmx_gmxx_tx_ifg_s cn38xxp2;
7074 struct cvmx_gmxx_tx_ifg_s cn50xx;
7075 struct cvmx_gmxx_tx_ifg_s cn52xx;
7076 struct cvmx_gmxx_tx_ifg_s cn52xxp1;
7077 struct cvmx_gmxx_tx_ifg_s cn56xx;
7078 struct cvmx_gmxx_tx_ifg_s cn56xxp1;
7079 struct cvmx_gmxx_tx_ifg_s cn58xx;
7080 struct cvmx_gmxx_tx_ifg_s cn58xxp1;
7081 struct cvmx_gmxx_tx_ifg_s cn63xx;
7082 struct cvmx_gmxx_tx_ifg_s cn63xxp1;
7084 typedef union cvmx_gmxx_tx_ifg cvmx_gmxx_tx_ifg_t;
7087 * cvmx_gmx#_tx_int_en
7089 * GMX_TX_INT_EN = Interrupt Enable
7093 * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used.
7096 union cvmx_gmxx_tx_int_en
7099 struct cvmx_gmxx_tx_int_en_s
7101 #if __BYTE_ORDER == __BIG_ENDIAN
7102 uint64_t reserved_24_63 : 40;
7103 uint64_t ptp_lost : 4; /**< A packet with a PTP request was not able to be
7104 sent due to XSCOL */
7105 uint64_t late_col : 4; /**< TX Late Collision
7106 (SGMII/1000Base-X half-duplex only) */
7107 uint64_t xsdef : 4; /**< TX Excessive deferral
7108 (SGMII/1000Base-X half-duplex only) */
7109 uint64_t xscol : 4; /**< TX Excessive collisions
7110 (SGMII/1000Base-X half-duplex only) */
7111 uint64_t reserved_6_7 : 2;
7112 uint64_t undflw : 4; /**< TX Underflow */
7113 uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */
7114 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7116 uint64_t pko_nxa : 1;
7117 uint64_t ncb_nxa : 1;
7118 uint64_t undflw : 4;
7119 uint64_t reserved_6_7 : 2;
7122 uint64_t late_col : 4;
7123 uint64_t ptp_lost : 4;
7124 uint64_t reserved_24_63 : 40;
7127 struct cvmx_gmxx_tx_int_en_cn30xx
7129 #if __BYTE_ORDER == __BIG_ENDIAN
7130 uint64_t reserved_19_63 : 45;
7131 uint64_t late_col : 3; /**< TX Late Collision */
7132 uint64_t reserved_15_15 : 1;
7133 uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */
7134 uint64_t reserved_11_11 : 1;
7135 uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */
7136 uint64_t reserved_5_7 : 3;
7137 uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */
7138 uint64_t reserved_1_1 : 1;
7139 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7141 uint64_t pko_nxa : 1;
7142 uint64_t reserved_1_1 : 1;
7143 uint64_t undflw : 3;
7144 uint64_t reserved_5_7 : 3;
7146 uint64_t reserved_11_11 : 1;
7148 uint64_t reserved_15_15 : 1;
7149 uint64_t late_col : 3;
7150 uint64_t reserved_19_63 : 45;
7153 struct cvmx_gmxx_tx_int_en_cn31xx
7155 #if __BYTE_ORDER == __BIG_ENDIAN
7156 uint64_t reserved_15_63 : 49;
7157 uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */
7158 uint64_t reserved_11_11 : 1;
7159 uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */
7160 uint64_t reserved_5_7 : 3;
7161 uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */
7162 uint64_t reserved_1_1 : 1;
7163 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7165 uint64_t pko_nxa : 1;
7166 uint64_t reserved_1_1 : 1;
7167 uint64_t undflw : 3;
7168 uint64_t reserved_5_7 : 3;
7170 uint64_t reserved_11_11 : 1;
7172 uint64_t reserved_15_63 : 49;
7175 struct cvmx_gmxx_tx_int_en_cn38xx
7177 #if __BYTE_ORDER == __BIG_ENDIAN
7178 uint64_t reserved_20_63 : 44;
7179 uint64_t late_col : 4; /**< TX Late Collision
7181 uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) */
7182 uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) */
7183 uint64_t reserved_6_7 : 2;
7184 uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */
7185 uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */
7186 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7188 uint64_t pko_nxa : 1;
7189 uint64_t ncb_nxa : 1;
7190 uint64_t undflw : 4;
7191 uint64_t reserved_6_7 : 2;
7194 uint64_t late_col : 4;
7195 uint64_t reserved_20_63 : 44;
7198 struct cvmx_gmxx_tx_int_en_cn38xxp2
7200 #if __BYTE_ORDER == __BIG_ENDIAN
7201 uint64_t reserved_16_63 : 48;
7202 uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) */
7203 uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) */
7204 uint64_t reserved_6_7 : 2;
7205 uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */
7206 uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */
7207 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7209 uint64_t pko_nxa : 1;
7210 uint64_t ncb_nxa : 1;
7211 uint64_t undflw : 4;
7212 uint64_t reserved_6_7 : 2;
7215 uint64_t reserved_16_63 : 48;
7218 struct cvmx_gmxx_tx_int_en_cn30xx cn50xx;
7219 struct cvmx_gmxx_tx_int_en_cn52xx
7221 #if __BYTE_ORDER == __BIG_ENDIAN
7222 uint64_t reserved_20_63 : 44;
7223 uint64_t late_col : 4; /**< TX Late Collision
7224 (SGMII/1000Base-X half-duplex only) */
7225 uint64_t xsdef : 4; /**< TX Excessive deferral
7226 (SGMII/1000Base-X half-duplex only) */
7227 uint64_t xscol : 4; /**< TX Excessive collisions
7228 (SGMII/1000Base-X half-duplex only) */
7229 uint64_t reserved_6_7 : 2;
7230 uint64_t undflw : 4; /**< TX Underflow */
7231 uint64_t reserved_1_1 : 1;
7232 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7234 uint64_t pko_nxa : 1;
7235 uint64_t reserved_1_1 : 1;
7236 uint64_t undflw : 4;
7237 uint64_t reserved_6_7 : 2;
7240 uint64_t late_col : 4;
7241 uint64_t reserved_20_63 : 44;
7244 struct cvmx_gmxx_tx_int_en_cn52xx cn52xxp1;
7245 struct cvmx_gmxx_tx_int_en_cn52xx cn56xx;
7246 struct cvmx_gmxx_tx_int_en_cn52xx cn56xxp1;
7247 struct cvmx_gmxx_tx_int_en_cn38xx cn58xx;
7248 struct cvmx_gmxx_tx_int_en_cn38xx cn58xxp1;
7249 struct cvmx_gmxx_tx_int_en_cn63xx
7251 #if __BYTE_ORDER == __BIG_ENDIAN
7252 uint64_t reserved_24_63 : 40;
7253 uint64_t ptp_lost : 4; /**< A packet with a PTP request was not able to be
7254 sent due to XSCOL */
7255 uint64_t late_col : 4; /**< TX Late Collision
7256 (SGMII/1000Base-X half-duplex only) */
7257 uint64_t xsdef : 4; /**< TX Excessive deferral
7258 (SGMII/1000Base-X half-duplex only) */
7259 uint64_t xscol : 4; /**< TX Excessive collisions
7260 (SGMII/1000Base-X half-duplex only) */
7261 uint64_t reserved_6_7 : 2;
7262 uint64_t undflw : 4; /**< TX Underflow */
7263 uint64_t reserved_1_1 : 1;
7264 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7266 uint64_t pko_nxa : 1;
7267 uint64_t reserved_1_1 : 1;
7268 uint64_t undflw : 4;
7269 uint64_t reserved_6_7 : 2;
7272 uint64_t late_col : 4;
7273 uint64_t ptp_lost : 4;
7274 uint64_t reserved_24_63 : 40;
7277 struct cvmx_gmxx_tx_int_en_cn63xx cn63xxp1;
7279 typedef union cvmx_gmxx_tx_int_en cvmx_gmxx_tx_int_en_t;
7282 * cvmx_gmx#_tx_int_reg
7284 * GMX_TX_INT_REG = Interrupt Register
7288 * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used.
7291 union cvmx_gmxx_tx_int_reg
7294 struct cvmx_gmxx_tx_int_reg_s
7296 #if __BYTE_ORDER == __BIG_ENDIAN
7297 uint64_t reserved_24_63 : 40;
7298 uint64_t ptp_lost : 4; /**< A packet with a PTP request was not able to be
7299 sent due to XSCOL */
7300 uint64_t late_col : 4; /**< TX Late Collision
7301 (SGMII/1000Base-X half-duplex only) */
7302 uint64_t xsdef : 4; /**< TX Excessive deferral
7303 (SGMII/1000Base-X half-duplex only) */
7304 uint64_t xscol : 4; /**< TX Excessive collisions
7305 (SGMII/1000Base-X half-duplex only) */
7306 uint64_t reserved_6_7 : 2;
7307 uint64_t undflw : 4; /**< TX Underflow */
7308 uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */
7309 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7311 uint64_t pko_nxa : 1;
7312 uint64_t ncb_nxa : 1;
7313 uint64_t undflw : 4;
7314 uint64_t reserved_6_7 : 2;
7317 uint64_t late_col : 4;
7318 uint64_t ptp_lost : 4;
7319 uint64_t reserved_24_63 : 40;
7322 struct cvmx_gmxx_tx_int_reg_cn30xx
7324 #if __BYTE_ORDER == __BIG_ENDIAN
7325 uint64_t reserved_19_63 : 45;
7326 uint64_t late_col : 3; /**< TX Late Collision */
7327 uint64_t reserved_15_15 : 1;
7328 uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */
7329 uint64_t reserved_11_11 : 1;
7330 uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */
7331 uint64_t reserved_5_7 : 3;
7332 uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */
7333 uint64_t reserved_1_1 : 1;
7334 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7336 uint64_t pko_nxa : 1;
7337 uint64_t reserved_1_1 : 1;
7338 uint64_t undflw : 3;
7339 uint64_t reserved_5_7 : 3;
7341 uint64_t reserved_11_11 : 1;
7343 uint64_t reserved_15_15 : 1;
7344 uint64_t late_col : 3;
7345 uint64_t reserved_19_63 : 45;
7348 struct cvmx_gmxx_tx_int_reg_cn31xx
7350 #if __BYTE_ORDER == __BIG_ENDIAN
7351 uint64_t reserved_15_63 : 49;
7352 uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */
7353 uint64_t reserved_11_11 : 1;
7354 uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */
7355 uint64_t reserved_5_7 : 3;
7356 uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */
7357 uint64_t reserved_1_1 : 1;
7358 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7360 uint64_t pko_nxa : 1;
7361 uint64_t reserved_1_1 : 1;
7362 uint64_t undflw : 3;
7363 uint64_t reserved_5_7 : 3;
7365 uint64_t reserved_11_11 : 1;
7367 uint64_t reserved_15_63 : 49;
7370 struct cvmx_gmxx_tx_int_reg_cn38xx
7372 #if __BYTE_ORDER == __BIG_ENDIAN
7373 uint64_t reserved_20_63 : 44;
7374 uint64_t late_col : 4; /**< TX Late Collision
7376 uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) */
7377 uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) */
7378 uint64_t reserved_6_7 : 2;
7379 uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */
7380 uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */
7381 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7383 uint64_t pko_nxa : 1;
7384 uint64_t ncb_nxa : 1;
7385 uint64_t undflw : 4;
7386 uint64_t reserved_6_7 : 2;
7389 uint64_t late_col : 4;
7390 uint64_t reserved_20_63 : 44;
7393 struct cvmx_gmxx_tx_int_reg_cn38xxp2
7395 #if __BYTE_ORDER == __BIG_ENDIAN
7396 uint64_t reserved_16_63 : 48;
7397 uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) */
7398 uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) */
7399 uint64_t reserved_6_7 : 2;
7400 uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */
7401 uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */
7402 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7404 uint64_t pko_nxa : 1;
7405 uint64_t ncb_nxa : 1;
7406 uint64_t undflw : 4;
7407 uint64_t reserved_6_7 : 2;
7410 uint64_t reserved_16_63 : 48;
7413 struct cvmx_gmxx_tx_int_reg_cn30xx cn50xx;
7414 struct cvmx_gmxx_tx_int_reg_cn52xx
7416 #if __BYTE_ORDER == __BIG_ENDIAN
7417 uint64_t reserved_20_63 : 44;
7418 uint64_t late_col : 4; /**< TX Late Collision
7419 (SGMII/1000Base-X half-duplex only) */
7420 uint64_t xsdef : 4; /**< TX Excessive deferral
7421 (SGMII/1000Base-X half-duplex only) */
7422 uint64_t xscol : 4; /**< TX Excessive collisions
7423 (SGMII/1000Base-X half-duplex only) */
7424 uint64_t reserved_6_7 : 2;
7425 uint64_t undflw : 4; /**< TX Underflow */
7426 uint64_t reserved_1_1 : 1;
7427 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7429 uint64_t pko_nxa : 1;
7430 uint64_t reserved_1_1 : 1;
7431 uint64_t undflw : 4;
7432 uint64_t reserved_6_7 : 2;
7435 uint64_t late_col : 4;
7436 uint64_t reserved_20_63 : 44;
7439 struct cvmx_gmxx_tx_int_reg_cn52xx cn52xxp1;
7440 struct cvmx_gmxx_tx_int_reg_cn52xx cn56xx;
7441 struct cvmx_gmxx_tx_int_reg_cn52xx cn56xxp1;
7442 struct cvmx_gmxx_tx_int_reg_cn38xx cn58xx;
7443 struct cvmx_gmxx_tx_int_reg_cn38xx cn58xxp1;
7444 struct cvmx_gmxx_tx_int_reg_cn63xx
7446 #if __BYTE_ORDER == __BIG_ENDIAN
7447 uint64_t reserved_24_63 : 40;
7448 uint64_t ptp_lost : 4; /**< A packet with a PTP request was not able to be
7449 sent due to XSCOL */
7450 uint64_t late_col : 4; /**< TX Late Collision
7451 (SGMII/1000Base-X half-duplex only) */
7452 uint64_t xsdef : 4; /**< TX Excessive deferral
7453 (SGMII/1000Base-X half-duplex only) */
7454 uint64_t xscol : 4; /**< TX Excessive collisions
7455 (SGMII/1000Base-X half-duplex only) */
7456 uint64_t reserved_6_7 : 2;
7457 uint64_t undflw : 4; /**< TX Underflow */
7458 uint64_t reserved_1_1 : 1;
7459 uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */
7461 uint64_t pko_nxa : 1;
7462 uint64_t reserved_1_1 : 1;
7463 uint64_t undflw : 4;
7464 uint64_t reserved_6_7 : 2;
7467 uint64_t late_col : 4;
7468 uint64_t ptp_lost : 4;
7469 uint64_t reserved_24_63 : 40;
7472 struct cvmx_gmxx_tx_int_reg_cn63xx cn63xxp1;
7474 typedef union cvmx_gmxx_tx_int_reg cvmx_gmxx_tx_int_reg_t;
7479 * GMX_TX_JAM = Packet TX Jam Pattern
7482 union cvmx_gmxx_tx_jam
7485 struct cvmx_gmxx_tx_jam_s
7487 #if __BYTE_ORDER == __BIG_ENDIAN
7488 uint64_t reserved_8_63 : 56;
7489 uint64_t jam : 8; /**< Jam pattern
7490 (SGMII/1000Base-X half-duplex only) */
7493 uint64_t reserved_8_63 : 56;
7496 struct cvmx_gmxx_tx_jam_s cn30xx;
7497 struct cvmx_gmxx_tx_jam_s cn31xx;
7498 struct cvmx_gmxx_tx_jam_s cn38xx;
7499 struct cvmx_gmxx_tx_jam_s cn38xxp2;
7500 struct cvmx_gmxx_tx_jam_s cn50xx;
7501 struct cvmx_gmxx_tx_jam_s cn52xx;
7502 struct cvmx_gmxx_tx_jam_s cn52xxp1;
7503 struct cvmx_gmxx_tx_jam_s cn56xx;
7504 struct cvmx_gmxx_tx_jam_s cn56xxp1;
7505 struct cvmx_gmxx_tx_jam_s cn58xx;
7506 struct cvmx_gmxx_tx_jam_s cn58xxp1;
7507 struct cvmx_gmxx_tx_jam_s cn63xx;
7508 struct cvmx_gmxx_tx_jam_s cn63xxp1;
7510 typedef union cvmx_gmxx_tx_jam cvmx_gmxx_tx_jam_t;
7515 * GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff
7518 union cvmx_gmxx_tx_lfsr
7521 struct cvmx_gmxx_tx_lfsr_s
7523 #if __BYTE_ORDER == __BIG_ENDIAN
7524 uint64_t reserved_16_63 : 48;
7525 uint64_t lfsr : 16; /**< The current state of the LFSR used to feed random
7526 numbers to compute truncated binary exponential
7528 (SGMII/1000Base-X half-duplex only) */
7531 uint64_t reserved_16_63 : 48;
7534 struct cvmx_gmxx_tx_lfsr_s cn30xx;
7535 struct cvmx_gmxx_tx_lfsr_s cn31xx;
7536 struct cvmx_gmxx_tx_lfsr_s cn38xx;
7537 struct cvmx_gmxx_tx_lfsr_s cn38xxp2;
7538 struct cvmx_gmxx_tx_lfsr_s cn50xx;
7539 struct cvmx_gmxx_tx_lfsr_s cn52xx;
7540 struct cvmx_gmxx_tx_lfsr_s cn52xxp1;
7541 struct cvmx_gmxx_tx_lfsr_s cn56xx;
7542 struct cvmx_gmxx_tx_lfsr_s cn56xxp1;
7543 struct cvmx_gmxx_tx_lfsr_s cn58xx;
7544 struct cvmx_gmxx_tx_lfsr_s cn58xxp1;
7545 struct cvmx_gmxx_tx_lfsr_s cn63xx;
7546 struct cvmx_gmxx_tx_lfsr_s cn63xxp1;
7548 typedef union cvmx_gmxx_tx_lfsr cvmx_gmxx_tx_lfsr_t;
7551 * cvmx_gmx#_tx_ovr_bp
7553 * GMX_TX_OVR_BP = Packet Interface TX Override BackPressure
7557 * In XAUI mode, only the lsb (corresponding to port0) of EN, BP, and IGN_FULL are used.
7559 * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero
7560 * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol
7561 * when GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by
7562 * GMX*_TX_XAUI_CTL[HG_EN]=1 and GMX*_RX0_UDD_SKP[LEN]=16.) HW can only auto-generate backpressure
7563 * through HiGig2 messages (optionally, when GMX*_HG2_CONTROL[HG2TX_EN]=1) with the HiGig2
7566 union cvmx_gmxx_tx_ovr_bp
7569 struct cvmx_gmxx_tx_ovr_bp_s
7571 #if __BYTE_ORDER == __BIG_ENDIAN
7572 uint64_t reserved_48_63 : 16;
7573 uint64_t tx_prt_bp : 16; /**< Per port BP sent to PKO
7575 1=Port should be back pressured
7576 TX_PRT_BP should not be set until
7577 GMX_INF_MODE[EN] has been enabled */
7578 uint64_t reserved_12_31 : 20;
7579 uint64_t en : 4; /**< Per port Enable back pressure override */
7580 uint64_t bp : 4; /**< Per port BackPressure status to use
7582 1=Port should be back pressured */
7583 uint64_t ign_full : 4; /**< Ignore the RX FIFO full when computing BP */
7585 uint64_t ign_full : 4;
7588 uint64_t reserved_12_31 : 20;
7589 uint64_t tx_prt_bp : 16;
7590 uint64_t reserved_48_63 : 16;
7593 struct cvmx_gmxx_tx_ovr_bp_cn30xx
7595 #if __BYTE_ORDER == __BIG_ENDIAN
7596 uint64_t reserved_11_63 : 53;
7597 uint64_t en : 3; /**< Per port Enable back pressure override */
7598 uint64_t reserved_7_7 : 1;
7599 uint64_t bp : 3; /**< Per port BackPressure status to use
7601 1=Port should be back pressured */
7602 uint64_t reserved_3_3 : 1;
7603 uint64_t ign_full : 3; /**< Ignore the RX FIFO full when computing BP */
7605 uint64_t ign_full : 3;
7606 uint64_t reserved_3_3 : 1;
7608 uint64_t reserved_7_7 : 1;
7610 uint64_t reserved_11_63 : 53;
7613 struct cvmx_gmxx_tx_ovr_bp_cn30xx cn31xx;
7614 struct cvmx_gmxx_tx_ovr_bp_cn38xx
7616 #if __BYTE_ORDER == __BIG_ENDIAN
7617 uint64_t reserved_12_63 : 52;
7618 uint64_t en : 4; /**< Per port Enable back pressure override */
7619 uint64_t bp : 4; /**< Per port BackPressure status to use
7621 1=Port should be back pressured */
7622 uint64_t ign_full : 4; /**< Ignore the RX FIFO full when computing BP */
7624 uint64_t ign_full : 4;
7627 uint64_t reserved_12_63 : 52;
7630 struct cvmx_gmxx_tx_ovr_bp_cn38xx cn38xxp2;
7631 struct cvmx_gmxx_tx_ovr_bp_cn30xx cn50xx;
7632 struct cvmx_gmxx_tx_ovr_bp_s cn52xx;
7633 struct cvmx_gmxx_tx_ovr_bp_s cn52xxp1;
7634 struct cvmx_gmxx_tx_ovr_bp_s cn56xx;
7635 struct cvmx_gmxx_tx_ovr_bp_s cn56xxp1;
7636 struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xx;
7637 struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xxp1;
7638 struct cvmx_gmxx_tx_ovr_bp_s cn63xx;
7639 struct cvmx_gmxx_tx_ovr_bp_s cn63xxp1;
7641 typedef union cvmx_gmxx_tx_ovr_bp cvmx_gmxx_tx_ovr_bp_t;
/**
 * cvmx_gmx#_tx_pause_pkt_dmac
 *
 * GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field
 *
 */
union cvmx_gmxx_tx_pause_pkt_dmac
{
	uint64_t u64;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_48_63 : 16;
	uint64_t dmac : 48; /**< The DMAC field placed in outbnd pause pkts */
#else
	uint64_t dmac : 48;
	uint64_t reserved_48_63 : 16;
#endif
	} s;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn30xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn31xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xxp2;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn50xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xxp1;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xxp1;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xxp1;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn63xx;
	struct cvmx_gmxx_tx_pause_pkt_dmac_s cn63xxp1;
};
typedef union cvmx_gmxx_tx_pause_pkt_dmac cvmx_gmxx_tx_pause_pkt_dmac_t;
/**
 * cvmx_gmx#_tx_pause_pkt_type
 *
 * GMX_TX_PAUSE_PKT_TYPE = Packet Interface TX Pause Packet TYPE field
 *
 */
union cvmx_gmxx_tx_pause_pkt_type
{
	uint64_t u64;
	struct cvmx_gmxx_tx_pause_pkt_type_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63 : 48;
	uint64_t type : 16; /**< The TYPE field placed in outbnd pause pkts */
#else
	uint64_t type : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn30xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn31xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn38xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn38xxp2;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn50xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn52xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn52xxp1;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn56xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn56xxp1;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn58xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn58xxp1;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn63xx;
	struct cvmx_gmxx_tx_pause_pkt_type_s cn63xxp1;
};
typedef union cvmx_gmxx_tx_pause_pkt_type cvmx_gmxx_tx_pause_pkt_type_t;
/**
 * cvmx_gmx#_tx_prts
 *
 * GMX_TX_PRTS = TX Ports
 *
 * Notes:
 * * The value programmed for PRTS is the number of the highest architected
 * port number on the interface, plus 1. For example, if port 2 is the
 * highest architected port, then the programmed value should be 3 since
 * there are 3 ports in the system - 0, 1, and 2.
 */
union cvmx_gmxx_tx_prts
{
	uint64_t u64;
	struct cvmx_gmxx_tx_prts_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63 : 59;
	uint64_t prts : 5; /**< Number of ports allowed on the interface
                                (SGMII/1000Base-X only) */
#else
	uint64_t prts : 5;
	uint64_t reserved_5_63 : 59;
#endif
	} s;
	struct cvmx_gmxx_tx_prts_s cn30xx;
	struct cvmx_gmxx_tx_prts_s cn31xx;
	struct cvmx_gmxx_tx_prts_s cn38xx;
	struct cvmx_gmxx_tx_prts_s cn38xxp2;
	struct cvmx_gmxx_tx_prts_s cn50xx;
	struct cvmx_gmxx_tx_prts_s cn52xx;
	struct cvmx_gmxx_tx_prts_s cn52xxp1;
	struct cvmx_gmxx_tx_prts_s cn56xx;
	struct cvmx_gmxx_tx_prts_s cn56xxp1;
	struct cvmx_gmxx_tx_prts_s cn58xx;
	struct cvmx_gmxx_tx_prts_s cn58xxp1;
	struct cvmx_gmxx_tx_prts_s cn63xx;
	struct cvmx_gmxx_tx_prts_s cn63xxp1;
};
typedef union cvmx_gmxx_tx_prts cvmx_gmxx_tx_prts_t;
/**
 * cvmx_gmx#_tx_spi_ctl
 *
 * GMX_TX_SPI_CTL = Spi4 TX ModesSpi4
 *
 */
union cvmx_gmxx_tx_spi_ctl
{
	uint64_t u64;
	struct cvmx_gmxx_tx_spi_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_2_63 : 62;
	uint64_t tpa_clr : 1; /**< TPA Clear Mode
                                   Clear credit counter when satisifed status */
	uint64_t cont_pkt : 1; /**< Contiguous Packet Mode
                                    Finish one packet before switching to another
                                    Cannot be set in Spi4 pass-through mode */
#else
	uint64_t cont_pkt : 1;
	uint64_t tpa_clr : 1;
	uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_gmxx_tx_spi_ctl_s cn38xx;
	struct cvmx_gmxx_tx_spi_ctl_s cn38xxp2;
	struct cvmx_gmxx_tx_spi_ctl_s cn58xx;
	struct cvmx_gmxx_tx_spi_ctl_s cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_ctl cvmx_gmxx_tx_spi_ctl_t;
/**
 * cvmx_gmx#_tx_spi_drain
 *
 * GMX_TX_SPI_DRAIN = Drain out Spi TX FIFO
 *
 */
union cvmx_gmxx_tx_spi_drain
{
	uint64_t u64;
	struct cvmx_gmxx_tx_spi_drain_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63 : 48;
	uint64_t drain : 16; /**< Per port drain control
                                  0=Normal operation
                                  1=GMX TX will be popped, but no valid data will
                                  be sent to SPX. Credits are correctly returned
                                  to PKO. STX_IGN_CAL should be set to ignore
                                  TPA and not stall due to back-pressure. */
#else
	uint64_t drain : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_gmxx_tx_spi_drain_s cn38xx;
	struct cvmx_gmxx_tx_spi_drain_s cn58xx;
	struct cvmx_gmxx_tx_spi_drain_s cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_drain cvmx_gmxx_tx_spi_drain_t;
/**
 * cvmx_gmx#_tx_spi_max
 *
 * GMX_TX_SPI_MAX = RGMII TX Spi4 MAX
 *
 */
union cvmx_gmxx_tx_spi_max
{
	uint64_t u64;
	struct cvmx_gmxx_tx_spi_max_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_23_63 : 41;
	uint64_t slice : 7; /**< Number of 16B blocks to transmit in a burst before
                                 switching to the next port. SLICE does not always
                                 limit the burst length transmitted by OCTEON.
                                 Depending on the traffic pattern and
                                 GMX_TX_SPI_ROUND programming, the next port could
                                 be the same as the current port. In this case,
                                 OCTEON may merge multiple sub-SLICE bursts into
                                 one contiguous burst that is longer than SLICE
                                 (as long as the burst does not cross a packet
                                 boundary).
                                 SLICE must be programmed to be >=
                                 GMX_TX_SPI_THRESH[THRESH]
                                 If SLICE==0, then the transmitter will tend to
                                 send the complete packet. The port will only
                                 switch if credits are exhausted or PKO cannot
                                 keep up. */
	uint64_t max2 : 8; /**< MAX2 (per Spi4.2 spec) */
	uint64_t max1 : 8; /**< MAX1 (per Spi4.2 spec)
                                MAX1 >= GMX_TX_SPI_THRESH[THRESH] */
#else
	uint64_t max1 : 8;
	uint64_t max2 : 8;
	uint64_t slice : 7;
	uint64_t reserved_23_63 : 41;
#endif
	} s;
	struct cvmx_gmxx_tx_spi_max_cn38xx
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63 : 48;
	uint64_t max2 : 8; /**< MAX2 (per Spi4.2 spec) */
	uint64_t max1 : 8; /**< MAX1 (per Spi4.2 spec)
                                MAX1 >= GMX_TX_SPI_THRESH[THRESH] */
#else
	uint64_t max1 : 8;
	uint64_t max2 : 8;
	uint64_t reserved_16_63 : 48;
#endif
	} cn38xx;
	struct cvmx_gmxx_tx_spi_max_cn38xx cn38xxp2;
	struct cvmx_gmxx_tx_spi_max_s cn58xx;
	struct cvmx_gmxx_tx_spi_max_s cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_max cvmx_gmxx_tx_spi_max_t;
/**
 * cvmx_gmx#_tx_spi_round#
 *
 * GMX_TX_SPI_ROUND = Controls SPI4 TX Arbitration
 *
 */
union cvmx_gmxx_tx_spi_roundx
{
	uint64_t u64;
	struct cvmx_gmxx_tx_spi_roundx_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_16_63 : 48;
	uint64_t round : 16; /**< Which Spi ports participate in each arbitration
                                  round. Each bit corresponds to a spi port
                                  - 0: this port will arb in this round
                                  - 1: this port will not arb in this round */
#else
	uint64_t round : 16;
	uint64_t reserved_16_63 : 48;
#endif
	} s;
	struct cvmx_gmxx_tx_spi_roundx_s cn58xx;
	struct cvmx_gmxx_tx_spi_roundx_s cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_roundx cvmx_gmxx_tx_spi_roundx_t;
/**
 * cvmx_gmx#_tx_spi_thresh
 *
 * GMX_TX_SPI_THRESH = RGMII TX Spi4 Transmit Threshold
 *
 * Notes:
 * Note: zero will map to 0x20
 *
 * This will normally creates Spi4 traffic bursts at least THRESH in length.
 * If dclk > eclk, then this rule may not always hold and Octeon may split
 * transfers into smaller bursts - some of which could be as short as 16B.
 * Octeon will never violate the Spi4.2 spec and send a non-EOP burst that is
 * not a multiple of 16B.
 */
union cvmx_gmxx_tx_spi_thresh
{
	uint64_t u64;
	struct cvmx_gmxx_tx_spi_thresh_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_6_63 : 58;
	uint64_t thresh : 6; /**< Transmit threshold in 16B blocks - cannot be zero
                                  THRESH <= TX_FIFO size (in non-passthrough mode)
                                  THRESH <= TX_FIFO size-2 (in passthrough mode)
                                  THRESH <= GMX_TX_SPI_MAX[MAX1]
                                  THRESH <= GMX_TX_SPI_MAX[MAX2], if not then is it
                                  possible for Octeon to send a Spi4 data burst of
                                  MAX2 <= burst <= THRESH 16B ticks
                                  GMX_TX_SPI_MAX[SLICE] must be programmed to be >=
                                  THRESH */
#else
	uint64_t thresh : 6;
	uint64_t reserved_6_63 : 58;
#endif
	} s;
	struct cvmx_gmxx_tx_spi_thresh_s cn38xx;
	struct cvmx_gmxx_tx_spi_thresh_s cn38xxp2;
	struct cvmx_gmxx_tx_spi_thresh_s cn58xx;
	struct cvmx_gmxx_tx_spi_thresh_s cn58xxp1;
};
typedef union cvmx_gmxx_tx_spi_thresh cvmx_gmxx_tx_spi_thresh_t;
/**
 * cvmx_gmx#_tx_xaui_ctl
 */
union cvmx_gmxx_tx_xaui_ctl
{
	uint64_t u64;
	struct cvmx_gmxx_tx_xaui_ctl_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_11_63 : 53;
	uint64_t hg_pause_hgi : 2; /**< HGI Field for HW generated HiGig pause packets */
	uint64_t hg_en : 1; /**< Enable HiGig Mode
                                 When HG_EN is set and GMX_RX_UDD_SKP[SKIP]=12
                                 the interface is in HiGig/HiGig+ mode and the
                                 following must be set:
                                 GMX_RX_FRM_CTL[PRE_CHK] == 0
                                 GMX_RX_UDD_SKP[FCSSEL] == 0
                                 GMX_RX_UDD_SKP[SKIP] == 12
                                 GMX_TX_APPEND[PREAMBLE] == 0
                                 When HG_EN is set and GMX_RX_UDD_SKP[SKIP]=16
                                 the interface is in HiGig2 mode and the
                                 following must be set:
                                 GMX_RX_FRM_CTL[PRE_CHK] == 0
                                 GMX_RX_UDD_SKP[FCSSEL] == 0
                                 GMX_RX_UDD_SKP[SKIP] == 16
                                 GMX_TX_APPEND[PREAMBLE] == 0
                                 GMX_PRT0_CBFC_CTL[RX_EN] == 0
                                 GMX_PRT0_CBFC_CTL[TX_EN] == 0 */
	uint64_t reserved_7_7 : 1;
	uint64_t ls_byp : 1; /**< Bypass the link status as determined by the XGMII
                                  receiver and set the link status of the
                                  transmitter to LS */
	uint64_t ls : 2; /**< Link Status
                              0=Link Ok
                                Link runs normally. RS passes MAC data to PCS
                              1=Local Fault
                                RS layer sends continuous remote fault
                                sequences
                              2=Remote Fault
                                RS layer sends continuous idles sequences
                              3=Link Drain
                                RS layer drops full packets to allow GMX and
                                PKO to drain their FIFOs */
	uint64_t reserved_2_3 : 2;
	uint64_t uni_en : 1; /**< Enable Unidirectional Mode (IEEE Clause 66) */
	uint64_t dic_en : 1; /**< Enable the deficit idle counter for IFG averaging */
#else
	uint64_t dic_en : 1;
	uint64_t uni_en : 1;
	uint64_t reserved_2_3 : 2;
	uint64_t ls : 2;
	uint64_t ls_byp : 1;
	uint64_t reserved_7_7 : 1;
	uint64_t hg_en : 1;
	uint64_t hg_pause_hgi : 2;
	uint64_t reserved_11_63 : 53;
#endif
	} s;
	struct cvmx_gmxx_tx_xaui_ctl_s cn52xx;
	struct cvmx_gmxx_tx_xaui_ctl_s cn52xxp1;
	struct cvmx_gmxx_tx_xaui_ctl_s cn56xx;
	struct cvmx_gmxx_tx_xaui_ctl_s cn56xxp1;
	struct cvmx_gmxx_tx_xaui_ctl_s cn63xx;
	struct cvmx_gmxx_tx_xaui_ctl_s cn63xxp1;
};
typedef union cvmx_gmxx_tx_xaui_ctl cvmx_gmxx_tx_xaui_ctl_t;
/**
 * cvmx_gmx#_xaui_ext_loopback
 */
union cvmx_gmxx_xaui_ext_loopback
{
	uint64_t u64;
	struct cvmx_gmxx_xaui_ext_loopback_s
	{
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_5_63 : 59;
	uint64_t en : 1; /**< Loopback enable
                              Puts the packet interface in external loopback
                              mode on the XAUI bus in which the RX lines are
                              reflected on the TX lines. */
	uint64_t thresh : 4; /**< Threshhold on the TX FIFO
                                  SW must only write the typical value. Any other
                                  value will cause loopback mode not to function
                                  correctly. */
#else
	uint64_t thresh : 4;
	uint64_t en : 1;
	uint64_t reserved_5_63 : 59;
#endif
	} s;
	struct cvmx_gmxx_xaui_ext_loopback_s cn52xx;
	struct cvmx_gmxx_xaui_ext_loopback_s cn52xxp1;
	struct cvmx_gmxx_xaui_ext_loopback_s cn56xx;
	struct cvmx_gmxx_xaui_ext_loopback_s cn56xxp1;
	struct cvmx_gmxx_xaui_ext_loopback_s cn63xx;
	struct cvmx_gmxx_xaui_ext_loopback_s cn63xxp1;
};
typedef union cvmx_gmxx_xaui_ext_loopback cvmx_gmxx_xaui_ext_loopback_t;