/*
 * Source: FreeBSD releng/10.0 (copy of stable/10 r259064)
 * sys/contrib/octeon-sdk/cvmx-lmcx-defs.h
 */
1 /***********************license start***************
2  * Copyright (c) 2003-2012  Cavium Inc. (support@cavium.com). All rights
3  * reserved.
4  *
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  *   * Redistributions of source code must retain the above copyright
11  *     notice, this list of conditions and the following disclaimer.
12  *
13  *   * Redistributions in binary form must reproduce the above
14  *     copyright notice, this list of conditions and the following
15  *     disclaimer in the documentation and/or other materials provided
16  *     with the distribution.
17
18  *   * Neither the name of Cavium Inc. nor the names of
19  *     its contributors may be used to endorse or promote products
20  *     derived from this software without specific prior written
21  *     permission.
22
23  * This Software, including technical data, may be subject to U.S. export  control
24  * laws, including the U.S. Export Administration Act and its  associated
25  * regulations, and may be subject to export or import  regulations in other
26  * countries.
27
28  * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29  * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30  * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31  * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32  * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33  * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34  * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35  * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36  * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37  * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38  ***********************license end**************************************/
39
40
41 /**
42  * cvmx-lmcx-defs.h
43  *
44  * Configuration and status register (CSR) type definitions for
45  * Octeon lmcx.
46  *
47  * This file is auto generated. Do not edit.
48  *
49  * <hr>$Revision$<hr>
50  *
51  */
52 #ifndef __CVMX_LMCX_DEFS_H__
53 #define __CVMX_LMCX_DEFS_H__
54
/* Address of the LMC(block_id)_BIST_CTL CSR.
 * When CSR address checking is enabled, warns if block_id is not valid
 * for the running Octeon model; the address math masks block_id into
 * range either way, so a usable address is always returned. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_BIST_CTL(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
                cvmx_warn("CVMX_LMCX_BIST_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880000F0ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_BIST_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F0ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_BIST_RESULT CSR.
 * Valid block_id range is model-dependent (checked branch warns on
 * out-of-range values); block_id is masked to 1 bit in the address. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_BIST_RESULT(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
                cvmx_warn("CVMX_LMCX_BIST_RESULT(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880000F8ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_BIST_RESULT(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F8ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_CHAR_CTL CSR.
 * CN68XX supports up to 4 LMC interfaces (block_id <= 3); the other
 * listed models have a single interface.  block_id is masked to 2 bits
 * with a per-interface stride of 0x1000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_CHAR_CTL(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_CHAR_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000220ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_CHAR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000220ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_CHAR_MASK0 CSR.
 * Same model/range rules as the other CHAR_* registers: only CN68XX
 * accepts block_id up to 3; block_id is masked to 2 bits. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_CHAR_MASK0(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_CHAR_MASK0(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000228ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_CHAR_MASK0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000228ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_CHAR_MASK1 CSR.
 * Model/range rules match CHAR_MASK0; base offset 0x230. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_CHAR_MASK1(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_CHAR_MASK1(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000230ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_CHAR_MASK1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000230ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_CHAR_MASK2 CSR.
 * Model/range rules match CHAR_MASK0; base offset 0x238. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_CHAR_MASK2(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_CHAR_MASK2(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000238ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_CHAR_MASK2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000238ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_CHAR_MASK3 CSR.
 * Model/range rules match CHAR_MASK0; base offset 0x240. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_CHAR_MASK3(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_CHAR_MASK3(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000240ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_CHAR_MASK3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000240ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_CHAR_MASK4 CSR.
 * Model/range rules match CHAR_MASK0; note the base offset (0x318) is
 * not contiguous with MASK0-MASK3 (0x228-0x240). */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_CHAR_MASK4(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_CHAR_MASK4(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000318ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_CHAR_MASK4(block_id) (CVMX_ADD_IO_SEG(0x0001180088000318ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_COMP_CTL CSR (older-generation models).
 * Only CN56XX has two interfaces (block_id <= 1); block_id is masked to
 * 1 bit with a per-interface stride of 0x60000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_COMP_CTL(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
              (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_COMP_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000028ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000028ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_COMP_CTL2 CSR (newer-generation models).
 * CN68XX allows block_id <= 3; block_id masked to 2 bits, stride 0x1000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_COMP_CTL2(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_COMP_CTL2(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880001B8ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_COMP_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B8ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_CONFIG CSR (newer-generation models).
 * CN68XX allows block_id <= 3; block_id masked to 2 bits, stride 0x1000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_CONFIG(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_CONFIG(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000188ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_CONFIG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000188ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_CONTROL CSR (newer-generation models).
 * CN68XX allows block_id <= 3; block_id masked to 2 bits, stride 0x1000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_CONTROL(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_CONTROL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000190ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_CONTROL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000190ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_CTL CSR (older-generation models).
 * Only CN56XX has two interfaces; block_id masked to 1 bit, stride 0x60000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_CTL(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
              (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000010ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000010ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_CTL1 CSR.
 * Note the model list differs from CVMX_LMCX_CTL: CN31XX and CN38XX are
 * not accepted here.  block_id masked to 1 bit, stride 0x60000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_CTL1(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
              (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_CTL1(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000090ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000090ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_DCLK_CNT CSR (newer-generation models).
 * CN68XX allows block_id <= 3; block_id masked to 2 bits, stride 0x1000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DCLK_CNT(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_DCLK_CNT(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880001E0ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_DCLK_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E0ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_DCLK_CNT_HI CSR (older-generation models).
 * Only CN56XX has two interfaces; block_id masked to 1 bit, stride 0x60000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DCLK_CNT_HI(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
              (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_DCLK_CNT_HI(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000070ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DCLK_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000070ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_DCLK_CNT_LO CSR (older-generation models).
 * Same model/range rules as DCLK_CNT_HI; base offset 0x68. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DCLK_CNT_LO(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
              (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_DCLK_CNT_LO(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000068ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DCLK_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000068ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_DCLK_CTL CSR.
 * Only valid on CN56XX (block_id <= 1); block_id masked to 1 bit. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DCLK_CTL(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
                cvmx_warn("CVMX_LMCX_DCLK_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880000B8ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DCLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B8ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_DDR2_CTL CSR (older-generation models).
 * Only CN56XX has two interfaces; block_id masked to 1 bit, stride 0x60000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DDR2_CTL(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
              (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_DDR2_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000018ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DDR2_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000018ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_DDR_PLL_CTL CSR (newer-generation models).
 * CN68XX allows block_id <= 3; block_id masked to 2 bits, stride 0x1000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DDR_PLL_CTL(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_DDR_PLL_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000258ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_DDR_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000258ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_DELAY_CFG CSR.
 * Note CN31XX is absent from the accepted model list here, unlike most
 * other old-generation registers in this file.  block_id masked to 1 bit. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DELAY_CFG(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
              (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_DELAY_CFG(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000088ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DELAY_CFG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000088ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_DIMM(offset)_PARAMS CSR.
 * Two-index accessor: offset selects the DIMM (0 or 1), block_id selects
 * the LMC interface (up to 3 on CN68XX).  Both indices are masked, then
 * combined and scaled by the 8-byte register size. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DIMMX_PARAMS(unsigned long offset, unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset <= 1)) && ((block_id == 0)))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id == 0)))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset <= 1)) && ((block_id == 0)))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && (((offset <= 1)) && ((block_id <= 3)))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset <= 1)) && ((block_id == 0))))))
                cvmx_warn("CVMX_LMCX_DIMMX_PARAMS(%lu,%lu) is invalid on this chip\n", offset, block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000270ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8;
}
#else
#define CVMX_LMCX_DIMMX_PARAMS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000270ull) + (((offset) & 1) + ((block_id) & 3) * 0x200000ull) * 8)
#endif
/* Address of the LMC(block_id)_DIMM_CTL CSR (newer-generation models).
 * CN68XX allows block_id <= 3; block_id masked to 2 bits, stride 0x1000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DIMM_CTL(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_DIMM_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000310ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_DIMM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000310ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_DLL_CTL CSR.
 * Only valid on CN52XX (single interface) and CN56XX (block_id <= 1). */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DLL_CTL(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
                cvmx_warn("CVMX_LMCX_DLL_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880000C0ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000C0ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_DLL_CTL2 CSR (newer-generation models).
 * CN68XX allows block_id <= 3; block_id masked to 2 bits, stride 0x1000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DLL_CTL2(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_DLL_CTL2(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880001C8ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_DLL_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001C8ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_DLL_CTL3 CSR (newer-generation models).
 * Same model/range rules as DLL_CTL2; base offset 0x218. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_DLL_CTL3(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_DLL_CTL3(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000218ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_DLL_CTL3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000218ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_DUAL_MEMCFG CSR.
 * Switch-style accessor (always compiled, no #if guard): per-family
 * address encoding differs — single-interface families mask block_id to
 * 0, CN56XX uses a 1-bit index with stride 0x60000000, CN68XX a 2-bit
 * index with stride 0x1000000.  An out-of-range block_id (or unknown
 * family) falls through, warns, and returns the block-0 address. */
static inline uint64_t CVMX_LMCX_DUAL_MEMCFG(unsigned long block_id)
{
        switch(cvmx_get_octeon_family()) {
                case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
                        if ((block_id == 0))
                                return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 0) * 0x60000000ull;
                        break;
                case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
                        if ((block_id <= 1))
                                return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 1) * 0x60000000ull;
                        break;
                case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
                        if ((block_id <= 3))
                                return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 3) * 0x1000000ull;
                        break;
        }
        cvmx_warn("CVMX_LMCX_DUAL_MEMCFG (block_id = %lu) not supported on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 0) * 0x60000000ull;
}
/* Address of the LMC(block_id)_ECC_SYND CSR.
 * Switch-style accessor: single-interface families mask block_id to 0,
 * CN56XX supports block_id <= 1 (stride 0x60000000), CN68XX supports
 * block_id <= 3 (stride 0x1000000).  Out-of-range/unknown falls through,
 * warns, and returns the block-0 address. */
static inline uint64_t CVMX_LMCX_ECC_SYND(unsigned long block_id)
{
        switch(cvmx_get_octeon_family()) {
                case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
                case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
                        if ((block_id == 0))
                                return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 0) * 0x60000000ull;
                        break;
                case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
                        if ((block_id <= 1))
                                return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 1) * 0x60000000ull;
                        break;
                case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
                        if ((block_id <= 3))
                                return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 3) * 0x1000000ull;
                        break;
        }
        cvmx_warn("CVMX_LMCX_ECC_SYND (block_id = %lu) not supported on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 0) * 0x60000000ull;
}
/* Address of the LMC(block_id)_FADR CSR.
 * Switch-style accessor with the same per-family encoding as ECC_SYND;
 * base offset 0x20.  Out-of-range/unknown falls through, warns, and
 * returns the block-0 address. */
static inline uint64_t CVMX_LMCX_FADR(unsigned long block_id)
{
        switch(cvmx_get_octeon_family()) {
                case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
                case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
                case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
                        if ((block_id == 0))
                                return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 0) * 0x60000000ull;
                        break;
                case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
                        if ((block_id <= 1))
                                return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 1) * 0x60000000ull;
                        break;
                case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
                        if ((block_id <= 3))
                                return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 3) * 0x1000000ull;
                        break;
        }
        cvmx_warn("CVMX_LMCX_FADR (block_id = %lu) not supported on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 0) * 0x60000000ull;
}
/* Address of the LMC(block_id)_IFB_CNT CSR (newer-generation models).
 * CN68XX allows block_id <= 3; block_id masked to 2 bits, stride 0x1000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_IFB_CNT(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && ((block_id <= 3))) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_IFB_CNT(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880001D0ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_IFB_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D0ull) + ((block_id) & 3) * 0x1000000ull)
#endif
/* Address of the LMC(block_id)_IFB_CNT_HI CSR (older-generation models).
 * Only CN56XX has two interfaces; block_id masked to 1 bit, stride 0x60000000. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_IFB_CNT_HI(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
              (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_IFB_CNT_HI(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000050ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_IFB_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000050ull) + ((block_id) & 1) * 0x60000000ull)
#endif
/* Address of the LMC(block_id)_IFB_CNT_LO CSR (older-generation models).
 * Same model/range rules as IFB_CNT_HI; base offset 0x48. */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_IFB_CNT_LO(unsigned long block_id)
{
        if (!(
              (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
              (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
                cvmx_warn("CVMX_LMCX_IFB_CNT_LO(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000048ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_IFB_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000048ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_INT, warning when block_id
 * does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_INT(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_INT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001F0ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_INT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F0ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_INT_EN, warning when block_id
 * does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_INT_EN(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_INT_EN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001E8ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E8ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_MEM_CFG0, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_MEM_CFG0(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
	    (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_MEM_CFG0(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000000ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_MEM_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000000ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_MEM_CFG1, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_MEM_CFG1(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
	    (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_MEM_CFG1(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000008ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_MEM_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000008ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_MODEREG_PARAMS0, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_MODEREG_PARAMS0(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_MODEREG_PARAMS0(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001A8ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_MODEREG_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A8ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_MODEREG_PARAMS1, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_MODEREG_PARAMS1(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_MODEREG_PARAMS1(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000260ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_MODEREG_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000260ull) + ((block_id) & 3) * 0x1000000ull)
#endif
669 static inline uint64_t CVMX_LMCX_NXM(unsigned long block_id)
670 {
671         switch(cvmx_get_octeon_family()) {
672                 case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
673                 case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
674                 case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
675                 case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
676                 case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
677                 case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
678                         if ((block_id == 0))
679                                 return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 0) * 0x60000000ull;
680                         break;
681                 case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
682                         if ((block_id <= 1))
683                                 return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 1) * 0x60000000ull;
684                         break;
685                 case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
686                         if ((block_id <= 3))
687                                 return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 3) * 0x1000000ull;
688                         break;
689         }
690         cvmx_warn("CVMX_LMCX_NXM (block_id = %lu) not supported on this chip\n", block_id);
691         return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 0) * 0x60000000ull;
692 }
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_OPS_CNT, warning when block_id
 * does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_OPS_CNT(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_OPS_CNT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001D8ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_OPS_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D8ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_OPS_CNT_HI, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_OPS_CNT_HI(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
	    (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_OPS_CNT_HI(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000060ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_OPS_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000060ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_OPS_CNT_LO, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_OPS_CNT_LO(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
	    (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_OPS_CNT_LO(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000058ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_OPS_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000058ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_PHY_CTL, warning when block_id
 * does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_PHY_CTL(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_PHY_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000210ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_PHY_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000210ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_PLL_BWCTL; only block 0 exists
 * (CN30XX/CN31XX/CN38XX), so block_id does not affect the address. */
static inline uint64_t CVMX_LMCX_PLL_BWCTL(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_PLL_BWCTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000040ull);
}
#else
#define CVMX_LMCX_PLL_BWCTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000040ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_PLL_CTL, warning when block_id
 * does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_PLL_CTL(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
	    (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_PLL_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880000A8ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A8ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_PLL_STATUS, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_PLL_STATUS(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
	    (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_PLL_STATUS(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880000B0ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_PLL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B0ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_READ_LEVEL_CTL, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_READ_LEVEL_CTL(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1);

	if (!valid)
		cvmx_warn("CVMX_LMCX_READ_LEVEL_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000140ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_READ_LEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000140ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_READ_LEVEL_DBG, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_READ_LEVEL_DBG(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1);

	if (!valid)
		cvmx_warn("CVMX_LMCX_READ_LEVEL_DBG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000148ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_READ_LEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000148ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_READ_LEVEL_RANK(offset),
 * warning when the (offset, block_id) pair does not exist on the running
 * Octeon model. */
static inline uint64_t CVMX_LMCX_READ_LEVEL_RANKX(unsigned long offset, unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && offset <= 3 && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && offset <= 3 && block_id <= 1);

	if (!valid)
		cvmx_warn("CVMX_LMCX_READ_LEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* 0xC000000 * 8 == 0x60000000, the per-interface spacing. */
	return CVMX_ADD_IO_SEG(0x0001180088000100ull) + ((offset & 3) + (block_id & 1) * 0xC000000ull) * 8;
}
#else
#define CVMX_LMCX_READ_LEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000100ull) + (((offset) & 3) + ((block_id) & 1) * 0xC000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_RESET_CTL, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_RESET_CTL(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_RESET_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000180ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_RESET_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000180ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_RLEVEL_CTL, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_RLEVEL_CTL(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_RLEVEL_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880002A0ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_RLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A0ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_RLEVEL_DBG, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_RLEVEL_DBG(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_RLEVEL_DBG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880002A8ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_RLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A8ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_RLEVEL_RANK(offset), warning
 * when the (offset, block_id) pair does not exist on the running Octeon
 * model. */
static inline uint64_t CVMX_LMCX_RLEVEL_RANKX(unsigned long offset, unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && offset <= 3 && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && offset <= 3 && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && offset <= 3 && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && offset <= 3 && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && offset <= 3 && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_RLEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* 0x200000 * 8 == 0x1000000, the per-interface spacing. */
	return CVMX_ADD_IO_SEG(0x0001180088000280ull) + ((offset & 3) + (block_id & 3) * 0x200000ull) * 8;
}
#else
#define CVMX_LMCX_RLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000280ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_RODT_COMP_CTL, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_RODT_COMP_CTL(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
	    (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_RODT_COMP_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880000A0ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_RODT_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A0ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_RODT_CTL, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_RODT_CTL(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
	    (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_RODT_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000078ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_RODT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000078ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_RODT_MASK, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_RODT_MASK(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_RODT_MASK(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000268ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_RODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x0001180088000268ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_SCRAMBLED_FADR; only block 0
 * exists on the models that have it, so block_id does not affect the
 * address. */
static inline uint64_t CVMX_LMCX_SCRAMBLED_FADR(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_SCRAMBLED_FADR(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000330ull);
}
#else
#define CVMX_LMCX_SCRAMBLED_FADR(block_id) (CVMX_ADD_IO_SEG(0x0001180088000330ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_SCRAMBLE_CFG0; only block 0
 * exists on the models that have it, so block_id does not affect the
 * address. */
static inline uint64_t CVMX_LMCX_SCRAMBLE_CFG0(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_SCRAMBLE_CFG0(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000320ull);
}
#else
#define CVMX_LMCX_SCRAMBLE_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000320ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_SCRAMBLE_CFG1; only block 0
 * exists on the models that have it, so block_id does not affect the
 * address. */
static inline uint64_t CVMX_LMCX_SCRAMBLE_CFG1(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_SCRAMBLE_CFG1(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000328ull);
}
#else
#define CVMX_LMCX_SCRAMBLE_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000328ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_SLOT_CTL0, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_SLOT_CTL0(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_SLOT_CTL0(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001F8ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_SLOT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F8ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_SLOT_CTL1, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_SLOT_CTL1(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_SLOT_CTL1(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000200ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_SLOT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000200ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_SLOT_CTL2, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_SLOT_CTL2(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_SLOT_CTL2(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000208ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_SLOT_CTL2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000208ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_TIMING_PARAMS0, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_TIMING_PARAMS0(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_TIMING_PARAMS0(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000198ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_TIMING_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000198ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_TIMING_PARAMS1, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_TIMING_PARAMS1(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_TIMING_PARAMS1(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001A0ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_TIMING_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A0ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_TRO_CTL, warning when block_id
 * does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_TRO_CTL(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_TRO_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000248ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_TRO_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000248ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_TRO_STAT, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_TRO_STAT(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_TRO_STAT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000250ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_TRO_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180088000250ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_WLEVEL_CTL, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_WLEVEL_CTL(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_WLEVEL_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000300ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_WLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000300ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_WLEVEL_DBG, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_WLEVEL_DBG(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_WLEVEL_DBG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000308ull) + (block_id & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_WLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000308ull) + ((block_id) & 3) * 0x1000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_WLEVEL_RANK(offset), warning
 * when the (offset, block_id) pair does not exist on the running Octeon
 * model. */
static inline uint64_t CVMX_LMCX_WLEVEL_RANKX(unsigned long offset, unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN61XX) && offset <= 3 && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN63XX) && offset <= 3 && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN66XX) && offset <= 3 && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN68XX) && offset <= 3 && block_id <= 3) ||
	    (OCTEON_IS_MODEL(OCTEON_CNF71XX) && offset <= 3 && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_WLEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* 0x200000 * 8 == 0x1000000, the per-interface spacing. */
	return CVMX_ADD_IO_SEG(0x00011800880002B0ull) + ((offset & 3) + (block_id & 3) * 0x200000ull) * 8;
}
#else
#define CVMX_LMCX_WLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800880002B0ull) + (((offset) & 3) + ((block_id) & 3) * 0x200000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Compute the CSR address of LMC(block_id)_WODT_CTL0, warning when
 * block_id does not exist on the running Octeon model. */
static inline uint64_t CVMX_LMCX_WODT_CTL0(unsigned long block_id)
{
	int valid =
	    (OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
	    (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
	    (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);

	if (!valid)
		cvmx_warn("CVMX_LMCX_WODT_CTL0(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000030ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_WODT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000030ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_WODT_CTL1(unsigned long block_id)
{
        /* Warn when the requested LMC instance does not exist on this model;
           only CN56XX has two instances among the chips that have this CSR. */
        int valid =
              (OCTEON_IS_MODEL(OCTEON_CN30XX) && (block_id == 0)) ||
              (OCTEON_IS_MODEL(OCTEON_CN31XX) && (block_id == 0)) ||
              (OCTEON_IS_MODEL(OCTEON_CN52XX) && (block_id == 0)) ||
              (OCTEON_IS_MODEL(OCTEON_CN56XX) && (block_id <= 1));
        if (!valid)
                cvmx_warn("CVMX_LMCX_WODT_CTL1(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000080ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_WODT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000080ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_LMCX_WODT_MASK(unsigned long block_id)
{
        /* Warn when the requested LMC instance does not exist on this model;
           only CN68XX has four instances, the rest have one. */
        int valid =
              (OCTEON_IS_MODEL(OCTEON_CN61XX) && (block_id == 0)) ||
              (OCTEON_IS_MODEL(OCTEON_CN63XX) && (block_id == 0)) ||
              (OCTEON_IS_MODEL(OCTEON_CN66XX) && (block_id == 0)) ||
              (OCTEON_IS_MODEL(OCTEON_CN68XX) && (block_id <= 3)) ||
              (OCTEON_IS_MODEL(OCTEON_CNF71XX) && (block_id == 0));
        if (!valid)
                cvmx_warn("CVMX_LMCX_WODT_MASK(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880001B0ull) + ((block_id) & 3) * 0x1000000ull;
}
#else
#define CVMX_LMCX_WODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B0ull) + ((block_id) & 3) * 0x1000000ull)
#endif
1175
/**
 * cvmx_lmc#_bist_ctl
 *
 * Notes:
 * This controls BiST only for the memories that operate on DCLK.  The normal, chip-wide BiST flow
 * controls BiST for the memories that operate on ECLK.
 */
union cvmx_lmcx_bist_ctl {
        uint64_t u64;
        /* Field order is reversed between big- and little-endian bitfield
           layouts so that 'start' always occupies bit 0 of the 64-bit CSR. */
        struct cvmx_lmcx_bist_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_1_63                : 63;
        uint64_t start                        : 1;  /**< A 0->1 transition causes BiST to run. */
#else
        uint64_t start                        : 1;
        uint64_t reserved_1_63                : 63;
#endif
        } s;
        /* All chips that implement this CSR share the common layout. */
        struct cvmx_lmcx_bist_ctl_s           cn50xx;
        struct cvmx_lmcx_bist_ctl_s           cn52xx;
        struct cvmx_lmcx_bist_ctl_s           cn52xxp1;
        struct cvmx_lmcx_bist_ctl_s           cn56xx;
        struct cvmx_lmcx_bist_ctl_s           cn56xxp1;
};
typedef union cvmx_lmcx_bist_ctl cvmx_lmcx_bist_ctl_t;
1201
/**
 * cvmx_lmc#_bist_result
 *
 * Notes:
 * Access to the internal BiST results
 * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail).
 */
union cvmx_lmcx_bist_result {
        uint64_t u64;
        /* Common layout: result bits occupy CSR bits [10:0]. */
        struct cvmx_lmcx_bist_result_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_11_63               : 53;
        uint64_t csrd2e                       : 1;  /**< BiST result of CSRD2E memory (0=pass, !0=fail) */
        uint64_t csre2d                       : 1;  /**< BiST result of CSRE2D memory (0=pass, !0=fail) */
        uint64_t mwf                          : 1;  /**< BiST result of MWF memories (0=pass, !0=fail) */
        uint64_t mwd                          : 3;  /**< BiST result of MWD memories (0=pass, !0=fail) */
        uint64_t mwc                          : 1;  /**< BiST result of MWC memories (0=pass, !0=fail) */
        uint64_t mrf                          : 1;  /**< BiST result of MRF memories (0=pass, !0=fail) */
        uint64_t mrd                          : 3;  /**< BiST result of MRD memories (0=pass, !0=fail) */
#else
        uint64_t mrd                          : 3;
        uint64_t mrf                          : 1;
        uint64_t mwc                          : 1;
        uint64_t mwd                          : 3;
        uint64_t mwf                          : 1;
        uint64_t csre2d                       : 1;
        uint64_t csrd2e                       : 1;
        uint64_t reserved_11_63               : 53;
#endif
        } s;
        /* CN50XX lacks the CSRE2D/CSRD2E result bits; only bits [8:0] exist. */
        struct cvmx_lmcx_bist_result_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_9_63                : 55;
        uint64_t mwf                          : 1;  /**< BiST result of MWF memories (0=pass, !0=fail) */
        uint64_t mwd                          : 3;  /**< BiST result of MWD memories (0=pass, !0=fail) */
        uint64_t mwc                          : 1;  /**< BiST result of MWC memories (0=pass, !0=fail) */
        uint64_t mrf                          : 1;  /**< BiST result of MRF memories (0=pass, !0=fail) */
        uint64_t mrd                          : 3;  /**< BiST result of MRD memories (0=pass, !0=fail) */
#else
        uint64_t mrd                          : 3;
        uint64_t mrf                          : 1;
        uint64_t mwc                          : 1;
        uint64_t mwd                          : 3;
        uint64_t mwf                          : 1;
        uint64_t reserved_9_63                : 55;
#endif
        } cn50xx;
        struct cvmx_lmcx_bist_result_s        cn52xx;
        struct cvmx_lmcx_bist_result_s        cn52xxp1;
        struct cvmx_lmcx_bist_result_s        cn56xx;
        struct cvmx_lmcx_bist_result_s        cn56xxp1;
};
typedef union cvmx_lmcx_bist_result cvmx_lmcx_bist_result_t;
1255
/**
 * cvmx_lmc#_char_ctl
 *
 * LMC_CHAR_CTL = LMC Characterization Control
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
 */
union cvmx_lmcx_char_ctl {
        uint64_t u64;
        /* Full layout with DR/SKEW_ON bits (CN61XX/CN66XX/CN68XX/CNF71XX). */
        struct cvmx_lmcx_char_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_44_63               : 20;
        uint64_t dr                           : 1;  /**< Pattern at Data Rate (not Clock Rate) */
        uint64_t skew_on                      : 1;  /**< Skew adjacent bits */
        uint64_t en                           : 1;  /**< Enable characterization */
        uint64_t sel                          : 1;  /**< Pattern select
                                                         0 = PRBS
                                                         1 = Programmable pattern */
        uint64_t prog                         : 8;  /**< Programmable pattern */
        uint64_t prbs                         : 32; /**< PRBS Polynomial */
#else
        uint64_t prbs                         : 32;
        uint64_t prog                         : 8;
        uint64_t sel                          : 1;
        uint64_t en                           : 1;
        uint64_t skew_on                      : 1;
        uint64_t dr                           : 1;
        uint64_t reserved_44_63               : 20;
#endif
        } s;
        struct cvmx_lmcx_char_ctl_s           cn61xx;
        /* CN63XX (and CN68XX pass 1) omit the DR and SKEW_ON bits. */
        struct cvmx_lmcx_char_ctl_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_42_63               : 22;
        uint64_t en                           : 1;  /**< Enable characterization */
        uint64_t sel                          : 1;  /**< Pattern select
                                                         0 = PRBS
                                                         1 = Programmable pattern */
        uint64_t prog                         : 8;  /**< Programmable pattern */
        uint64_t prbs                         : 32; /**< PRBS Polynomial */
#else
        uint64_t prbs                         : 32;
        uint64_t prog                         : 8;
        uint64_t sel                          : 1;
        uint64_t en                           : 1;
        uint64_t reserved_42_63               : 22;
#endif
        } cn63xx;
        struct cvmx_lmcx_char_ctl_cn63xx      cn63xxp1;
        struct cvmx_lmcx_char_ctl_s           cn66xx;
        struct cvmx_lmcx_char_ctl_s           cn68xx;
        struct cvmx_lmcx_char_ctl_cn63xx      cn68xxp1;
        struct cvmx_lmcx_char_ctl_s           cnf71xx;
};
typedef union cvmx_lmcx_char_ctl cvmx_lmcx_char_ctl_t;
1310
/**
 * cvmx_lmc#_char_mask0
 *
 * LMC_CHAR_MASK0 = LMC Characterization Mask0
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
 */
union cvmx_lmcx_char_mask0 {
        uint64_t u64;
        struct cvmx_lmcx_char_mask0_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t mask                         : 64; /**< Mask for DQ0[63:0] */
#else
        uint64_t mask                         : 64;
#endif
        } s;
        /* Layout is identical on every chip that implements this CSR. */
        struct cvmx_lmcx_char_mask0_s         cn61xx;
        struct cvmx_lmcx_char_mask0_s         cn63xx;
        struct cvmx_lmcx_char_mask0_s         cn63xxp1;
        struct cvmx_lmcx_char_mask0_s         cn66xx;
        struct cvmx_lmcx_char_mask0_s         cn68xx;
        struct cvmx_lmcx_char_mask0_s         cn68xxp1;
        struct cvmx_lmcx_char_mask0_s         cnf71xx;
};
typedef union cvmx_lmcx_char_mask0 cvmx_lmcx_char_mask0_t;
1335
/**
 * cvmx_lmc#_char_mask1
 *
 * LMC_CHAR_MASK1 = LMC Characterization Mask1
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
 */
union cvmx_lmcx_char_mask1 {
        uint64_t u64;
        struct cvmx_lmcx_char_mask1_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_8_63                : 56;
        uint64_t mask                         : 8;  /**< Mask for DQ0[71:64] */
#else
        uint64_t mask                         : 8;
        uint64_t reserved_8_63                : 56;
#endif
        } s;
        /* Layout is identical on every chip that implements this CSR. */
        struct cvmx_lmcx_char_mask1_s         cn61xx;
        struct cvmx_lmcx_char_mask1_s         cn63xx;
        struct cvmx_lmcx_char_mask1_s         cn63xxp1;
        struct cvmx_lmcx_char_mask1_s         cn66xx;
        struct cvmx_lmcx_char_mask1_s         cn68xx;
        struct cvmx_lmcx_char_mask1_s         cn68xxp1;
        struct cvmx_lmcx_char_mask1_s         cnf71xx;
};
typedef union cvmx_lmcx_char_mask1 cvmx_lmcx_char_mask1_t;
1362
/**
 * cvmx_lmc#_char_mask2
 *
 * LMC_CHAR_MASK2 = LMC Characterization Mask2
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
 */
union cvmx_lmcx_char_mask2 {
        uint64_t u64;
        struct cvmx_lmcx_char_mask2_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t mask                         : 64; /**< Mask for DQ1[63:0] */
#else
        uint64_t mask                         : 64;
#endif
        } s;
        /* Layout is identical on every chip that implements this CSR. */
        struct cvmx_lmcx_char_mask2_s         cn61xx;
        struct cvmx_lmcx_char_mask2_s         cn63xx;
        struct cvmx_lmcx_char_mask2_s         cn63xxp1;
        struct cvmx_lmcx_char_mask2_s         cn66xx;
        struct cvmx_lmcx_char_mask2_s         cn68xx;
        struct cvmx_lmcx_char_mask2_s         cn68xxp1;
        struct cvmx_lmcx_char_mask2_s         cnf71xx;
};
typedef union cvmx_lmcx_char_mask2 cvmx_lmcx_char_mask2_t;
1387
/**
 * cvmx_lmc#_char_mask3
 *
 * LMC_CHAR_MASK3 = LMC Characterization Mask3
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
 */
union cvmx_lmcx_char_mask3 {
        uint64_t u64;
        struct cvmx_lmcx_char_mask3_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_8_63                : 56;
        uint64_t mask                         : 8;  /**< Mask for DQ1[71:64] */
#else
        uint64_t mask                         : 8;
        uint64_t reserved_8_63                : 56;
#endif
        } s;
        /* Layout is identical on every chip that implements this CSR. */
        struct cvmx_lmcx_char_mask3_s         cn61xx;
        struct cvmx_lmcx_char_mask3_s         cn63xx;
        struct cvmx_lmcx_char_mask3_s         cn63xxp1;
        struct cvmx_lmcx_char_mask3_s         cn66xx;
        struct cvmx_lmcx_char_mask3_s         cn68xx;
        struct cvmx_lmcx_char_mask3_s         cn68xxp1;
        struct cvmx_lmcx_char_mask3_s         cnf71xx;
};
typedef union cvmx_lmcx_char_mask3 cvmx_lmcx_char_mask3_t;
1414
/**
 * cvmx_lmc#_char_mask4
 *
 * LMC_CHAR_MASK4 = LMC Characterization Mask4
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
 */
union cvmx_lmcx_char_mask4 {
        uint64_t u64;
        /* Per-pin mask bits for the command/address group, packed into bits [32:0]. */
        struct cvmx_lmcx_char_mask4_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_33_63               : 31;
        uint64_t reset_n_mask                 : 1;  /**< Mask for RESET_L */
        uint64_t a_mask                       : 16; /**< Mask for A[15:0] */
        uint64_t ba_mask                      : 3;  /**< Mask for BA[2:0] */
        uint64_t we_n_mask                    : 1;  /**< Mask for WE_N */
        uint64_t cas_n_mask                   : 1;  /**< Mask for CAS_N */
        uint64_t ras_n_mask                   : 1;  /**< Mask for RAS_N */
        uint64_t odt1_mask                    : 2;  /**< Mask for ODT1 */
        uint64_t odt0_mask                    : 2;  /**< Mask for ODT0 */
        uint64_t cs1_n_mask                   : 2;  /**< Mask for CS1_N */
        uint64_t cs0_n_mask                   : 2;  /**< Mask for CS0_N */
        uint64_t cke_mask                     : 2;  /**< Mask for CKE* */
#else
        uint64_t cke_mask                     : 2;
        uint64_t cs0_n_mask                   : 2;
        uint64_t cs1_n_mask                   : 2;
        uint64_t odt0_mask                    : 2;
        uint64_t odt1_mask                    : 2;
        uint64_t ras_n_mask                   : 1;
        uint64_t cas_n_mask                   : 1;
        uint64_t we_n_mask                    : 1;
        uint64_t ba_mask                      : 3;
        uint64_t a_mask                       : 16;
        uint64_t reset_n_mask                 : 1;
        uint64_t reserved_33_63               : 31;
#endif
        } s;
        /* Layout is identical on every chip that implements this CSR. */
        struct cvmx_lmcx_char_mask4_s         cn61xx;
        struct cvmx_lmcx_char_mask4_s         cn63xx;
        struct cvmx_lmcx_char_mask4_s         cn63xxp1;
        struct cvmx_lmcx_char_mask4_s         cn66xx;
        struct cvmx_lmcx_char_mask4_s         cn68xx;
        struct cvmx_lmcx_char_mask4_s         cn68xxp1;
        struct cvmx_lmcx_char_mask4_s         cnf71xx;
};
typedef union cvmx_lmcx_char_mask4 cvmx_lmcx_char_mask4_t;
1461
/**
 * cvmx_lmc#_comp_ctl
 *
 * LMC_COMP_CTL = LMC Compensation control
 *
 */
union cvmx_lmcx_comp_ctl {
        uint64_t u64;
        /* Common view: N-side controls in bits [31:16], P-side in [15:8];
           bits [7:0] are reserved in this view. */
        struct cvmx_lmcx_comp_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_32_63               : 32;
        uint64_t nctl_csr                     : 4;  /**< Compensation control bits */
        uint64_t nctl_clk                     : 4;  /**< Compensation control bits */
        uint64_t nctl_cmd                     : 4;  /**< Compensation control bits */
        uint64_t nctl_dat                     : 4;  /**< Compensation control bits */
        uint64_t pctl_csr                     : 4;  /**< Compensation control bits */
        uint64_t pctl_clk                     : 4;  /**< Compensation control bits */
        uint64_t reserved_0_7                 : 8;
#else
        uint64_t reserved_0_7                 : 8;
        uint64_t pctl_clk                     : 4;
        uint64_t pctl_csr                     : 4;
        uint64_t nctl_dat                     : 4;
        uint64_t nctl_cmd                     : 4;
        uint64_t nctl_clk                     : 4;
        uint64_t nctl_csr                     : 4;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        /* CN30XX-family view: full set of P-side controls, including
           PCTL_CMD and PCTL_DAT in the low byte. */
        struct cvmx_lmcx_comp_ctl_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_32_63               : 32;
        uint64_t nctl_csr                     : 4;  /**< Compensation control bits */
        uint64_t nctl_clk                     : 4;  /**< Compensation control bits */
        uint64_t nctl_cmd                     : 4;  /**< Compensation control bits */
        uint64_t nctl_dat                     : 4;  /**< Compensation control bits */
        uint64_t pctl_csr                     : 4;  /**< Compensation control bits */
        uint64_t pctl_clk                     : 4;  /**< Compensation control bits */
        uint64_t pctl_cmd                     : 4;  /**< Compensation control bits */
        uint64_t pctl_dat                     : 4;  /**< Compensation control bits */
#else
        uint64_t pctl_dat                     : 4;
        uint64_t pctl_cmd                     : 4;
        uint64_t pctl_clk                     : 4;
        uint64_t pctl_csr                     : 4;
        uint64_t nctl_dat                     : 4;
        uint64_t nctl_cmd                     : 4;
        uint64_t nctl_clk                     : 4;
        uint64_t nctl_csr                     : 4;
        uint64_t reserved_32_63               : 32;
#endif
        } cn30xx;
        struct cvmx_lmcx_comp_ctl_cn30xx      cn31xx;
        struct cvmx_lmcx_comp_ctl_cn30xx      cn38xx;
        struct cvmx_lmcx_comp_ctl_cn30xx      cn38xxp2;
        /* CN50XX-family view: only CSR/DAT controls; PCTL_DAT widens to 5 bits. */
        struct cvmx_lmcx_comp_ctl_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_32_63               : 32;
        uint64_t nctl_csr                     : 4;  /**< Compensation control bits */
        uint64_t reserved_20_27               : 8;
        uint64_t nctl_dat                     : 4;  /**< Compensation control bits */
        uint64_t pctl_csr                     : 4;  /**< Compensation control bits */
        uint64_t reserved_5_11                : 7;
        uint64_t pctl_dat                     : 5;  /**< Compensation control bits */
#else
        uint64_t pctl_dat                     : 5;
        uint64_t reserved_5_11                : 7;
        uint64_t pctl_csr                     : 4;
        uint64_t nctl_dat                     : 4;
        uint64_t reserved_20_27               : 8;
        uint64_t nctl_csr                     : 4;
        uint64_t reserved_32_63               : 32;
#endif
        } cn50xx;
        struct cvmx_lmcx_comp_ctl_cn50xx      cn52xx;
        struct cvmx_lmcx_comp_ctl_cn50xx      cn52xxp1;
        struct cvmx_lmcx_comp_ctl_cn50xx      cn56xx;
        struct cvmx_lmcx_comp_ctl_cn50xx      cn56xxp1;
        struct cvmx_lmcx_comp_ctl_cn50xx      cn58xx;
        /* CN58XX pass 1: same as CN50XX view but PCTL_DAT is only 4 bits. */
        struct cvmx_lmcx_comp_ctl_cn58xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_32_63               : 32;
        uint64_t nctl_csr                     : 4;  /**< Compensation control bits */
        uint64_t reserved_20_27               : 8;
        uint64_t nctl_dat                     : 4;  /**< Compensation control bits */
        uint64_t pctl_csr                     : 4;  /**< Compensation control bits */
        uint64_t reserved_4_11                : 8;
        uint64_t pctl_dat                     : 4;  /**< Compensation control bits */
#else
        uint64_t pctl_dat                     : 4;
        uint64_t reserved_4_11                : 8;
        uint64_t pctl_csr                     : 4;
        uint64_t nctl_dat                     : 4;
        uint64_t reserved_20_27               : 8;
        uint64_t nctl_csr                     : 4;
        uint64_t reserved_32_63               : 32;
#endif
        } cn58xxp1;
};
typedef union cvmx_lmcx_comp_ctl cvmx_lmcx_comp_ctl_t;
1562
/**
 * cvmx_lmc#_comp_ctl2
 *
 * LMC_COMP_CTL2 = LMC Compensation control
 *
 */
union cvmx_lmcx_comp_ctl2 {
        uint64_t u64;
        struct cvmx_lmcx_comp_ctl2_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_34_63               : 30;
        uint64_t ddr__ptune                   : 4;  /**< DDR PCTL from compensation circuit
                                                         The encoded value provides debug information for the
                                                         compensation impedance on P-pullup */
        uint64_t ddr__ntune                   : 4;  /**< DDR NCTL from compensation circuit
                                                         The encoded value provides debug information for the
                                                         compensation impedance on N-pulldown */
        uint64_t m180                         : 1;  /**< Cap impedance at 180 Ohm (instead of 240 Ohm) */
        uint64_t byp                          : 1;  /**< Bypass mode
                                                         When set, PTUNE,NTUNE are the compensation setting.
                                                         When clear, DDR_PTUNE,DDR_NTUNE are the compensation setting. */
        uint64_t ptune                        : 4;  /**< PCTL impedance control in bypass mode */
        uint64_t ntune                        : 4;  /**< NCTL impedance control in bypass mode */
        uint64_t rodt_ctl                     : 4;  /**< NCTL RODT impedance control bits
                                                         This field controls ODT values during a memory read
                                                         on the Octeon side
                                                         0000 = No ODT
                                                         0001 = 20 ohm
                                                         0010 = 30 ohm
                                                         0011 = 40 ohm
                                                         0100 = 60 ohm
                                                         0101 = 120 ohm
                                                         0110-1111 = Reserved */
        uint64_t cmd_ctl                      : 4;  /**< Drive strength control for CMD/A/RESET_L drivers
                                                         0001 = 24 ohm
                                                         0010 = 26.67 ohm
                                                         0011 = 30 ohm
                                                         0100 = 34.3 ohm
                                                         0101 = 40 ohm
                                                         0110 = 48 ohm
                                                         0111 = 60 ohm
                                                         0000,1000-1111 = Reserved */
        uint64_t ck_ctl                       : 4;  /**< Drive strength control for CK/CS*_L/ODT/CKE* drivers
                                                         0001 = 24 ohm
                                                         0010 = 26.67 ohm
                                                         0011 = 30 ohm
                                                         0100 = 34.3 ohm
                                                         0101 = 40 ohm
                                                         0110 = 48 ohm
                                                         0111 = 60 ohm
                                                         0000,1000-1111 = Reserved */
        uint64_t dqx_ctl                      : 4;  /**< Drive strength control for DQ/DQS drivers
                                                         0001 = 24 ohm
                                                         0010 = 26.67 ohm
                                                         0011 = 30 ohm
                                                         0100 = 34.3 ohm
                                                         0101 = 40 ohm
                                                         0110 = 48 ohm
                                                         0111 = 60 ohm
                                                         0000,1000-1111 = Reserved */
#else
        uint64_t dqx_ctl                      : 4;
        uint64_t ck_ctl                       : 4;
        uint64_t cmd_ctl                      : 4;
        uint64_t rodt_ctl                     : 4;
        uint64_t ntune                        : 4;
        uint64_t ptune                        : 4;
        uint64_t byp                          : 1;
        uint64_t m180                         : 1;
        uint64_t ddr__ntune                   : 4;
        uint64_t ddr__ptune                   : 4;
        uint64_t reserved_34_63               : 30;
#endif
        } s;
        /* Layout is identical on every chip that implements this CSR. */
        struct cvmx_lmcx_comp_ctl2_s          cn61xx;
        struct cvmx_lmcx_comp_ctl2_s          cn63xx;
        struct cvmx_lmcx_comp_ctl2_s          cn63xxp1;
        struct cvmx_lmcx_comp_ctl2_s          cn66xx;
        struct cvmx_lmcx_comp_ctl2_s          cn68xx;
        struct cvmx_lmcx_comp_ctl2_s          cn68xxp1;
        struct cvmx_lmcx_comp_ctl2_s          cnf71xx;
};
typedef union cvmx_lmcx_comp_ctl2 cvmx_lmcx_comp_ctl2_t;
1646
1647 /**
1648  * cvmx_lmc#_config
1649  *
1650  * LMC_CONFIG = LMC Configuration Register
1651  *
1652  * This register controls certain parameters of  Memory Configuration
1653  *
1654  * Notes:
1655  * a. Priority order for hardware writes to LMC*_CONFIG/LMC*_FADR/LMC*_SCRAMBLED_FADR/LMC*_ECC_SYND: DED error >= NXM error > SEC error
1656  * b. The self refresh entry sequence(s) power the DLL up/down (depending on LMC*_MODEREG_PARAMS0[DLL])
1657  * when LMC*_CONFIG[SREF_WITH_DLL] is set
1658  * c. Prior to the self-refresh exit sequence, LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 should be re-programmed (if needed) to the
1659  * appropriate values
1660  *
1661  * LMC Bringup Sequence:
1662  * 1. SW must ensure there are no pending DRAM transactions and that the DDR PLL and the DLL have been initialized.
1663  * 2. Write LMC*_COMP_CTL2, LMC*_CONTROL, LMC*_WODT_MASK, LMC*_DUAL_MEMCFG, LMC*_TIMING_PARAMS0, LMC*_TIMING_PARAMS1,
1664  *    LMC*_MODEREG_PARAMS0, LMC*_MODEREG_PARAMS1, LMC*_RESET_CTL (with DDR3RST=0), LMC*_CONFIG (with INIT_START=0)
1665  *    with appropriate values, if necessary.
1666  * 3. Wait 200us, then write LMC*_RESET_CTL[DDR3RST] = 1.
1667  * 4. Initialize all ranks at once by writing LMC*_CONFIG[RANKMASK][n] = 1, LMC*_CONFIG[INIT_STATUS][n] = 1, and LMC*_CONFIG[INIT_START] = 1
1668  *    where n is a valid rank index for the specific board configuration.
1669  * 5. for each rank n to be write-leveled [
1670  *       if auto write-leveling is desired [
1671  *           write LMC*_CONFIG[RANKMASK][n] = 1, LMC*_WLEVEL_CTL appropriately and LMC*_CONFIG[INIT_START] = 1
1672  *           wait until LMC*_WLEVEL_RANKn[STATUS] = 3
1673  *       ] else [
1674  *           write LMC*_WLEVEL_RANKn with appropriate values
1675  *       ]
1676  *    ]
1677  * 6. for each rank n to be read-leveled [
1678  *       if auto read-leveling is desired [
1679  *           write LMC*_CONFIG[RANKMASK][n] = 1, LMC*_RLEVEL_CTL appropriately and LMC*_CONFIG[INIT_START] = 1
1680  *           wait until LMC*_RLEVEL_RANKn[STATUS] = 3
1681  *       ] else [
1682  *           write LMC*_RLEVEL_RANKn with appropriate values
1683  *       ]
1684  *    ]
1685  */
1686 union cvmx_lmcx_config {
1687         uint64_t u64;
1688         struct cvmx_lmcx_config_s {
1689 #ifdef __BIG_ENDIAN_BITFIELD
1690         uint64_t reserved_61_63               : 3;
1691         uint64_t mode32b                      : 1;  /**< 32b Datapath Mode                                          NS
1692                                                          Set to 1 if we use only 32 DQ pins
1693                                                          0 for 64b DQ mode. */
1694         uint64_t scrz                         : 1;  /**< Hide LMC*_SCRAMBLE_CFG0 and LMC*_SCRAMBLE_CFG1 when set */
1695         uint64_t early_unload_d1_r1           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 3
1696                                                          reads
1697                                                          The recommended EARLY_UNLOAD_D1_R1 value can be calculated
1698                                                          after the final LMC*_RLEVEL_RANK3[BYTE*] values are
1699                                                          selected (as part of read-leveling initialization).
1700                                                          Then, determine the largest read-leveling setting
1701                                                          for rank 3 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK3[BYTEi])
1702                                                          across all i), then set EARLY_UNLOAD_D1_R1
1703                                                          when the low two bits of this largest setting is not
1704                                                          3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset<1:0>!=3)). */
1705         uint64_t early_unload_d1_r0           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 2
1706                                                          reads
1707                                                          The recommended EARLY_UNLOAD_D1_R0 value can be calculated
1708                                                          after the final LMC*_RLEVEL_RANK2[BYTE*] values are
1709                                                          selected (as part of read-leveling initialization).
1710                                                          Then, determine the largest read-leveling setting
1711                                                          for rank 2 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK2[BYTEi])
1712                                                          across all i), then set EARLY_UNLOAD_D1_R0
1713                                                          when the low two bits of this largest setting is not
1714                                                          3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset<1:0>!=3)). */
1715         uint64_t early_unload_d0_r1           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 1
1716                                                          reads
1717                                                          The recommended EARLY_UNLOAD_D0_R1 value can be calculated
1718                                                          after the final LMC*_RLEVEL_RANK1[BYTE*] values are
1719                                                          selected (as part of read-leveling initialization).
1720                                                          Then, determine the largest read-leveling setting
1721                                                          for rank 1 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK1[BYTEi])
1722                                                          across all i), then set EARLY_UNLOAD_D0_R1
1723                                                          when the low two bits of this largest setting is not
1724                                                          3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */
1725         uint64_t early_unload_d0_r0           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 0
1726                                                          reads.
1727                                                          The recommended EARLY_UNLOAD_D0_R0 value can be calculated
1728                                                          after the final LMC*_RLEVEL_RANK0[BYTE*] values are
1729                                                          selected (as part of read-leveling initialization).
1730                                                          Then, determine the largest read-leveling setting
1731                                                          for rank 0 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK0[BYTEi])
1732                                                          across all i), then set EARLY_UNLOAD_D0_R0
1733                                                          when the low two bits of this largest setting is not
1734                                                          3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */
1735         uint64_t init_status                  : 4;  /**< Indicates status of initialization
1736                                                          INIT_STATUS[n] = 1 implies rank n has been initialized
1737                                                          SW must set necessary INIT_STATUS bits with the
1738                                                          same LMC*_CONFIG write that initiates
1739                                                          power-up/init and self-refresh exit sequences
1740                                                          (if the required INIT_STATUS bits are not already
1741                                                          set before LMC initiates the sequence).
1742                                                          INIT_STATUS determines the chip-selects that assert
1743                                                          during refresh, ZQCS, and precharge power-down and
1744                                                          self-refresh entry/exit SEQUENCE's. */
1745         uint64_t mirrmask                     : 4;  /**< Mask determining which ranks are address-mirrored.
1746                                                          MIRRMASK<n> = 1 means Rank n addresses are mirrored
1747                                                          for 0 <= n <= 3
1748                                                          A mirrored read/write has these differences:
1749                                                           - DDR_BA<1> is swapped with DDR_BA<0>
1750                                                           - DDR_A<8> is swapped with DDR_A<7>
1751                                                           - DDR_A<6> is swapped with DDR_A<5>
1752                                                           - DDR_A<4> is swapped with DDR_A<3>
1753                                                          When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
1754         uint64_t rankmask                     : 4;  /**< Mask to select rank to be leveled/initialized.
1755                                                          To write-level/read-level/initialize rank i, set RANKMASK<i>
1756                                                                          RANK_ENA=1               RANK_ENA=0
1757                                                            RANKMASK<0> = DIMM0_CS0                DIMM0_CS0
1758                                                            RANKMASK<1> = DIMM0_CS1                  MBZ
1759                                                            RANKMASK<2> = DIMM1_CS0                DIMM1_CS0
1760                                                            RANKMASK<3> = DIMM1_CS1                  MBZ
1761                                                          For read/write leveling, each rank has to be leveled separately,
1762                                                          so RANKMASK should only have one bit set.
1763                                                          RANKMASK is not used during self-refresh entry/exit and
1764                                                          precharge power-down entry/exit instruction sequences.
1765                                                          When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
1766         uint64_t rank_ena                     : 1;  /**< RANK ena (for use with dual-rank DIMMs)
1767                                                          For dual-rank DIMMs, the rank_ena bit will enable
1768                                                          the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
1769                                                          (pbank_lsb-1) address bit.
1770                                                          Write 0 for SINGLE ranked DIMM's. */
1771         uint64_t sref_with_dll                : 1;  /**< Self-refresh entry/exit write MR1 and MR2
1772                                                          When set, self-refresh entry and exit instruction sequences
1773                                                          write MR1 and MR2 (in all ranks). (The writes occur before
1774                                                          self-refresh entry, and after self-refresh exit.)
1775                                                          When clear, self-refresh entry and exit instruction sequences
1776                                                          do not write any registers in the DDR3 parts. */
1777         uint64_t early_dqx                    : 1;  /**< Send DQx signals one CK cycle earlier for the case when
1778                                                          the shortest DQx lines have a larger delay than the CK line */
1779         uint64_t sequence                     : 3;  /**< Selects the sequence that LMC runs after a 0->1
1780                                                          transition on LMC*_CONFIG[INIT_START].
1781                                                          SEQUENCE=0=power-up/init:
1782                                                            - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
1783                                                            - INIT_STATUS must equal RANKMASK
1784                                                            - DDR_DIMM*_CKE signals activated (if they weren't already active)
1785                                                            - RDIMM register control words 0-15 will be written to RANKMASK-selected
1786                                                                RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
1787                                                                LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
1788                                                                LMC*_DIMM_CTL descriptions below for more details.)
1789                                                            - MR0, MR1, MR2, and MR3 will be written to selected ranks
1790                                                          SEQUENCE=1=read-leveling:
1791                                                            - RANKMASK selects the rank to be read-leveled
1792                                                            - MR3 written to selected rank
1793                                                          SEQUENCE=2=self-refresh entry:
1794                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
1795                                                            - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
1796                                                            - DDR_DIMM*_CKE signals de-activated
1797                                                          SEQUENCE=3=self-refresh exit:
1798                                                            - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
1799                                                            - DDR_DIMM*_CKE signals activated
1800                                                            - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
1801                                                          SEQUENCE=4=precharge power-down entry:
1802                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
1803                                                            - DDR_DIMM*_CKE signals de-activated
1804                                                          SEQUENCE=5=precharge power-down exit:
1805                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
1806                                                            - DDR_DIMM*_CKE signals activated
1807                                                          SEQUENCE=6=write-leveling:
1808                                                            - RANKMASK selects the rank to be write-leveled
1809                                                            - INIT_STATUS must indicate all ranks with attached DRAM
1810                                                            - MR1 and MR2 written to INIT_STATUS-selected ranks
1811                                                          SEQUENCE=7=illegal
1812                                                          Precharge power-down entry and exit SEQUENCE's may also
1813                                                          be automatically generated by the HW when IDLEPOWER!=0.
1814                                                          Self-refresh entry SEQUENCE's may also be automatically
1815                                                          generated by hardware upon a chip warm or soft reset
1816                                                          sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
1817                                                          LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
1818                                                          to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
1819                                                          Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
1820                                                          If there are two consecutive power-up/init's without
1821                                                          a DRESET assertion between them, LMC asserts DDR_DIMM*_CKE as part of
1822                                                          the first power-up/init, and continues to assert DDR_DIMM*_CKE
1823                                                          through the remainder of the first and the second power-up/init.
1824                                                          If DDR_DIMM*_CKE deactivation and reactivation is needed for
1825                                                          a second power-up/init, a DRESET assertion is required
1826                                                          between the first and the second. */
1827         uint64_t ref_zqcs_int                 : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
1828                                                          increments. A Refresh sequence is triggered when bits
1829                                                          [24:18] are equal to 0, and a ZQCS sequence is triggered
1830                                                          when [36:18] are equal to 0.
1831                                                          Program [24:18] to RND-DN(tREFI/clkPeriod/512)
1832                                                          Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
1833                                                          that this value should always be greater than 32, to account for
1834                                                          resistor calibration delays.
1835                                                          000_00000000_00000000: RESERVED
1836                                                          Max Refresh interval = 127 * 512           = 65024 CKs
1837                                                          Max ZQCS interval    = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for a 800 MHz CK
1838                                                          LMC*_CONFIG[INIT_STATUS] determines which ranks receive
1839                                                          the REF / ZQCS. LMC does not send any refreshes / ZQCS's
1840                                                          when LMC*_CONFIG[INIT_STATUS]=0. */
1841         uint64_t reset                        : 1;  /**< Reset oneshot pulse for refresh counter,
1842                                                          and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
1843                                                          CSR's. SW should write this to a one, then re-write
1844                                                          it to a zero to cause the reset. */
1845         uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
1846                                                          0=disabled, 1=enabled */
1847         uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
1848                                                          having waited for 2^FORCEWRITE CK cycles.  0=disabled. */
1849         uint64_t idlepower                    : 3;  /**< Enter precharge power-down mode after the memory
1850                                                          controller has been idle for 2^(2+IDLEPOWER) CK cycles.
1851                                                          0=disabled.
1852                                                          This field should only be programmed after initialization.
1853                                                          LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
1854                                                          is disabled during the precharge power-down. */
1855         uint64_t pbank_lsb                    : 4;  /**< DIMM address bit select
1856                                                          Referring to the explanation for ROW_LSB,
1857                                                          PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
1858                                                          In the 512MB DIMM Example, assuming no rank bits:
1859                                                          pbank_lsb=mem_addr[15+13] for 64b mode
1860                                                                   =mem_addr[14+13] for 32b mode
1861                                                          Decoding for pbank_lsb
1862                                                               - 0000:DIMM = mem_adr[28]    / rank = mem_adr[27] (if RANK_ENA)
1863                                                               - 0001:DIMM = mem_adr[29]    / rank = mem_adr[28]      "
1864                                                               - 0010:DIMM = mem_adr[30]    / rank = mem_adr[29]      "
1865                                                               - 0011:DIMM = mem_adr[31]    / rank = mem_adr[30]      "
1866                                                               - 0100:DIMM = mem_adr[32]    / rank = mem_adr[31]      "
1867                                                               - 0101:DIMM = mem_adr[33]    / rank = mem_adr[32]      "
1868                                                               - 0110:DIMM = mem_adr[34]    / rank = mem_adr[33]      "
1869                                                               - 0111:DIMM = 0              / rank = mem_adr[34]      "
1870                                                               - 1000-1111: RESERVED
1871                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
1872                                                          DDR3 parts, the column address width = 10, so with
1873                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
1874                                                          With rank_ena = 0, pbank_lsb = 2
1875                                                          With rank_ena = 1, pbank_lsb = 3 */
1876         uint64_t row_lsb                      : 3;  /**< Row Address bit select
1877                                                          Encoding used to determine which memory address
1878                                                          bit position represents the low order DDR ROW address.
1879                                                          The processor's memory address[34:7] needs to be
1880                                                          translated to DRAM addresses (bnk,row,col,rank and DIMM)
1881                                                          and that is a function of the following:
1882                                                          1. Datapath Width (64 or 32)
1883                                                          2. \# Banks (8)
1884                                                          3. \# Column Bits of the memory part - spec'd indirectly
1885                                                          by this register.
1886                                                          4. \# Row Bits of the memory part - spec'd indirectly
1887                                                          5. \# Ranks in a DIMM - spec'd by RANK_ENA
1888                                                          6. \# DIMM's in the system by the register below (PBANK_LSB).
1889                                                          Col Address starts from mem_addr[2] for 32b (4Bytes)
1890                                                          dq width or from mem_addr[3] for 64b (8Bytes) dq width
1891                                                          \# col + \# bank = 12. Hence row_lsb is mem_adr[15] for
1892                                                          64bmode or mem_adr[14] for 32b mode. Hence row_lsb
1893                                                          parameter should be set to 001 (64b) or 000 (32b).
1894                                                          Decoding for row_lsb
1895                                                               - 000: row_lsb = mem_adr[14]
1896                                                               - 001: row_lsb = mem_adr[15]
1897                                                               - 010: row_lsb = mem_adr[16]
1898                                                               - 011: row_lsb = mem_adr[17]
1899                                                               - 100: row_lsb = mem_adr[18]
1900                                                               - 101: row_lsb = mem_adr[19]
1901                                                               - 110: row_lsb = mem_adr[20]
1902                                                               - 111: RESERVED
1903                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
1904                                                          DDR3 parts, the column address width = 10, so with
1905                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
1906         uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 8b ECC
1907                                                          check/correct logic. Should be 1 when used with DIMMs
1908                                                          with ECC. 0, otherwise.
1909                                                          When this mode is turned on, DQ[71:64]
1910                                                          on writes, will contain the ECC code generated for
1911                                                          the 64 bits of data which will be
1912                                                          written in the memory and then later on reads, used
1913                                                          to check for Single bit error (which will be auto-
1914                                                          corrected) and Double Bit error (which will be
1915                                                          reported). When not turned on, DQ[71:64]
1916                                                          are driven to 0.  Please refer to SEC_ERR, DED_ERR,
1917                                                          LMC*_FADR, LMC*_SCRAMBLED_FADR and LMC*_ECC_SYND registers
1918                                                          for diagnostics information when there is an error. */
1919         uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory sequence that is
1920                                                          selected by LMC*_CONFIG[SEQUENCE].  This register is a
1921                                                          oneshot and clears itself each time it is set. */
1922 #else
1923         uint64_t init_start                   : 1;
1924         uint64_t ecc_ena                      : 1;
1925         uint64_t row_lsb                      : 3;
1926         uint64_t pbank_lsb                    : 4;
1927         uint64_t idlepower                    : 3;
1928         uint64_t forcewrite                   : 4;
1929         uint64_t ecc_adr                      : 1;
1930         uint64_t reset                        : 1;
1931         uint64_t ref_zqcs_int                 : 19;
1932         uint64_t sequence                     : 3;
1933         uint64_t early_dqx                    : 1;
1934         uint64_t sref_with_dll                : 1;
1935         uint64_t rank_ena                     : 1;
1936         uint64_t rankmask                     : 4;
1937         uint64_t mirrmask                     : 4;
1938         uint64_t init_status                  : 4;
1939         uint64_t early_unload_d0_r0           : 1;
1940         uint64_t early_unload_d0_r1           : 1;
1941         uint64_t early_unload_d1_r0           : 1;
1942         uint64_t early_unload_d1_r1           : 1;
1943         uint64_t scrz                         : 1;
1944         uint64_t mode32b                      : 1;
1945         uint64_t reserved_61_63               : 3;
1946 #endif
1947         } s;
1948         struct cvmx_lmcx_config_s             cn61xx;
1949         struct cvmx_lmcx_config_cn63xx {
1950 #ifdef __BIG_ENDIAN_BITFIELD
1951         uint64_t reserved_59_63               : 5;
1952         uint64_t early_unload_d1_r1           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 3
1953                                                          reads
1954                                                          The recommended EARLY_UNLOAD_D1_R1 value can be calculated
1955                                                          after the final LMC*_RLEVEL_RANK3[BYTE*] values are
1956                                                          selected (as part of read-leveling initialization).
1957                                                          Then, determine the largest read-leveling setting
1958                                                          for rank 3 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK3[BYTEi])
1959                                                          across all i), then set EARLY_UNLOAD_D1_R1
1960                                                          when the low two bits of this largest setting is not
1961                                                          3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset<1:0>!=3)). */
1962         uint64_t early_unload_d1_r0           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 2
1963                                                          reads
1964                                                          The recommended EARLY_UNLOAD_D1_R0 value can be calculated
1965                                                          after the final LMC*_RLEVEL_RANK2[BYTE*] values are
1966                                                          selected (as part of read-leveling initialization).
1967                                                          Then, determine the largest read-leveling setting
1968                                                          for rank 2 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK2[BYTEi])
1969                                                          across all i), then set EARLY_UNLOAD_D1_R0
1970                                                          when the low two bits of this largest setting is not
1971                                                          3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset<1:0>!=3)). */
1972         uint64_t early_unload_d0_r1           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 1
1973                                                          reads
1974                                                          The recommended EARLY_UNLOAD_D0_R1 value can be calculated
1975                                                          after the final LMC*_RLEVEL_RANK1[BYTE*] values are
1976                                                          selected (as part of read-leveling initialization).
1977                                                          Then, determine the largest read-leveling setting
1978                                                          for rank 1 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK1[BYTEi])
1979                                                          across all i), then set EARLY_UNLOAD_D0_R1
1980                                                          when the low two bits of this largest setting is not
1981                                                          3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */
1982         uint64_t early_unload_d0_r0           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 0
1983                                                          reads.
1984                                                          The recommended EARLY_UNLOAD_D0_R0 value can be calculated
1985                                                          after the final LMC*_RLEVEL_RANK0[BYTE*] values are
1986                                                          selected (as part of read-leveling initialization).
1987                                                          Then, determine the largest read-leveling setting
1988                                                          for rank 0 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK0[BYTEi])
1989                                                          across all i), then set EARLY_UNLOAD_D0_R0
1990                                                          when the low two bits of this largest setting is not
1991                                                          3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */
1992         uint64_t init_status                  : 4;  /**< Indicates status of initialization
1993                                                          INIT_STATUS[n] = 1 implies rank n has been initialized
1994                                                          SW must set necessary INIT_STATUS bits with the
1995                                                          same LMC*_CONFIG write that initiates
1996                                                          power-up/init and self-refresh exit sequences
1997                                                          (if the required INIT_STATUS bits are not already
1998                                                          set before LMC initiates the sequence).
1999                                                          INIT_STATUS determines the chip-selects that assert
2000                                                          during refresh, ZQCS, and precharge power-down and
2001                                                          self-refresh entry/exit SEQUENCE's. */
2002         uint64_t mirrmask                     : 4;  /**< Mask determining which ranks are address-mirrored.
2003                                                          MIRRMASK<n> = 1 means Rank n addresses are mirrored
2004                                                          for 0 <= n <= 3
2005                                                          A mirrored read/write has these differences:
2006                                                           - DDR_BA<1> is swapped with DDR_BA<0>
2007                                                           - DDR_A<8> is swapped with DDR_A<7>
2008                                                           - DDR_A<6> is swapped with DDR_A<5>
2009                                                           - DDR_A<4> is swapped with DDR_A<3>
2010                                                          When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
2011         uint64_t rankmask                     : 4;  /**< Mask to select rank to be leveled/initialized.
2012                                                          To write-level/read-level/initialize rank i, set RANKMASK<i>
2013                                                                          RANK_ENA=1               RANK_ENA=0
2014                                                            RANKMASK<0> = DIMM0_CS0                DIMM0_CS0
2015                                                            RANKMASK<1> = DIMM0_CS1                  MBZ
2016                                                            RANKMASK<2> = DIMM1_CS0                DIMM1_CS0
2017                                                            RANKMASK<3> = DIMM1_CS1                  MBZ
2018                                                          For read/write leveling, each rank has to be leveled separately,
2019                                                          so RANKMASK should only have one bit set.
2020                                                          RANKMASK is not used during self-refresh entry/exit and
2021                                                          precharge power-down entry/exit instruction sequences.
2022                                                          When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
2023         uint64_t rank_ena                     : 1;  /**< RANK ena (for use with dual-rank DIMMs)
2024                                                          For dual-rank DIMMs, the rank_ena bit will enable
2025                                                          the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
2026                                                          (pbank_lsb-1) address bit.
2027                                                          Write 0 for SINGLE ranked DIMM's. */
2028         uint64_t sref_with_dll                : 1;  /**< Self-refresh entry/exit write MR1 and MR2
2029                                                          When set, self-refresh entry and exit instruction sequences
2030                                                          write MR1 and MR2 (in all ranks). (The writes occur before
2031                                                          self-refresh entry, and after self-refresh exit.)
2032                                                          When clear, self-refresh entry and exit instruction sequences
2033                                                          do not write any registers in the DDR3 parts. */
2034         uint64_t early_dqx                    : 1;  /**< Send DQx signals one CK cycle earlier for the case when
2035                                                          the shortest DQx lines have a larger delay than the CK line */
2036         uint64_t sequence                     : 3;  /**< Selects the sequence that LMC runs after a 0->1
2037                                                          transition on LMC*_CONFIG[INIT_START].
2038                                                          SEQUENCE=0=power-up/init:
2039                                                            - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
2040                                                            - INIT_STATUS must equal RANKMASK
2041                                                            - DDR_CKE* signals activated (if they weren't already active)
2042                                                            - RDIMM register control words 0-15 will be written to RANKMASK-selected
2043                                                                RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
2044                                                                LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
2045                                                                LMC*_DIMM_CTL descriptions below for more details.)
2046                                                            - MR0, MR1, MR2, and MR3 will be written to selected ranks
2047                                                          SEQUENCE=1=read-leveling:
2048                                                            - RANKMASK selects the rank to be read-leveled
2049                                                            - MR3 written to selected rank
2050                                                          SEQUENCE=2=self-refresh entry:
2051                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
2052                                                            - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
2053                                                            - DDR_CKE* signals de-activated
2054                                                          SEQUENCE=3=self-refresh exit:
2055                                                            - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
2056                                                            - DDR_CKE* signals activated
2057                                                            - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
2058                                                          SEQUENCE=4=precharge power-down entry:
2059                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
2060                                                            - DDR_CKE* signals de-activated
2061                                                          SEQUENCE=5=precharge power-down exit:
2062                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
2063                                                            - DDR_CKE* signals activated
2064                                                          SEQUENCE=6=write-leveling:
2065                                                            - RANKMASK selects the rank to be write-leveled
2066                                                            - INIT_STATUS must indicate all ranks with attached DRAM
2067                                                            - MR1 and MR2 written to INIT_STATUS-selected ranks
2068                                                          SEQUENCE=7=illegal
2069                                                          Precharge power-down entry and exit SEQUENCE's may also
2070                                                          be automatically generated by the HW when IDLEPOWER!=0.
2071                                                          Self-refresh entry SEQUENCE's may also be automatically
2072                                                          generated by hardware upon a chip warm or soft reset
2073                                                          sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
2074                                                          LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
2075                                                          to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
2076                                                          Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
2077                                                          If there are two consecutive power-up/init's without
2078                                                          a DRESET assertion between them, LMC asserts DDR_CKE* as part of
2079                                                          the first power-up/init, and continues to assert DDR_CKE*
2080                                                          through the remainder of the first and the second power-up/init.
2081                                                          If DDR_CKE* deactivation and reactivation is needed for
2082                                                          a second power-up/init, a DRESET assertion is required
2083                                                          between the first and the second. */
2084         uint64_t ref_zqcs_int                 : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
2085                                                          increments. A Refresh sequence is triggered when bits
2086                                                          [24:18] are equal to 0, and a ZQCS sequence is triggered
2087                                                          when [36:18] are equal to 0.
2088                                                          Program [24:18] to RND-DN(tREFI/clkPeriod/512)
2089                                                          Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
2090                                                          that this value should always be greater than 32, to account for
2091                                                          resistor calibration delays.
2092                                                          000_00000000_00000000: RESERVED
2093                                                          Max Refresh interval = 127 * 512           = 65024 CKs
2094                                                          Max ZQCS interval    = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for a 800 MHz CK
2095                                                          LMC*_CONFIG[INIT_STATUS] determines which ranks receive
2096                                                          the REF / ZQCS. LMC does not send any refreshes / ZQCS's
2097                                                          when LMC*_CONFIG[INIT_STATUS]=0. */
2098         uint64_t reset                        : 1;  /**< Reset oneshot pulse for refresh counter,
2099                                                          and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
2100                                                          CSR's. SW should write this to a one, then re-write
2101                                                          it to a zero to cause the reset. */
2102         uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
2103                                                          0=disabled, 1=enabled */
2104         uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
2105                                                          having waited for 2^FORCEWRITE CK cycles.  0=disabled. */
2106         uint64_t idlepower                    : 3;  /**< Enter precharge power-down mode after the memory
2107                                                          controller has been idle for 2^(2+IDLEPOWER) CK cycles.
2108                                                          0=disabled.
2109                                                          This field should only be programmed after initialization.
2110                                                          LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
2111                                                          is disabled during the precharge power-down. */
2112         uint64_t pbank_lsb                    : 4;  /**< DIMM address bit select
2113                                                          Reverting to the explanation for ROW_LSB,
2114                                                          PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
2115                                                          Decoding for pbank_lsb
2116                                                               - 0000:DIMM = mem_adr[28]    / rank = mem_adr[27] (if RANK_ENA)
2117                                                               - 0001:DIMM = mem_adr[29]    / rank = mem_adr[28]      "
2118                                                               - 0010:DIMM = mem_adr[30]    / rank = mem_adr[29]      "
2119                                                               - 0011:DIMM = mem_adr[31]    / rank = mem_adr[30]      "
2120                                                               - 0100:DIMM = mem_adr[32]    / rank = mem_adr[31]      "
2121                                                               - 0101:DIMM = mem_adr[33]    / rank = mem_adr[32]      "
2122                                                               - 0110:DIMM = mem_adr[34]    / rank = mem_adr[33]      "
2123                                                               - 0111:DIMM = 0              / rank = mem_adr[34]      "
2124                                                               - 1000-1111: RESERVED
2125                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
2126                                                          DDR3 parts, the column address width = 10, so with
2127                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
2128                                                          With rank_ena = 0, pbank_lsb = 2
2129                                                          With rank_ena = 1, pbank_lsb = 3 */
2130         uint64_t row_lsb                      : 3;  /**< Row Address bit select
2131                                                          Encoding used to determine which memory address
2132                                                          bit position represents the low order DDR ROW address.
2133                                                          The processor's memory address[34:7] needs to be
2134                                                          translated to DRAM addresses (bnk,row,col,rank and DIMM)
2135                                                          and that is a function of the following:
2136                                                          1. Datapath Width (64)
2137                                                          2. \# Banks (8)
2138                                                          3. \# Column Bits of the memory part - spec'd indirectly
2139                                                          by this register.
2140                                                          4. \# Row Bits of the memory part - spec'd indirectly
2141                                                          5. \# Ranks in a DIMM - spec'd by RANK_ENA
2142                                                          6. \# DIMM's in the system by the register below (PBANK_LSB).
2143                                                          Decoding for row_lsb
2144                                                               - 000: row_lsb = mem_adr[14]
2145                                                               - 001: row_lsb = mem_adr[15]
2146                                                               - 010: row_lsb = mem_adr[16]
2147                                                               - 011: row_lsb = mem_adr[17]
2148                                                               - 100: row_lsb = mem_adr[18]
2149                                                               - 101: row_lsb = mem_adr[19]
2150                                                               - 110: row_lsb = mem_adr[20]
2151                                                               - 111: RESERVED
2152                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
2153                                                          DDR3 parts, the column address width = 10, so with
2154                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
2155         uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 8b ECC
2156                                                          check/correct logic. Should be 1 when used with DIMMs
2157                                                          with ECC. 0, otherwise.
2158                                                          When this mode is turned on, DQ[71:64]
2159                                                          on writes, will contain the ECC code generated for
2160                                                          the 64 bits of data which will
2161                                                          written in the memory and then later on reads, used
2162                                                          to check for Single bit error (which will be auto-
2163                                                          corrected) and Double Bit error (which will be
2164                                                          reported). When not turned on, DQ[71:64]
2165                                                          are driven to 0.  Please refer to SEC_ERR, DED_ERR,
2166                                                          LMC*_FADR, and LMC*_ECC_SYND registers
2167                                                          for diagnostics information when there is an error. */
2168         uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory sequence that is
2169                                                          selected by LMC*_CONFIG[SEQUENCE].  This register is a
2170                                                          oneshot and clears itself each time it is set. */
2171 #else
2172         uint64_t init_start                   : 1;
2173         uint64_t ecc_ena                      : 1;
2174         uint64_t row_lsb                      : 3;
2175         uint64_t pbank_lsb                    : 4;
2176         uint64_t idlepower                    : 3;
2177         uint64_t forcewrite                   : 4;
2178         uint64_t ecc_adr                      : 1;
2179         uint64_t reset                        : 1;
2180         uint64_t ref_zqcs_int                 : 19;
2181         uint64_t sequence                     : 3;
2182         uint64_t early_dqx                    : 1;
2183         uint64_t sref_with_dll                : 1;
2184         uint64_t rank_ena                     : 1;
2185         uint64_t rankmask                     : 4;
2186         uint64_t mirrmask                     : 4;
2187         uint64_t init_status                  : 4;
2188         uint64_t early_unload_d0_r0           : 1;
2189         uint64_t early_unload_d0_r1           : 1;
2190         uint64_t early_unload_d1_r0           : 1;
2191         uint64_t early_unload_d1_r1           : 1;
2192         uint64_t reserved_59_63               : 5;
2193 #endif
2194         } cn63xx;
2195         struct cvmx_lmcx_config_cn63xxp1 {
2196 #ifdef __BIG_ENDIAN_BITFIELD
2197         uint64_t reserved_55_63               : 9;
2198         uint64_t init_status                  : 4;  /**< Indicates status of initialization
2199                                                          INIT_STATUS[n] = 1 implies rank n has been initialized
2200                                                          SW must set necessary INIT_STATUS bits with the
2201                                                          same LMC*_CONFIG write that initiates
2202                                                          power-up/init and self-refresh exit sequences
2203                                                          (if the required INIT_STATUS bits are not already
2204                                                          set before LMC initiates the sequence).
2205                                                          INIT_STATUS determines the chip-selects that assert
2206                                                          during refresh, ZQCS, and precharge power-down and
2207                                                          self-refresh entry/exit SEQUENCE's. */
2208         uint64_t mirrmask                     : 4;  /**< Mask determining which ranks are address-mirrored.
2209                                                          MIRRMASK<n> = 1 means Rank n addresses are mirrored
2210                                                          for 0 <= n <= 3
2211                                                          A mirrored read/write has these differences:
2212                                                           - DDR_BA<1> is swapped with DDR_BA<0>
2213                                                           - DDR_A<8> is swapped with DDR_A<7>
2214                                                           - DDR_A<6> is swapped with DDR_A<5>
2215                                                           - DDR_A<4> is swapped with DDR_A<3>
2216                                                          When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
2217         uint64_t rankmask                     : 4;  /**< Mask to select rank to be leveled/initialized.
2218                                                          To write-level/read-level/initialize rank i, set RANKMASK<i>
2219                                                                          RANK_ENA=1               RANK_ENA=0
2220                                                            RANKMASK<0> = DIMM0_CS0                DIMM0_CS0
2221                                                            RANKMASK<1> = DIMM0_CS1                  MBZ
2222                                                            RANKMASK<2> = DIMM1_CS0                DIMM1_CS0
2223                                                            RANKMASK<3> = DIMM1_CS1                  MBZ
2224                                                          For read/write leveling, each rank has to be leveled separately,
2225                                                          so RANKMASK should only have one bit set.
2226                                                          RANKMASK is not used during self-refresh entry/exit and
2227                                                          precharge power-down entry/exit instruction sequences.
2228                                                          When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
2229         uint64_t rank_ena                     : 1;  /**< RANK ena (for use with dual-rank DIMMs)
2230                                                          For dual-rank DIMMs, the rank_ena bit will enable
2231                                                          the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
2232                                                          (pbank_lsb-1) address bit.
2233                                                          Write 0 for SINGLE ranked DIMM's. */
2234         uint64_t sref_with_dll                : 1;  /**< Self-refresh entry/exit write MR1 and MR2
2235                                                          When set, self-refresh entry and exit instruction sequences
2236                                                          write MR1 and MR2 (in all ranks). (The writes occur before
2237                                                          self-refresh entry, and after self-refresh exit.)
2238                                                          When clear, self-refresh entry and exit instruction sequences
2239                                                          do not write any registers in the DDR3 parts. */
2240         uint64_t early_dqx                    : 1;  /**< Send DQx signals one CK cycle earlier for the case when
2241                                                          the shortest DQx lines have a larger delay than the CK line */
2242         uint64_t sequence                     : 3;  /**< Selects the sequence that LMC runs after a 0->1
2243                                                          transition on LMC*_CONFIG[INIT_START].
2244                                                          SEQUENCE=0=power-up/init:
2245                                                            - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
2246                                                            - INIT_STATUS must equal RANKMASK
2247                                                            - DDR_CKE* signals activated (if they weren't already active)
2248                                                            - RDIMM register control words 0-15 will be written to RANKMASK-selected
2249                                                                RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
2250                                                                LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
2251                                                                LMC*_DIMM_CTL descriptions below for more details.)
2252                                                            - MR0, MR1, MR2, and MR3 will be written to selected ranks
2253                                                          SEQUENCE=1=read-leveling:
2254                                                            - RANKMASK selects the rank to be read-leveled
2255                                                            - MR3 written to selected rank
2256                                                          SEQUENCE=2=self-refresh entry:
2257                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
2258                                                            - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
2259                                                            - DDR_CKE* signals de-activated
2260                                                          SEQUENCE=3=self-refresh exit:
2261                                                            - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
2262                                                            - DDR_CKE* signals activated
2263                                                            - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
2264                                                          SEQUENCE=4=precharge power-down entry:
2265                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
2266                                                            - DDR_CKE* signals de-activated
2267                                                          SEQUENCE=5=precharge power-down exit:
2268                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
2269                                                            - DDR_CKE* signals activated
2270                                                          SEQUENCE=6=write-leveling:
2271                                                            - RANKMASK selects the rank to be write-leveled
2272                                                            - INIT_STATUS must indicate all ranks with attached DRAM
2273                                                            - MR1 and MR2 written to INIT_STATUS-selected ranks
2274                                                          SEQUENCE=7=illegal
2275                                                          Precharge power-down entry and exit SEQUENCE's may also
2276                                                          be automatically generated by the HW when IDLEPOWER!=0.
2277                                                          Self-refresh entry SEQUENCE's may also be automatically
2278                                                          generated by hardware upon a chip warm or soft reset
2279                                                          sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
2280                                                          LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
2281                                                          to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
2282                                                          Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
2283                                                          If there are two consecutive power-up/init's without
2284                                                          a DRESET assertion between them, LMC asserts DDR_CKE* as part of
2285                                                          the first power-up/init, and continues to assert DDR_CKE*
2286                                                          through the remainder of the first and the second power-up/init.
2287                                                          If DDR_CKE* deactivation and reactivation is needed for
2288                                                          a second power-up/init, a DRESET assertion is required
2289                                                          between the first and the second. */
2290         uint64_t ref_zqcs_int                 : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
2291                                                          increments. A Refresh sequence is triggered when bits
2292                                                          [24:18] are equal to 0, and a ZQCS sequence is triggered
2293                                                          when [36:18] are equal to 0.
2294                                                          Program [24:18] to RND-DN(tREFI/clkPeriod/512)
2295                                                          Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
2296                                                          that this value should always be greater than 32, to account for
2297                                                          resistor calibration delays.
2298                                                          000_00000000_00000000: RESERVED
2299                                                          Max Refresh interval = 127 * 512           = 65024 CKs
2300                                                          Max ZQCS interval    = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for a 800 MHz CK
2301                                                          LMC*_CONFIG[INIT_STATUS] determines which ranks receive
2302                                                          the REF / ZQCS. LMC does not send any refreshes / ZQCS's
2303                                                          when LMC*_CONFIG[INIT_STATUS]=0. */
2304         uint64_t reset                        : 1;  /**< Reset oneshot pulse for refresh counter,
2305                                                          and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
2306                                                          CSR's. SW should write this to a one, then re-write
2307                                                          it to a zero to cause the reset. */
2308         uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
2309                                                          0=disabled, 1=enabled */
2310         uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
2311                                                          having waited for 2^FORCEWRITE CK cycles.  0=disabled. */
2312         uint64_t idlepower                    : 3;  /**< Enter precharge power-down mode after the memory
2313                                                          controller has been idle for 2^(2+IDLEPOWER) CK cycles.
2314                                                          0=disabled.
2315                                                          This field should only be programmed after initialization.
2316                                                          LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
2317                                                          is disabled during the precharge power-down. */
2318         uint64_t pbank_lsb                    : 4;  /**< DIMM address bit select
2319                                                          Reverting to the explanation for ROW_LSB,
2320                                                          PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
2321                                                          Decoding for pbank_lsb
2322                                                               - 0000:DIMM = mem_adr[28]    / rank = mem_adr[27] (if RANK_ENA)
2323                                                               - 0001:DIMM = mem_adr[29]    / rank = mem_adr[28]      "
2324                                                               - 0010:DIMM = mem_adr[30]    / rank = mem_adr[29]      "
2325                                                               - 0011:DIMM = mem_adr[31]    / rank = mem_adr[30]      "
2326                                                               - 0100:DIMM = mem_adr[32]    / rank = mem_adr[31]      "
2327                                                               - 0101:DIMM = mem_adr[33]    / rank = mem_adr[32]      "
2328                                                               - 0110:DIMM = mem_adr[34]    / rank = mem_adr[33]      "
2329                                                               - 0111:DIMM = 0              / rank = mem_adr[34]      "
2330                                                               - 1000-1111: RESERVED
2331                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
2332                                                          DDR3 parts, the column address width = 10, so with
2333                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
2334                                                          With rank_ena = 0, pbank_lsb = 2
2335                                                          With rank_ena = 1, pbank_lsb = 3 */
2336         uint64_t row_lsb                      : 3;  /**< Row Address bit select
2337                                                          Encoding used to determine which memory address
2338                                                          bit position represents the low order DDR ROW address.
2339                                                          The processor's memory address[34:7] needs to be
2340                                                          translated to DRAM addresses (bnk,row,col,rank and DIMM)
2341                                                          and that is a function of the following:
2342                                                          1. Datapath Width (64)
2343                                                          2. \# Banks (8)
2344                                                          3. \# Column Bits of the memory part - spec'd indirectly
2345                                                          by this register.
2346                                                          4. \# Row Bits of the memory part - spec'd indirectly
2347                                                          5. \# Ranks in a DIMM - spec'd by RANK_ENA
2348                                                          6. \# DIMM's in the system by the register below (PBANK_LSB).
2349                                                          Decoding for row_lsb
2350                                                               - 000: row_lsb = mem_adr[14]
2351                                                               - 001: row_lsb = mem_adr[15]
2352                                                               - 010: row_lsb = mem_adr[16]
2353                                                               - 011: row_lsb = mem_adr[17]
2354                                                               - 100: row_lsb = mem_adr[18]
2355                                                               - 101: row_lsb = mem_adr[19]
2356                                                               - 110: row_lsb = mem_adr[20]
2357                                                               - 111: RESERVED
2358                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
2359                                                          DDR3 parts, the column address width = 10, so with
2360                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
2361         uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 8b ECC
2362                                                          check/correct logic. Should be 1 when used with DIMMs
2363                                                          with ECC. 0, otherwise.
2364                                                          When this mode is turned on, DQ[71:64]
2365                                                          on writes, will contain the ECC code generated for
2366                                                          the 64 bits of data which will be
2367                                                          written in the memory and then later on reads, used
2368                                                          to check for Single bit error (which will be auto-
2369                                                          corrected) and Double Bit error (which will be
2370                                                          reported). When not turned on, DQ[71:64]
2371                                                          are driven to 0.  Please refer to SEC_ERR, DED_ERR,
2372                                                          LMC*_FADR, and LMC*_ECC_SYND registers
2373                                                          for diagnostics information when there is an error. */
2374         uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory sequence that is
2375                                                          selected by LMC*_CONFIG[SEQUENCE].  This register is a
2376                                                          oneshot and clears itself each time it is set. */
2377 #else
2378         uint64_t init_start                   : 1;
2379         uint64_t ecc_ena                      : 1;
2380         uint64_t row_lsb                      : 3;
2381         uint64_t pbank_lsb                    : 4;
2382         uint64_t idlepower                    : 3;
2383         uint64_t forcewrite                   : 4;
2384         uint64_t ecc_adr                      : 1;
2385         uint64_t reset                        : 1;
2386         uint64_t ref_zqcs_int                 : 19;
2387         uint64_t sequence                     : 3;
2388         uint64_t early_dqx                    : 1;
2389         uint64_t sref_with_dll                : 1;
2390         uint64_t rank_ena                     : 1;
2391         uint64_t rankmask                     : 4;
2392         uint64_t mirrmask                     : 4;
2393         uint64_t init_status                  : 4;
2394         uint64_t reserved_55_63               : 9;
2395 #endif
2396         } cn63xxp1;
2397         struct cvmx_lmcx_config_cn66xx {
2398 #ifdef __BIG_ENDIAN_BITFIELD
2399         uint64_t reserved_60_63               : 4;
2400         uint64_t scrz                         : 1;  /**< Hide LMC*_SCRAMBLE_CFG0 and LMC*_SCRAMBLE_CFG1 when set */
2401         uint64_t early_unload_d1_r1           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 3
2402                                                          reads
2403                                                          The recommended EARLY_UNLOAD_D1_R1 value can be calculated
2404                                                          after the final LMC*_RLEVEL_RANK3[BYTE*] values are
2405                                                          selected (as part of read-leveling initialization).
2406                                                          Then, determine the largest read-leveling setting
2407                                                          for rank 3 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK3[BYTEi])
2408                                                          across all i), then set EARLY_UNLOAD_D1_R1
2409                                                          when the low two bits of this largest setting is not
2410                                                          3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset<1:0>!=3)). */
2411         uint64_t early_unload_d1_r0           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 2
2412                                                          reads
2413                                                          The recommended EARLY_UNLOAD_D1_R0 value can be calculated
2414                                                          after the final LMC*_RLEVEL_RANK2[BYTE*] values are
2415                                                          selected (as part of read-leveling initialization).
2416                                                          Then, determine the largest read-leveling setting
2417                                                          for rank 2 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK2[BYTEi])
2418                                                          across all i), then set EARLY_UNLOAD_D1_R0
2419                                                          when the low two bits of this largest setting is not
2420                                                          3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset<1:0>!=3)). */
2421         uint64_t early_unload_d0_r1           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 1
2422                                                          reads
2423                                                          The recommended EARLY_UNLOAD_D0_R1 value can be calculated
2424                                                          after the final LMC*_RLEVEL_RANK1[BYTE*] values are
2425                                                          selected (as part of read-leveling initialization).
2426                                                          Then, determine the largest read-leveling setting
2427                                                          for rank 1 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK1[BYTEi])
2428                                                          across all i), then set EARLY_UNLOAD_D0_R1
2429                                                          when the low two bits of this largest setting is not
2430                                                          3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */
2431         uint64_t early_unload_d0_r0           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 0
2432                                                          reads.
2433                                                          The recommended EARLY_UNLOAD_D0_R0 value can be calculated
2434                                                          after the final LMC*_RLEVEL_RANK0[BYTE*] values are
2435                                                          selected (as part of read-leveling initialization).
2436                                                          Then, determine the largest read-leveling setting
2437                                                          for rank 0 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK0[BYTEi])
2438                                                          across all i), then set EARLY_UNLOAD_D0_R0
2439                                                          when the low two bits of this largest setting is not
2440                                                          3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */
2441         uint64_t init_status                  : 4;  /**< Indicates status of initialization
2442                                                          INIT_STATUS[n] = 1 implies rank n has been initialized
2443                                                          SW must set necessary INIT_STATUS bits with the
2444                                                          same LMC*_CONFIG write that initiates
2445                                                          power-up/init and self-refresh exit sequences
2446                                                          (if the required INIT_STATUS bits are not already
2447                                                          set before LMC initiates the sequence).
2448                                                          INIT_STATUS determines the chip-selects that assert
2449                                                          during refresh, ZQCS, and precharge power-down and
2450                                                          self-refresh entry/exit SEQUENCE's. */
2451         uint64_t mirrmask                     : 4;  /**< Mask determining which ranks are address-mirrored.
2452                                                          MIRRMASK<n> = 1 means Rank n addresses are mirrored
2453                                                          for 0 <= n <= 3
2454                                                          A mirrored read/write has these differences:
2455                                                           - DDR_BA<1> is swapped with DDR_BA<0>
2456                                                           - DDR_A<8> is swapped with DDR_A<7>
2457                                                           - DDR_A<6> is swapped with DDR_A<5>
2458                                                           - DDR_A<4> is swapped with DDR_A<3>
2459                                                          When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
2460         uint64_t rankmask                     : 4;  /**< Mask to select rank to be leveled/initialized.
2461                                                          To write-level/read-level/initialize rank i, set RANKMASK<i>
2462                                                                          RANK_ENA=1               RANK_ENA=0
2463                                                            RANKMASK<0> = DIMM0_CS0                DIMM0_CS0
2464                                                            RANKMASK<1> = DIMM0_CS1                  MBZ
2465                                                            RANKMASK<2> = DIMM1_CS0                DIMM1_CS0
2466                                                            RANKMASK<3> = DIMM1_CS1                  MBZ
2467                                                          For read/write leveling, each rank has to be leveled separately,
2468                                                          so RANKMASK should only have one bit set.
2469                                                          RANKMASK is not used during self-refresh entry/exit and
2470                                                          precharge power-down entry/exit instruction sequences.
2471                                                          When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
2472         uint64_t rank_ena                     : 1;  /**< RANK ena (for use with dual-rank DIMMs)
2473                                                          For dual-rank DIMMs, the rank_ena bit will enable
2474                                                          the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
2475                                                          (pbank_lsb-1) address bit.
2476                                                          Write 0 for SINGLE ranked DIMM's. */
2477         uint64_t sref_with_dll                : 1;  /**< Self-refresh entry/exit write MR1 and MR2
2478                                                          When set, self-refresh entry and exit instruction sequences
2479                                                          write MR1 and MR2 (in all ranks). (The writes occur before
2480                                                          self-refresh entry, and after self-refresh exit.)
2481                                                          When clear, self-refresh entry and exit instruction sequences
2482                                                          do not write any registers in the DDR3 parts. */
2483         uint64_t early_dqx                    : 1;  /**< Send DQx signals one CK cycle earlier for the case when
2484                                                          the shortest DQx lines have a larger delay than the CK line */
2485         uint64_t sequence                     : 3;  /**< Selects the sequence that LMC runs after a 0->1
2486                                                          transition on LMC*_CONFIG[INIT_START].
2487                                                          SEQUENCE=0=power-up/init:
2488                                                            - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
2489                                                            - INIT_STATUS must equal RANKMASK
2490                                                            - DDR_CKE* signals activated (if they weren't already active)
2491                                                            - RDIMM register control words 0-15 will be written to RANKMASK-selected
2492                                                                RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
2493                                                                LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
2494                                                                LMC*_DIMM_CTL descriptions below for more details.)
2495                                                            - MR0, MR1, MR2, and MR3 will be written to selected ranks
2496                                                          SEQUENCE=1=read-leveling:
2497                                                            - RANKMASK selects the rank to be read-leveled
2498                                                            - MR3 written to selected rank
2499                                                          SEQUENCE=2=self-refresh entry:
2500                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
2501                                                            - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
2502                                                            - DDR_CKE* signals de-activated
2503                                                          SEQUENCE=3=self-refresh exit:
2504                                                            - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
2505                                                            - DDR_CKE* signals activated
2506                                                            - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
2507                                                          SEQUENCE=4=precharge power-down entry:
2508                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
2509                                                            - DDR_CKE* signals de-activated
2510                                                          SEQUENCE=5=precharge power-down exit:
2511                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
2512                                                            - DDR_CKE* signals activated
2513                                                          SEQUENCE=6=write-leveling:
2514                                                            - RANKMASK selects the rank to be write-leveled
2515                                                            - INIT_STATUS must indicate all ranks with attached DRAM
2516                                                            - MR1 and MR2 written to INIT_STATUS-selected ranks
2517                                                          SEQUENCE=7=illegal
2518                                                          Precharge power-down entry and exit SEQUENCE's may also
2519                                                          be automatically generated by the HW when IDLEPOWER!=0.
2520                                                          Self-refresh entry SEQUENCE's may also be automatically
2521                                                          generated by hardware upon a chip warm or soft reset
2522                                                          sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
2523                                                          LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
2524                                                          to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
2525                                                          Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
2526                                                          If there are two consecutive power-up/init's without
2527                                                          a DRESET assertion between them, LMC asserts DDR_CKE* as part of
2528                                                          the first power-up/init, and continues to assert DDR_CKE*
2529                                                          through the remainder of the first and the second power-up/init.
2530                                                          If DDR_CKE* deactivation and reactivation is needed for
2531                                                          a second power-up/init, a DRESET assertion is required
2532                                                          between the first and the second. */
2533         uint64_t ref_zqcs_int                 : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
2534                                                          increments. A Refresh sequence is triggered when bits
2535                                                          [24:18] are equal to 0, and a ZQCS sequence is triggered
2536                                                          when [36:18] are equal to 0.
2537                                                          Program [24:18] to RND-DN(tREFI/clkPeriod/512)
2538                                                          Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
2539                                                          that this value should always be greater than 32, to account for
2540                                                          resistor calibration delays.
2541                                                          000_00000000_00000000: RESERVED
2542                                                          Max Refresh interval = 127 * 512           = 65024 CKs
2543                                                          Max ZQCS interval    = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for a 800 MHz CK
2544                                                          LMC*_CONFIG[INIT_STATUS] determines which ranks receive
2545                                                          the REF / ZQCS. LMC does not send any refreshes / ZQCS's
2546                                                          when LMC*_CONFIG[INIT_STATUS]=0. */
2547         uint64_t reset                        : 1;  /**< Reset oneshot pulse for refresh counter,
2548                                                          and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
2549                                                          CSR's. SW should write this to a one, then re-write
2550                                                          it to a zero to cause the reset. */
2551         uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
2552                                                          0=disabled, 1=enabled */
2553         uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
2554                                                          having waited for 2^FORCEWRITE CK cycles.  0=disabled. */
2555         uint64_t idlepower                    : 3;  /**< Enter precharge power-down mode after the memory
2556                                                          controller has been idle for 2^(2+IDLEPOWER) CK cycles.
2557                                                          0=disabled.
2558                                                          This field should only be programmed after initialization.
2559                                                          LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
2560                                                          is disabled during the precharge power-down. */
2561         uint64_t pbank_lsb                    : 4;  /**< DIMM address bit select
2562                                                          Reverting to the explanation for ROW_LSB,
2563                                                          PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
2564                                                          Decoding for pbank_lsb
2565                                                               - 0000:DIMM = mem_adr[28]    / rank = mem_adr[27] (if RANK_ENA)
2566                                                               - 0001:DIMM = mem_adr[29]    / rank = mem_adr[28]      "
2567                                                               - 0010:DIMM = mem_adr[30]    / rank = mem_adr[29]      "
2568                                                               - 0011:DIMM = mem_adr[31]    / rank = mem_adr[30]      "
2569                                                               - 0100:DIMM = mem_adr[32]    / rank = mem_adr[31]      "
2570                                                               - 0101:DIMM = mem_adr[33]    / rank = mem_adr[32]      "
2571                                                               - 0110:DIMM = mem_adr[34]    / rank = mem_adr[33]      "
2572                                                               - 0111:DIMM = 0              / rank = mem_adr[34]      "
2573                                                               - 1000-1111: RESERVED
2574                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
2575                                                          DDR3 parts, the column address width = 10, so with
2576                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
2577                                                          With rank_ena = 0, pbank_lsb = 2
2578                                                          With rank_ena = 1, pbank_lsb = 3 */
2579         uint64_t row_lsb                      : 3;  /**< Row Address bit select
2580                                                          Encoding used to determine which memory address
2581                                                          bit position represents the low order DDR ROW address.
2582                                                          The processor's memory address[34:7] needs to be
2583                                                          translated to DRAM addresses (bnk,row,col,rank and DIMM)
2584                                                          and that is a function of the following:
2585                                                          1. Datapath Width (64)
2586                                                          2. \# Banks (8)
2587                                                          3. \# Column Bits of the memory part - spec'd indirectly
2588                                                          by this register.
2589                                                          4. \# Row Bits of the memory part - spec'd indirectly
2590                                                          5. \# Ranks in a DIMM - spec'd by RANK_ENA
2591                                                          6. \# DIMM's in the system by the register below (PBANK_LSB).
2592                                                          Decoding for row_lsb
2593                                                               - 000: row_lsb = mem_adr[14]
2594                                                               - 001: row_lsb = mem_adr[15]
2595                                                               - 010: row_lsb = mem_adr[16]
2596                                                               - 011: row_lsb = mem_adr[17]
2597                                                               - 100: row_lsb = mem_adr[18]
2598                                                               - 101: row_lsb = mem_adr[19]
2599                                                               - 110: row_lsb = mem_adr[20]
2600                                                               - 111: RESERVED
2601                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
2602                                                          DDR3 parts, the column address width = 10, so with
2603                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
2604         uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 8b ECC
2605                                                          check/correct logic. Should be 1 when used with DIMMs
2606                                                          with ECC. 0, otherwise.
2607                                                          When this mode is turned on, DQ[71:64]
2608                                                          on writes, will contain the ECC code generated for
2609                                                          the 64 bits of data which will be
2610                                                          written in the memory and then later on reads, used
2611                                                          to check for Single bit error (which will be auto-
2612                                                          corrected) and Double Bit error (which will be
2613                                                          reported). When not turned on, DQ[71:64]
2614                                                          are driven to 0.  Please refer to SEC_ERR, DED_ERR,
2615                                                          LMC*_FADR, LMC*_SCRAMBLED_FADR and LMC*_ECC_SYND registers
2616                                                          for diagnostics information when there is an error. */
2617         uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory sequence that is
2618                                                          selected by LMC*_CONFIG[SEQUENCE].  This register is a
2619                                                          oneshot and clears itself each time it is set. */
2620 #else
2621         uint64_t init_start                   : 1;
2622         uint64_t ecc_ena                      : 1;
2623         uint64_t row_lsb                      : 3;
2624         uint64_t pbank_lsb                    : 4;
2625         uint64_t idlepower                    : 3;
2626         uint64_t forcewrite                   : 4;
2627         uint64_t ecc_adr                      : 1;
2628         uint64_t reset                        : 1;
2629         uint64_t ref_zqcs_int                 : 19;
2630         uint64_t sequence                     : 3;
2631         uint64_t early_dqx                    : 1;
2632         uint64_t sref_with_dll                : 1;
2633         uint64_t rank_ena                     : 1;
2634         uint64_t rankmask                     : 4;
2635         uint64_t mirrmask                     : 4;
2636         uint64_t init_status                  : 4;
2637         uint64_t early_unload_d0_r0           : 1;
2638         uint64_t early_unload_d0_r1           : 1;
2639         uint64_t early_unload_d1_r0           : 1;
2640         uint64_t early_unload_d1_r1           : 1;
2641         uint64_t scrz                         : 1;
2642         uint64_t reserved_60_63               : 4;
2643 #endif
2644         } cn66xx;
        struct cvmx_lmcx_config_cn63xx        cn68xx;    /**< cn68xx shares the cn63xx field layout */
        struct cvmx_lmcx_config_cn63xx        cn68xxp1;  /**< cn68xxp1 shares the cn63xx field layout */
        struct cvmx_lmcx_config_s             cnf71xx;   /**< cnf71xx uses the full layout (struct cvmx_lmcx_config_s) */
2648 };
/** Convenience typedef for the LMC*_CONFIG register union defined above. */
typedef union cvmx_lmcx_config cvmx_lmcx_config_t;
2650
2651 /**
2652  * cvmx_lmc#_control
2653  *
2654  * LMC_CONTROL = LMC Control
 * This register is an assortment of various control fields needed by the memory controller.
2656  */
2657 union cvmx_lmcx_control {
        uint64_t u64;  /**< Whole register accessed as a single 64-bit value */
        /* Full LMC*_CONTROL field layout.  The two preprocessor arms declare
           the same fields in opposite order so the bitfield packing matches
           the hardware register on big- and little-endian hosts; each field
           is documented once, in the big-endian arm. */
        struct cvmx_lmcx_control_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t scramble_ena                 : 1;  /**< When set, will enable the scramble/descramble logic */
        uint64_t thrcnt                       : 12; /**< Fine Count */
        uint64_t persub                       : 8;  /**< Offset for DFA rate-matching */
        uint64_t thrmax                       : 4;  /**< Fine Rate Matching Max Bucket Size
                                                         0 = Reserved
                                                         In conjunction with the Coarse Rate Matching Logic, the Fine Rate
                                                         Matching Logic gives SW the ability to prioritize DFA Rds over
                                                         L2C Writes. Higher PERSUB values result in a lower DFA Rd
                                                         bandwidth. */
        uint64_t crm_cnt                      : 5;  /**< Coarse Count */
        uint64_t crm_thr                      : 5;  /**< Coarse Rate Matching Threshold */
        uint64_t crm_max                      : 5;  /**< Coarse Rate Matching Max Bucket Size
                                                         0 = Reserved
                                                         The Coarse Rate Matching Logic is used to control the bandwidth
                                                         allocated to DFA Rds. CRM_MAX is subdivided into two regions
                                                         with DFA Rds being preferred over LMC Rd/Wrs when
                                                         CRM_CNT < CRM_THR. CRM_CNT increments by 1 when a DFA Rd is
                                                         slotted and by 2 when a LMC Rd/Wr is slotted, and rolls over
                                                         when CRM_MAX is reached. */
        uint64_t rodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
                                                         RD cmd is delayed an additional CK cycle. */
        uint64_t wodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
                                                         WR cmd is delayed an additional CK cycle. */
        uint64_t bprch                        : 2;  /**< Back Porch Enable: When set, the turn-on time for
                                                         the default DDR_DQ/DQS drivers is delayed an additional BPRCH
                                                         CK cycles.
                                                         00 = 0 CKs
                                                         01 = 1 CKs
                                                         10 = 2 CKs
                                                         11 = 3 CKs */
        uint64_t ext_zqcs_dis                 : 1;  /**< Disable (external) auto-zqcs calibration
                                                         When clear, LMC runs external ZQ calibration
                                                         every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
        uint64_t int_zqcs_dis                 : 1;  /**< Disable (internal) auto-zqcs calibration
                                                         When clear, LMC runs internal ZQ calibration
                                                         every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
        uint64_t auto_dclkdis                 : 1;  /**< When 1, LMC will automatically shut off its internal
                                                         clock to conserve power when there is no traffic. Note
                                                         that this has no effect on the DDR3 PHY and pads clocks. */
        uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
                                                          bank[2:0]=address[9:7] ^ address[14:12]
                                                         else
                                                          bank[2:0]=address[9:7] */
        uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
                                                         forcing reads to interrupt. */
        uint64_t nxm_write_en                 : 1;  /**< NXM Write mode
                                                         When clear, LMC discards writes to addresses that don't
                                                         exist in the DRAM (as defined by LMC*_NXM configuration).
                                                         When set, LMC completes writes to addresses that don't
                                                         exist in the DRAM at an aliased address. */
        uint64_t elev_prio_dis                : 1;  /**< Disable elevate priority logic.
                                                         When set, writes are sent in
                                                         regardless of priority information from L2C. */
        uint64_t inorder_wr                   : 1;  /**< Send writes in order(regardless of priority) */
        uint64_t inorder_rd                   : 1;  /**< Send reads in order (regardless of priority) */
        uint64_t throttle_wr                  : 1;  /**< When set, use at most one IFB for writes */
        uint64_t throttle_rd                  : 1;  /**< When set, use at most one IFB for reads */
        uint64_t fprch2                       : 2;  /**< Front Porch Enable: When set, the turn-off
                                                         time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
                                                         00 = 0 CKs
                                                         01 = 1 CKs
                                                         10 = 2 CKs
                                                         11 = RESERVED */
        uint64_t pocas                        : 1;  /**< Enable the Posted CAS feature of DDR3.
                                                         This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0,
                                                         and clear otherwise. */
        uint64_t ddr2t                        : 1;  /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and
                                                         address. This mode helps relieve setup time pressure
                                                         on the Address and command bus which nominally have
                                                         a very large fanout. Please refer to Micron's tech
                                                         note tn_47_01 titled "DDR2-533 Memory Design Guide
                                                         for Two Dimm Unbuffered Systems" for physical details. */
        uint64_t bwcnt                        : 1;  /**< Bus utilization counter Clear.
                                                         Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and
                                                         LMC*_DCLK_CNT registers. SW should first write this
                                                         field to a one, then write this field to a zero to
                                                         clear the CSR's. */
        uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
                                                         of JEDEC Registered DIMMs which require address and
                                                         control bits to be registered in the controller. */
#else /* little-endian: same fields as above, declared LSB-first */
        uint64_t rdimm_ena                    : 1;
        uint64_t bwcnt                        : 1;
        uint64_t ddr2t                        : 1;
        uint64_t pocas                        : 1;
        uint64_t fprch2                       : 2;
        uint64_t throttle_rd                  : 1;
        uint64_t throttle_wr                  : 1;
        uint64_t inorder_rd                   : 1;
        uint64_t inorder_wr                   : 1;
        uint64_t elev_prio_dis                : 1;
        uint64_t nxm_write_en                 : 1;
        uint64_t max_write_batch              : 4;
        uint64_t xor_bank                     : 1;
        uint64_t auto_dclkdis                 : 1;
        uint64_t int_zqcs_dis                 : 1;
        uint64_t ext_zqcs_dis                 : 1;
        uint64_t bprch                        : 2;
        uint64_t wodt_bprch                   : 1;
        uint64_t rodt_bprch                   : 1;
        uint64_t crm_max                      : 5;
        uint64_t crm_thr                      : 5;
        uint64_t crm_cnt                      : 5;
        uint64_t thrmax                       : 4;
        uint64_t persub                       : 8;
        uint64_t thrcnt                       : 12;
        uint64_t scramble_ena                 : 1;
#endif
        } s;
        struct cvmx_lmcx_control_s            cn61xx;  /**< cn61xx uses the full layout (struct cvmx_lmcx_control_s) */
        /* cn63xx variant: only register bits 23:0 are defined here; bits
           63:24 are reserved.  The defined fields carry the same meanings
           as the same-named fields in struct cvmx_lmcx_control_s. */
        struct cvmx_lmcx_control_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_24_63               : 40;
        uint64_t rodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
                                                         RD cmd is delayed an additional CK cycle. */
        uint64_t wodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
                                                         WR cmd is delayed an additional CK cycle. */
        uint64_t bprch                        : 2;  /**< Back Porch Enable: When set, the turn-on time for
                                                         the default DDR_DQ/DQS drivers is delayed an additional BPRCH
                                                         CK cycles.
                                                         00 = 0 CKs
                                                         01 = 1 CKs
                                                         10 = 2 CKs
                                                         11 = 3 CKs */
        uint64_t ext_zqcs_dis                 : 1;  /**< Disable (external) auto-zqcs calibration
                                                         When clear, LMC runs external ZQ calibration
                                                         every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
        uint64_t int_zqcs_dis                 : 1;  /**< Disable (internal) auto-zqcs calibration
                                                         When clear, LMC runs internal ZQ calibration
                                                         every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
        uint64_t auto_dclkdis                 : 1;  /**< When 1, LMC will automatically shut off its internal
                                                         clock to conserve power when there is no traffic. Note
                                                         that this has no effect on the DDR3 PHY and pads clocks. */
        uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
                                                          bank[2:0]=address[9:7] ^ address[14:12]
                                                         else
                                                          bank[2:0]=address[9:7] */
        uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
                                                         forcing reads to interrupt. */
        uint64_t nxm_write_en                 : 1;  /**< NXM Write mode
                                                         When clear, LMC discards writes to addresses that don't
                                                         exist in the DRAM (as defined by LMC*_NXM configuration).
                                                         When set, LMC completes writes to addresses that don't
                                                         exist in the DRAM at an aliased address. */
        uint64_t elev_prio_dis                : 1;  /**< Disable elevate priority logic.
                                                         When set, writes are sent in
                                                         regardless of priority information from L2C. */
        uint64_t inorder_wr                   : 1;  /**< Send writes in order(regardless of priority) */
        uint64_t inorder_rd                   : 1;  /**< Send reads in order (regardless of priority) */
        uint64_t throttle_wr                  : 1;  /**< When set, use at most one IFB for writes */
        uint64_t throttle_rd                  : 1;  /**< When set, use at most one IFB for reads */
        uint64_t fprch2                       : 2;  /**< Front Porch Enable: When set, the turn-off
                                                         time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
                                                         00 = 0 CKs
                                                         01 = 1 CKs
                                                         10 = 2 CKs
                                                         11 = RESERVED */
        uint64_t pocas                        : 1;  /**< Enable the Posted CAS feature of DDR3.
                                                         This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0,
                                                         and clear otherwise. */
        uint64_t ddr2t                        : 1;  /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and
                                                         address. This mode helps relieve setup time pressure
                                                         on the Address and command bus which nominally have
                                                         a very large fanout. Please refer to Micron's tech
                                                         note tn_47_01 titled "DDR2-533 Memory Design Guide
                                                         for Two Dimm Unbuffered Systems" for physical details. */
        uint64_t bwcnt                        : 1;  /**< Bus utilization counter Clear.
                                                         Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and
                                                         LMC*_DCLK_CNT registers. SW should first write this
                                                         field to a one, then write this field to a zero to
                                                         clear the CSR's. */
        uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
                                                         of JEDEC Registered DIMMs which require address and
                                                         control bits to be registered in the controller. */
#else /* little-endian: same fields as above, declared LSB-first */
        uint64_t rdimm_ena                    : 1;
        uint64_t bwcnt                        : 1;
        uint64_t ddr2t                        : 1;
        uint64_t pocas                        : 1;
        uint64_t fprch2                       : 2;
        uint64_t throttle_rd                  : 1;
        uint64_t throttle_wr                  : 1;
        uint64_t inorder_rd                   : 1;
        uint64_t inorder_wr                   : 1;
        uint64_t elev_prio_dis                : 1;
        uint64_t nxm_write_en                 : 1;
        uint64_t max_write_batch              : 4;
        uint64_t xor_bank                     : 1;
        uint64_t auto_dclkdis                 : 1;
        uint64_t int_zqcs_dis                 : 1;
        uint64_t ext_zqcs_dis                 : 1;
        uint64_t bprch                        : 2;
        uint64_t wodt_bprch                   : 1;
        uint64_t rodt_bprch                   : 1;
        uint64_t reserved_24_63               : 40;
#endif
        } cn63xx;
        struct cvmx_lmcx_control_cn63xx       cn63xxp1;  /**< cn63xxp1 shares the cn63xx field layout */
        /* cn66xx variant: like the cn63xx layout (register bits 23:0) but
           with scramble_ena added in bit 63; bits 62:24 are reserved. */
        struct cvmx_lmcx_control_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t scramble_ena                 : 1;  /**< When set, will enable the scramble/descramble logic */
        uint64_t reserved_24_62               : 39;
        uint64_t rodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
                                                         RD cmd is delayed an additional CK cycle. */
        uint64_t wodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
                                                         WR cmd is delayed an additional CK cycle. */
        uint64_t bprch                        : 2;  /**< Back Porch Enable: When set, the turn-on time for
                                                         the default DDR_DQ/DQS drivers is delayed an additional BPRCH
                                                         CK cycles.
                                                         00 = 0 CKs
                                                         01 = 1 CKs
                                                         10 = 2 CKs
                                                         11 = 3 CKs */
        uint64_t ext_zqcs_dis                 : 1;  /**< Disable (external) auto-zqcs calibration
                                                         When clear, LMC runs external ZQ calibration
                                                         every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
        uint64_t int_zqcs_dis                 : 1;  /**< Disable (internal) auto-zqcs calibration
                                                         When clear, LMC runs internal ZQ calibration
                                                         every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
        uint64_t auto_dclkdis                 : 1;  /**< When 1, LMC will automatically shut off its internal
                                                         clock to conserve power when there is no traffic. Note
                                                         that this has no effect on the DDR3 PHY and pads clocks. */
        uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
                                                          bank[2:0]=address[9:7] ^ address[14:12]
                                                         else
                                                          bank[2:0]=address[9:7] */
        uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
                                                         forcing reads to interrupt. */
        uint64_t nxm_write_en                 : 1;  /**< NXM Write mode
                                                         When clear, LMC discards writes to addresses that don't
                                                         exist in the DRAM (as defined by LMC*_NXM configuration).
                                                         When set, LMC completes writes to addresses that don't
                                                         exist in the DRAM at an aliased address. */
        uint64_t elev_prio_dis                : 1;  /**< Disable elevate priority logic.
                                                         When set, writes are sent in
                                                         regardless of priority information from L2C. */
        uint64_t inorder_wr                   : 1;  /**< Send writes in order(regardless of priority) */
        uint64_t inorder_rd                   : 1;  /**< Send reads in order (regardless of priority) */
        uint64_t throttle_wr                  : 1;  /**< When set, use at most one IFB for writes */
        uint64_t throttle_rd                  : 1;  /**< When set, use at most one IFB for reads */
        uint64_t fprch2                       : 2;  /**< Front Porch Enable: When set, the turn-off
                                                         time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
                                                         00 = 0 CKs
                                                         01 = 1 CKs
                                                         10 = 2 CKs
                                                         11 = RESERVED */
        uint64_t pocas                        : 1;  /**< Enable the Posted CAS feature of DDR3.
                                                         This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0,
                                                         and clear otherwise. */
        uint64_t ddr2t                        : 1;  /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and
                                                         address. This mode helps relieve setup time pressure
                                                         on the Address and command bus which nominally have
                                                         a very large fanout. Please refer to Micron's tech
                                                         note tn_47_01 titled "DDR2-533 Memory Design Guide
                                                         for Two Dimm Unbuffered Systems" for physical details. */
        uint64_t bwcnt                        : 1;  /**< Bus utilization counter Clear.
                                                         Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and
                                                         LMC*_DCLK_CNT registers. SW should first write this
                                                         field to a one, then write this field to a zero to
                                                         clear the CSR's. */
        uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
                                                         of JEDEC Registered DIMMs which require address and
                                                         control bits to be registered in the controller. */
#else /* little-endian: same fields as above, declared LSB-first */
        uint64_t rdimm_ena                    : 1;
        uint64_t bwcnt                        : 1;
        uint64_t ddr2t                        : 1;
        uint64_t pocas                        : 1;
        uint64_t fprch2                       : 2;
        uint64_t throttle_rd                  : 1;
        uint64_t throttle_wr                  : 1;
        uint64_t inorder_rd                   : 1;
        uint64_t inorder_wr                   : 1;
        uint64_t elev_prio_dis                : 1;
        uint64_t nxm_write_en                 : 1;
        uint64_t max_write_batch              : 4;
        uint64_t xor_bank                     : 1;
        uint64_t auto_dclkdis                 : 1;
        uint64_t int_zqcs_dis                 : 1;
        uint64_t ext_zqcs_dis                 : 1;
        uint64_t bprch                        : 2;
        uint64_t wodt_bprch                   : 1;
        uint64_t rodt_bprch                   : 1;
        uint64_t reserved_24_62               : 39;
        uint64_t scramble_ena                 : 1;
#endif
        } cn66xx;
2948         struct cvmx_lmcx_control_cn68xx {
2949 #ifdef __BIG_ENDIAN_BITFIELD
2950         uint64_t reserved_63_63               : 1;
2951         uint64_t thrcnt                       : 12; /**< Fine Count */
2952         uint64_t persub                       : 8;  /**< Offset for DFA rate-matching */
2953         uint64_t thrmax                       : 4;  /**< Fine Rate Matching Max Bucket Size
2954                                                          0 = Reserved
2955                                                          In conjunction with the Coarse Rate Matching Logic, the Fine Rate
2956                                                          Matching Logic gives SW the ability to prioritize DFA Rds over
2957                                                          L2C Writes. Higher PERSUB values result in a lower DFA Rd
2958                                                          bandwidth. */
2959         uint64_t crm_cnt                      : 5;  /**< Coarse Count */
2960         uint64_t crm_thr                      : 5;  /**< Coarse Rate Matching Threshold */
2961         uint64_t crm_max                      : 5;  /**< Coarse Rate Matching Max Bucket Size
2962                                                          0 = Reserved
2963                                                          The Coarse Rate Matching Logic is used to control the bandwidth
2964                                                          allocated to DFA Rds. CRM_MAX is subdivided into two regions
2965                                                          with DFA Rds being preferred over LMC Rd/Wrs when
2966                                                          CRM_CNT < CRM_THR. CRM_CNT increments by 1 when a DFA Rd is
2967                                                          slotted and by 2 when a LMC Rd/Wr is slotted, and rolls over
2968                                                          when CRM_MAX is reached. */
2969         uint64_t rodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
2970                                                          RD cmd is delayed an additional CK cycle. */
2971         uint64_t wodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
2972                                                          WR cmd is delayed an additional CK cycle. */
2973         uint64_t bprch                        : 2;  /**< Back Porch Enable: When set, the turn-on time for
2974                                                          the default DDR_DQ/DQS drivers is delayed an additional BPRCH
2975                                                          CK cycles.
2976                                                          00 = 0 CKs
2977                                                          01 = 1 CKs
2978                                                          10 = 2 CKs
2979                                                          11 = 3 CKs */
2980         uint64_t ext_zqcs_dis                 : 1;  /**< Disable (external) auto-zqcs calibration
2981                                                          When clear, LMC runs external ZQ calibration
2982                                                          every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
2983         uint64_t int_zqcs_dis                 : 1;  /**< Disable (internal) auto-zqcs calibration
2984                                                          When clear, LMC runs internal ZQ calibration
2985                                                          every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
2986         uint64_t auto_dclkdis                 : 1;  /**< When 1, LMC will automatically shut off its internal
2987                                                          clock to conserve power when there is no traffic. Note
2988                                                          that this has no effect on the DDR3 PHY and pads clocks. */
2989         uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
2990                                                           bank[2:0]=address[9:7] ^ address[14:12]
2991                                                          else
2992                                                           bank[2:0]=address[9:7] */
2993         uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
2994                                                          forcing reads to interrupt. */
2995         uint64_t nxm_write_en                 : 1;  /**< NXM Write mode
2996                                                          When clear, LMC discards writes to addresses that don't
2997                                                          exist in the DRAM (as defined by LMC*_NXM configuration).
2998                                                          When set, LMC completes writes to addresses that don't
2999                                                          exist in the DRAM at an aliased address. */
3000         uint64_t elev_prio_dis                : 1;  /**< Disable elevate priority logic.
3001                                                          When set, writes are sent in
3002                                                          regardless of priority information from L2C. */
3003         uint64_t inorder_wr                   : 1;  /**< Send writes in order(regardless of priority) */
3004         uint64_t inorder_rd                   : 1;  /**< Send reads in order (regardless of priority) */
3005         uint64_t throttle_wr                  : 1;  /**< When set, use at most one IFB for writes */
3006         uint64_t throttle_rd                  : 1;  /**< When set, use at most one IFB for reads */
3007         uint64_t fprch2                       : 2;  /**< Front Porch Enable: When set, the turn-off
3008                                                          time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
3009                                                          00 = 0 CKs
3010                                                          01 = 1 CKs
3011                                                          10 = 2 CKs
3012                                                          11 = RESERVED */
3013         uint64_t pocas                        : 1;  /**< Enable the Posted CAS feature of DDR3.
3014                                                          This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0,
3015                                                          and clear otherwise. */
3016         uint64_t ddr2t                        : 1;  /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and
3017                                                          address. This mode helps relieve setup time pressure
3018                                                          on the Address and command bus which nominally have
3019                                                          a very large fanout. Please refer to Micron's tech
3020                                                          note tn_47_01 titled "DDR2-533 Memory Design Guide
3021                                                          for Two Dimm Unbuffered Systems" for physical details. */
3022         uint64_t bwcnt                        : 1;  /**< Bus utilization counter Clear.
3023                                                          Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and
3024                                                          LMC*_DCLK_CNT registers. SW should first write this
3025                                                          field to a one, then write this field to a zero to
3026                                                          clear the CSR's. */
3027         uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
3028                                                          of JEDEC Registered DIMMs which require address and
3029                                                          control bits to be registered in the controller. */
3030 #else
3031         uint64_t rdimm_ena                    : 1;
3032         uint64_t bwcnt                        : 1;
3033         uint64_t ddr2t                        : 1;
3034         uint64_t pocas                        : 1;
3035         uint64_t fprch2                       : 2;
3036         uint64_t throttle_rd                  : 1;
3037         uint64_t throttle_wr                  : 1;
3038         uint64_t inorder_rd                   : 1;
3039         uint64_t inorder_wr                   : 1;
3040         uint64_t elev_prio_dis                : 1;
3041         uint64_t nxm_write_en                 : 1;
3042         uint64_t max_write_batch              : 4;
3043         uint64_t xor_bank                     : 1;
3044         uint64_t auto_dclkdis                 : 1;
3045         uint64_t int_zqcs_dis                 : 1;
3046         uint64_t ext_zqcs_dis                 : 1;
3047         uint64_t bprch                        : 2;
3048         uint64_t wodt_bprch                   : 1;
3049         uint64_t rodt_bprch                   : 1;
3050         uint64_t crm_max                      : 5;
3051         uint64_t crm_thr                      : 5;
3052         uint64_t crm_cnt                      : 5;
3053         uint64_t thrmax                       : 4;
3054         uint64_t persub                       : 8;
3055         uint64_t thrcnt                       : 12;
3056         uint64_t reserved_63_63               : 1;
3057 #endif
3058         } cn68xx;
3059         struct cvmx_lmcx_control_cn68xx       cn68xxp1;  /* CN68XX pass 1 shares the CN68XX field layout */
3060         struct cvmx_lmcx_control_cn66xx       cnf71xx;   /* CNF71XX reuses the CN66XX field layout */
3061 };
3062 typedef union cvmx_lmcx_control cvmx_lmcx_control_t;
3063
3064 /**
3065  * cvmx_lmc#_ctl
3066  *
3067  * LMC_CTL = LMC Control
3068  * This register is an assortment of various control fields needed by the memory controller
3069  */
3070 union cvmx_lmcx_ctl {
3071         uint64_t u64;
3072         struct cvmx_lmcx_ctl_s {  /* Default LMC_CTL field layout; per-model variants follow.
                                          NOTE(review): bitfield order/widths mirror the hardware register — do not reorder. */
3073 #ifdef __BIG_ENDIAN_BITFIELD
3074         uint64_t reserved_32_63               : 32; /**< Reserved */
3075         uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
3076                                                          The encoded value on this will adjust the drive strength
3077                                                          of the DDR DQ pulldns. */
3078         uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
3079                                                          The encoded value on this will adjust the drive strength
3080                                                          of the DDR DQ pullup. */
3081         uint64_t slow_scf                     : 1;  /**< Should be cleared to zero */
3082         uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
3083                                                            bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
3084                                                          else
3085                                                            bank[n:0]=address[n+7:7]
3086                                                          where n=1 for a 4 bank part and n=2 for an 8 bank part */
3087         uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
3088                                                          allowing reads to interrupt. */
3089         uint64_t pll_div2                     : 1;  /**< PLL Div2. */
3090         uint64_t pll_bypass                   : 1;  /**< PLL Bypass. */
3091         uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
3092                                                          of JEDEC Registered DIMMs which require Write
3093                                                          data to be registered in the controller. */
3094         uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
3095                                                          will slot an additional 1 cycle data bus bubble to
3096                                                          avoid DQ/DQS bus contention. This is only a CYA bit,
3097                                                          in case the "built-in" DIMM and RANK crossing logic
3098                                                          which should auto-detect and perfectly slot
3099                                                          read-to-reads to the same DIMM/RANK. */
3100         uint64_t inorder_mwf                  : 1;  /**< Reads as zero */
3101         uint64_t inorder_mrf                  : 1;  /**< Always clear to zero */
3102         uint64_t reserved_10_11               : 2;  /**< Reserved */
3103         uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
3104                                                          time for the DDR_DQ/DQS drivers is 1 dclk earlier.
3105                                                          This bit should typically be set. */
3106         uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
3107                                                          the DDR_DQ/DQS drivers is delayed an additional DCLK
3108                                                          cycle. This should be set to one whenever both SILO_HC
3109                                                          and SILO_QC are set. */
3110         uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
3111                                                          dclks to wait (on top of TCL+1+TSKW) before pulling
3112                                                          data out of the pad silos.
3113                                                              - 00: illegal
3114                                                              - 01: 1 dclks
3115                                                              - 10: 2 dclks
3116                                                              - 11: illegal
3117                                                          This should always be set to 1. */
3118         uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
3119                                                          DELAY on DQ (used in the controller to determine the
3120                                                          R->W spacing to avoid DQS/DQ bus conflicts). Enter
3121                                                          the largest of the per byte Board delay
3122                                                              - 00: 0 dclk
3123                                                              - 01: 1 dclks
3124                                                              - 10: 2 dclks
3125                                                              - 11: 3 dclks */
3126         uint64_t qs_dic                       : 2;  /**< DDR2 Termination Resistor Setting
3127                                                          A non Zero value in this register
3128                                                          enables the On Die Termination (ODT) in DDR parts.
3129                                                          These two bits are loaded into the RTT
3130                                                          portion of the EMRS register bits A6 & A2. If DDR2's
3131                                                          termination (for the memory's DQ/DQS/DM pads) is not
3132                                                          desired, set it to 00. If it is, chose between
3133                                                          01 for 75 ohm and 10 for 150 ohm termination.
3134                                                              00 = ODT Disabled
3135                                                              01 = 75 ohm Termination
3136                                                              10 = 150 ohm Termination
3137                                                              11 = 50 ohm Termination
3138                                                          Octeon, on writes, by default, drives the 4/8 ODT
3139                                                          pins (64/128b mode) based on what the masks
3140                                                          (LMC_WODT_CTL) are programmed to.
3141                                                          LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
3142                                                          for READS. LMC_RODT_CTL needs to be programmed based
3143                                                          on the system's needs for ODT. */
3144         uint64_t dic                          : 2;  /**< Drive Strength Control:
3145                                                          DIC[0] is
3146                                                          loaded into the Extended Mode Register (EMRS) A1 bit
3147                                                          during initialization.
3148                                                              0 = Normal
3149                                                              1 = Reduced
3150                                                          DIC[1] is used to load into EMRS
3151                                                          bit 10 - DQSN Enable/Disable field. By default, we
3152                                                          program the DDR's to drive the DQSN also. Set it to
3153                                                          1 if DQSN should be Hi-Z.
3154                                                              0 - DQSN Enable
3155                                                              1 - DQSN Disable */
3156 #else  /* little-endian bit order: same fields mirrored, documented above */
3157         uint64_t dic                          : 2;
3158         uint64_t qs_dic                       : 2;
3159         uint64_t tskw                         : 2;
3160         uint64_t sil_lat                      : 2;
3161         uint64_t bprch                        : 1;
3162         uint64_t fprch2                       : 1;
3163         uint64_t reserved_10_11               : 2;
3164         uint64_t inorder_mrf                  : 1;
3165         uint64_t inorder_mwf                  : 1;
3166         uint64_t r2r_slot                     : 1;
3167         uint64_t rdimm_ena                    : 1;
3168         uint64_t pll_bypass                   : 1;
3169         uint64_t pll_div2                     : 1;
3170         uint64_t max_write_batch              : 4;
3171         uint64_t xor_bank                     : 1;
3172         uint64_t slow_scf                     : 1;
3173         uint64_t ddr__pctl                    : 4;
3174         uint64_t ddr__nctl                    : 4;
3175         uint64_t reserved_32_63               : 32;
3176 #endif
3177         } s;
3178         struct cvmx_lmcx_ctl_cn30xx {  /* CN30XX layout: replaces the reserved/POCAS area of the
                                               default view with MODE32B and DRESET fields. */
3179 #ifdef __BIG_ENDIAN_BITFIELD
3180         uint64_t reserved_32_63               : 32; /**< Reserved */
3181         uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
3182                                                          The encoded value on this will adjust the drive strength
3183                                                          of the DDR DQ pulldns. */
3184         uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
3185                                                          The encoded value on this will adjust the drive strength
3186                                                          of the DDR DQ pullup. */
3187         uint64_t slow_scf                     : 1;  /**< 1=SCF has pass1 latency, 0=SCF has 1 cycle lower latency
3188                                                          when compared to pass1 */
3189         uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
3190                                                            bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
3191                                                          else
3192                                                            bank[n:0]=address[n+7:7]
3193                                                          where n=1 for a 4 bank part and n=2 for an 8 bank part */
3194         uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
3195                                                          allowing reads to interrupt. */
3196         uint64_t pll_div2                     : 1;  /**< PLL Div2. */
3197         uint64_t pll_bypass                   : 1;  /**< PLL Bypass. */
3198         uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
3199                                                          of JEDEC Registered DIMMs which require Write
3200                                                          data to be registered in the controller. */
3201         uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
3202                                                          will slot an additional 1 cycle data bus bubble to
3203                                                          avoid DQ/DQS bus contention. This is only a CYA bit,
3204                                                          in case the "built-in" DIMM and RANK crossing logic
3205                                                          which should auto-detect and perfectly slot
3206                                                          read-to-reads to the same DIMM/RANK. */
3207         uint64_t inorder_mwf                  : 1;  /**< Reads as zero */
3208         uint64_t inorder_mrf                  : 1;  /**< Always set to zero */
3209         uint64_t dreset                       : 1;  /**< Dclk domain reset.  The reset signal that is used by the
3210                                                          Dclk domain is (DRESET || ECLK_RESET). */
3211         uint64_t mode32b                      : 1;  /**< 32b data Path Mode
3212                                                          Set to 1 if we use only 32 DQ pins
3213                                                          0 for 16b DQ mode. */
3214         uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
3215                                                          time for the DDR_DQ/DQS drivers is 1 dclk earlier.
3216                                                          This bit should typically be set. */
3217         uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
3218                                                          the DDR_DQ/DQS drivers is delayed an additional DCLK
3219                                                          cycle. This should be set to one whenever both SILO_HC
3220                                                          and SILO_QC are set. */
3221         uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
3222                                                          dclks to wait (on top of TCL+1+TSKW) before pulling
3223                                                          data out of the pad silos.
3224                                                              - 00: illegal
3225                                                              - 01: 1 dclks
3226                                                              - 10: 2 dclks
3227                                                              - 11: illegal
3228                                                          This should always be set to 1. */
3229         uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
3230                                                          DELAY on DQ (used in the controller to determine the
3231                                                          R->W spacing to avoid DQS/DQ bus conflicts). Enter
3232                                                          the largest of the per byte Board delay
3233                                                              - 00: 0 dclk
3234                                                              - 01: 1 dclks
3235                                                              - 10: 2 dclks
3236                                                              - 11: 3 dclks */
3237         uint64_t qs_dic                       : 2;  /**< QS Drive Strength Control (DDR1):
3238                                                          & DDR2 Termination Resistor Setting
3239                                                          When in DDR2, a non Zero value in this register
3240                                                          enables the On Die Termination (ODT) in DDR parts.
3241                                                          These two bits are loaded into the RTT
3242                                                          portion of the EMRS register bits A6 & A2. If DDR2's
3243                                                          termination (for the memory's DQ/DQS/DM pads) is not
3244                                                          desired, set it to 00. If it is, chose between
3245                                                          01 for 75 ohm and 10 for 150 ohm termination.
3246                                                              00 = ODT Disabled
3247                                                              01 = 75 ohm Termination
3248                                                              10 = 150 ohm Termination
3249                                                              11 = 50 ohm Termination
3250                                                          Octeon, on writes, by default, drives the 8 ODT
3251                                                          pins based on what the masks (LMC_WODT_CTL1 & 2)
3252                                                          are programmed to. LMC_DDR2_CTL->ODT_ENA
3253                                                          enables Octeon to drive ODT pins for READS.
3254                                                          LMC_RODT_CTL needs to be programmed based on
3255                                                          the system's needs for ODT. */
3256         uint64_t dic                          : 2;  /**< Drive Strength Control:
3257                                                          For DDR-I/II Mode, DIC[0] is
3258                                                          loaded into the Extended Mode Register (EMRS) A1 bit
3259                                                          during initialization. (see DDR-I data sheet EMRS
3260                                                          description)
3261                                                              0 = Normal
3262                                                              1 = Reduced
3263                                                          For DDR-II Mode, DIC[1] is used to load into EMRS
3264                                                          bit 10 - DQSN Enable/Disable field. By default, we
3265                                                          program the DDR's to drive the DQSN also. Set it to
3266                                                          1 if DQSN should be Hi-Z.
3267                                                              0 - DQSN Enable
3268                                                              1 - DQSN Disable */
3269 #else  /* little-endian bit order: same fields mirrored, documented above */
3270         uint64_t dic                          : 2;
3271         uint64_t qs_dic                       : 2;
3272         uint64_t tskw                         : 2;
3273         uint64_t sil_lat                      : 2;
3274         uint64_t bprch                        : 1;
3275         uint64_t fprch2                       : 1;
3276         uint64_t mode32b                      : 1;
3277         uint64_t dreset                       : 1;
3278         uint64_t inorder_mrf                  : 1;
3279         uint64_t inorder_mwf                  : 1;
3280         uint64_t r2r_slot                     : 1;
3281         uint64_t rdimm_ena                    : 1;
3282         uint64_t pll_bypass                   : 1;
3283         uint64_t pll_div2                     : 1;
3284         uint64_t max_write_batch              : 4;
3285         uint64_t xor_bank                     : 1;
3286         uint64_t slow_scf                     : 1;
3287         uint64_t ddr__pctl                    : 4;
3288         uint64_t ddr__nctl                    : 4;
3289         uint64_t reserved_32_63               : 32;
3290 #endif
3291         } cn30xx;
3292         struct cvmx_lmcx_ctl_cn30xx           cn31xx;
3293         struct cvmx_lmcx_ctl_cn38xx {
3294 #ifdef __BIG_ENDIAN_BITFIELD
3295         uint64_t reserved_32_63               : 32;
3296         uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
3297                                                          The encoded value on this will adjust the drive strength
3298                                                          of the DDR DQ pulldns. */
3299         uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
3300                                                          The encoded value on this will adjust the drive strength
3301                                                          of the DDR DQ pullup. */
3302         uint64_t slow_scf                     : 1;  /**< 1=SCF has pass1 latency, 0=SCF has 1 cycle lower latency
3303                                                          when compared to pass1
3304                                                          NOTE - This bit has NO effect in PASS1 */
3305         uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
3306                                                            bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
3307                                                          else
3308                                                            bank[n:0]=address[n+7:7]
3309                                                          where n=1 for a 4 bank part and n=2 for an 8 bank part */
3310         uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
3311                                                          allowing reads to interrupt. */
3312         uint64_t reserved_16_17               : 2;
3313         uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
3314                                                          of JEDEC Registered DIMMs which require Write
3315                                                          data to be registered in the controller. */
3316         uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
3317                                                          will slot an additional 1 cycle data bus bubble to
3318                                                          avoid DQ/DQS bus contention. This is only a CYA bit,
3319                                                          in case the "built-in" DIMM and RANK crossing logic
3320                                                          which should auto-detect and perfectly slot
3321                                                          read-to-reads to the same DIMM/RANK. */
3322         uint64_t inorder_mwf                  : 1;  /**< When set, forces LMC_MWF (writes) into strict, in-order
3323                                                          mode.  When clear, writes may be serviced out of order
3324                                                          (optimized to keep multiple banks active).
3325                                                          This bit is ONLY to be set at power-on and
3326                                                          should not be set for normal use.
3327                                                          NOTE: For PASS1, set as follows:
3328                                                              DDR-I -> 1
3329                                                              DDR-II -> 0
3330                                                          For Pass2, this bit is RA0, write ignore (this feature
3331                                                          is permanently disabled) */
3332         uint64_t inorder_mrf                  : 1;  /**< When set, forces LMC_MRF (reads) into strict, in-order
3333                                                          mode.  When clear, reads may be serviced out of order
3334                                                          (optimized to keep multiple banks active).
3335                                                          This bit is ONLY to be set at power-on and
3336                                                          should not be set for normal use.
3337                                                          NOTE: For PASS1, set as follows:
3338                                                              DDR-I -> 1
3339                                                              DDR-II -> 0
3340                                                          For Pass2, this bit should be written ZERO for
3341                                                          DDR I & II */
3342         uint64_t set_zero                     : 1;  /**< Reserved. Always Set this Bit to Zero */
3343         uint64_t mode128b                     : 1;  /**< 128b data Path Mode
3344                                                          Set to 1 if we use all 128 DQ pins
3345                                                          0 for 64b DQ mode. */
3346         uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
3347                                                          time for the DDR_DQ/DQS drivers is 1 dclk earlier.
3348                                                          This bit should typically be set. */
3349         uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
3350                                                          the DDR_DQ/DQS drivers is delayed an additional DCLK
3351                                                          cycle. This should be set to one whenever both SILO_HC
3352                                                          and SILO_QC are set. */
3353         uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
3354                                                          dclks to wait (on top of TCL+1+TSKW) before pulling
3355                                                          data out of the pad silos.
3356                                                              - 00: illegal
3357                                                              - 01: 1 dclks
3358                                                              - 10: 2 dclks
3359                                                              - 11: illegal
3360                                                          This should always be set to 1. */
3361         uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
3362                                                          DELAY on DQ (used in the controller to determine the
3363                                                          R->W spacing to avoid DQS/DQ bus conflicts). Enter
3364                                                          the largest of the per byte Board delay
3365                                                              - 00: 0 dclk
3366                                                              - 01: 1 dclks
3367                                                              - 10: 2 dclks
3368                                                              - 11: 3 dclks */
3369         uint64_t qs_dic                       : 2;  /**< QS Drive Strength Control (DDR1):
3370                                                          & DDR2 Termination Resistor Setting
3371                                                          When in DDR2, a non Zero value in this register
3372                                                          enables the On Die Termination (ODT) in DDR parts.
3373                                                          These two bits are loaded into the RTT
3374                                                          portion of the EMRS register bits A6 & A2. If DDR2's
3375                                                          termination (for the memory's DQ/DQS/DM pads) is not
3376                                                          desired, set it to 00. If it is, chose between
3377                                                          01 for 75 ohm and 10 for 150 ohm termination.
3378                                                              00 = ODT Disabled
3379                                                              01 = 75 ohm Termination
3380                                                              10 = 150 ohm Termination
3381                                                              11 = 50 ohm Termination
3382                                                          Octeon, on writes, by default, drives the 4/8 ODT
3383                                                          pins (64/128b mode) based on what the masks
3384                                                          (LMC_WODT_CTL) are programmed to.
3385                                                          LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
3386                                                          for READS. LMC_RODT_CTL needs to be programmed based
3387                                                          on the system's needs for ODT. */
3388         uint64_t dic                          : 2;  /**< Drive Strength Control:
3389                                                          For DDR-I/II Mode, DIC[0] is
3390                                                          loaded into the Extended Mode Register (EMRS) A1 bit
3391                                                          during initialization. (see DDR-I data sheet EMRS
3392                                                          description)
3393                                                              0 = Normal
3394                                                              1 = Reduced
3395                                                          For DDR-II Mode, DIC[1] is used to load into EMRS
3396                                                          bit 10 - DQSN Enable/Disable field. By default, we
3397                                                          program the DDR's to drive the DQSN also. Set it to
3398                                                          1 if DQSN should be Hi-Z.
3399                                                              0 - DQSN Enable
3400                                                              1 - DQSN Disable */
3401 #else
3402         uint64_t dic                          : 2;
3403         uint64_t qs_dic                       : 2;
3404         uint64_t tskw                         : 2;
3405         uint64_t sil_lat                      : 2;
3406         uint64_t bprch                        : 1;
3407         uint64_t fprch2                       : 1;
3408         uint64_t mode128b                     : 1;
3409         uint64_t set_zero                     : 1;
3410         uint64_t inorder_mrf                  : 1;
3411         uint64_t inorder_mwf                  : 1;
3412         uint64_t r2r_slot                     : 1;
3413         uint64_t rdimm_ena                    : 1;
3414         uint64_t reserved_16_17               : 2;
3415         uint64_t max_write_batch              : 4;
3416         uint64_t xor_bank                     : 1;
3417         uint64_t slow_scf                     : 1;
3418         uint64_t ddr__pctl                    : 4;
3419         uint64_t ddr__nctl                    : 4;
3420         uint64_t reserved_32_63               : 32;
3421 #endif
3422         } cn38xx;
3423         struct cvmx_lmcx_ctl_cn38xx           cn38xxp2;
3424         struct cvmx_lmcx_ctl_cn50xx {
3425 #ifdef __BIG_ENDIAN_BITFIELD
3426         uint64_t reserved_32_63               : 32;
3427         uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
3428                                                          The encoded value on this will adjust the drive strength
3429                                                          of the DDR DQ pulldns. */
3430         uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
3431                                                          The encoded value on this will adjust the drive strength
3432                                                          of the DDR DQ pullup. */
3433         uint64_t slow_scf                     : 1;  /**< Should be cleared to zero */
3434         uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
3435                                                            bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
3436                                                          else
3437                                                            bank[n:0]=address[n+7:7]
3438                                                          where n=1 for a 4 bank part and n=2 for an 8 bank part */
3439         uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
3440                                                          allowing reads to interrupt. */
3441         uint64_t reserved_17_17               : 1;
3442         uint64_t pll_bypass                   : 1;  /**< PLL Bypass. */
3443         uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
3444                                                          of JEDEC Registered DIMMs which require Write
3445                                                          data to be registered in the controller. */
3446         uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
3447                                                          will slot an additional 1 cycle data bus bubble to
3448                                                          avoid DQ/DQS bus contention. This is only a CYA bit,
3449                                                          in case the "built-in" DIMM and RANK crossing logic
3450                                                          which should auto-detect and perfectly slot
3451                                                          read-to-reads to the same DIMM/RANK. */
3452         uint64_t inorder_mwf                  : 1;  /**< Reads as zero */
3453         uint64_t inorder_mrf                  : 1;  /**< Always clear to zero */
3454         uint64_t dreset                       : 1;  /**< Dclk domain reset.  The reset signal that is used by the
3455                                                          Dclk domain is (DRESET || ECLK_RESET). */
3456         uint64_t mode32b                      : 1;  /**< 32b data Path Mode
3457                                                          Set to 1 if we use 32 DQ pins
3458                                                          0 for 16b DQ mode. */
3459         uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
3460                                                          time for the DDR_DQ/DQS drivers is 1 dclk earlier.
3461                                                          This bit should typically be set. */
3462         uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
3463                                                          the DDR_DQ/DQS drivers is delayed an additional DCLK
3464                                                          cycle. This should be set to one whenever both SILO_HC
3465                                                          and SILO_QC are set. */
3466         uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
3467                                                          dclks to wait (on top of TCL+1+TSKW) before pulling
3468                                                          data out of the pad silos.
3469                                                              - 00: illegal
3470                                                              - 01: 1 dclks
3471                                                              - 10: 2 dclks
3472                                                              - 11: illegal
3473                                                          This should always be set to 1. */
3474         uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
3475                                                          DELAY on DQ (used in the controller to determine the
3476                                                          R->W spacing to avoid DQS/DQ bus conflicts). Enter
3477                                                          the largest of the per byte Board delay
3478                                                              - 00: 0 dclk
3479                                                              - 01: 1 dclks
3480                                                              - 10: 2 dclks
3481                                                              - 11: 3 dclks */
3482         uint64_t qs_dic                       : 2;  /**< DDR2 Termination Resistor Setting
3483                                                          When in DDR2, a non Zero value in this register
3484                                                          enables the On Die Termination (ODT) in DDR parts.
3485                                                          These two bits are loaded into the RTT
3486                                                          portion of the EMRS register bits A6 & A2. If DDR2's
3487                                                          termination (for the memory's DQ/DQS/DM pads) is not
3488                                                          desired, set it to 00. If it is, chose between
3489                                                          01 for 75 ohm and 10 for 150 ohm termination.
3490                                                              00 = ODT Disabled
3491                                                              01 = 75 ohm Termination
3492                                                              10 = 150 ohm Termination
3493                                                              11 = 50 ohm Termination
3494                                                          Octeon, on writes, by default, drives the ODT
3495                                                          pins based on what the masks
3496                                                          (LMC_WODT_CTL) are programmed to.
3497                                                          LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
3498                                                          for READS. LMC_RODT_CTL needs to be programmed based
3499                                                          on the system's needs for ODT. */
3500         uint64_t dic                          : 2;  /**< Drive Strength Control:
3501                                                          DIC[0] is
3502                                                          loaded into the Extended Mode Register (EMRS) A1 bit
3503                                                          during initialization.
3504                                                              0 = Normal
3505                                                              1 = Reduced
3506                                                          DIC[1] is used to load into EMRS
3507                                                          bit 10 - DQSN Enable/Disable field. By default, we
3508                                                          program the DDR's to drive the DQSN also. Set it to
3509                                                          1 if DQSN should be Hi-Z.
3510                                                              0 - DQSN Enable
3511                                                              1 - DQSN Disable */
3512 #else
3513         uint64_t dic                          : 2;
3514         uint64_t qs_dic                       : 2;
3515         uint64_t tskw                         : 2;
3516         uint64_t sil_lat                      : 2;
3517         uint64_t bprch                        : 1;
3518         uint64_t fprch2                       : 1;
3519         uint64_t mode32b                      : 1;
3520         uint64_t dreset                       : 1;
3521         uint64_t inorder_mrf                  : 1;
3522         uint64_t inorder_mwf                  : 1;
3523         uint64_t r2r_slot                     : 1;
3524         uint64_t rdimm_ena                    : 1;
3525         uint64_t pll_bypass                   : 1;
3526         uint64_t reserved_17_17               : 1;
3527         uint64_t max_write_batch              : 4;
3528         uint64_t xor_bank                     : 1;
3529         uint64_t slow_scf                     : 1;
3530         uint64_t ddr__pctl                    : 4;
3531         uint64_t ddr__nctl                    : 4;
3532         uint64_t reserved_32_63               : 32;
3533 #endif
3534         } cn50xx;
3535         struct cvmx_lmcx_ctl_cn52xx {
3536 #ifdef __BIG_ENDIAN_BITFIELD
3537         uint64_t reserved_32_63               : 32;
3538         uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
3539                                                          The encoded value on this will adjust the drive strength
3540                                                          of the DDR DQ pulldns. */
3541         uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
3542                                                          The encoded value on this will adjust the drive strength
3543                                                          of the DDR DQ pullup. */
3544         uint64_t slow_scf                     : 1;  /**< Always clear to zero */
3545         uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
3546                                                            bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
3547                                                          else
3548                                                            bank[n:0]=address[n+7:7]
3549                                                          where n=1 for a 4 bank part and n=2 for an 8 bank part */
3550         uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
3551                                                          allowing reads to interrupt. */
3552         uint64_t reserved_16_17               : 2;
3553         uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
3554                                                          of JEDEC Registered DIMMs which require Write
3555                                                          data to be registered in the controller. */
3556         uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
3557                                                          will slot an additional 1 cycle data bus bubble to
3558                                                          avoid DQ/DQS bus contention. This is only a CYA bit,
3559                                                          in case the "built-in" DIMM and RANK crossing logic
3560                                                          which should auto-detect and perfectly slot
3561                                                          read-to-reads to the same DIMM/RANK. */
3562         uint64_t inorder_mwf                  : 1;  /**< Reads as zero */
3563         uint64_t inorder_mrf                  : 1;  /**< Always set to zero */
3564         uint64_t dreset                       : 1;  /**< MBZ
3565                                                          THIS IS OBSOLETE.  Use LMC_DLL_CTL[DRESET] instead. */
3566         uint64_t mode32b                      : 1;  /**< 32b data Path Mode
3567                                                          Set to 1 if we use only 32 DQ pins
3568                                                          0 for 64b DQ mode. */
3569         uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
3570                                                          time for the DDR_DQ/DQS drivers is 1 dclk earlier.
3571                                                          This bit should typically be set. */
3572         uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
3573                                                          the DDR_DQ/DQS drivers is delayed an additional DCLK
3574                                                          cycle. This should be set to one whenever both SILO_HC
3575                                                          and SILO_QC are set. */
3576         uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
3577                                                          dclks to wait (on top of TCL+1+TSKW) before pulling
3578                                                          data out of the pad silos.
3579                                                              - 00: illegal
3580                                                              - 01: 1 dclks
3581                                                              - 10: 2 dclks
3582                                                              - 11: illegal
3583                                                          This should always be set to 1.
3584                                                          THIS IS OBSOLETE.  Use READ_LEVEL_RANK instead. */
3585         uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
3586                                                          DELAY on DQ (used in the controller to determine the
3587                                                          R->W spacing to avoid DQS/DQ bus conflicts). Enter
3588                                                          the largest of the per byte Board delay
3589                                                              - 00: 0 dclk
3590                                                              - 01: 1 dclks
3591                                                              - 10: 2 dclks
3592                                                              - 11: 3 dclks
3593                                                          THIS IS OBSOLETE.  Use READ_LEVEL_RANK instead. */
3594         uint64_t qs_dic                       : 2;  /**< DDR2 Termination Resistor Setting
3595                                                          When in DDR2, a non Zero value in this register
3596                                                          enables the On Die Termination (ODT) in DDR parts.
3597                                                          These two bits are loaded into the RTT
3598                                                          portion of the EMRS register bits A6 & A2. If DDR2's
3599                                                          termination (for the memory's DQ/DQS/DM pads) is not
3600                                                          desired, set it to 00. If it is, chose between
3601                                                          01 for 75 ohm and 10 for 150 ohm termination.
3602                                                              00 = ODT Disabled
3603                                                              01 = 75 ohm Termination
3604                                                              10 = 150 ohm Termination
3605                                                              11 = 50 ohm Termination
3606                                                          Octeon, on writes, by default, drives the 4/8 ODT
3607                                                          pins (64/128b mode) based on what the masks
3608                                                          (LMC_WODT_CTL0 & 1) are programmed to.
3609                                                          LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
3610                                                          for READS. LMC_RODT_CTL needs to be programmed based
3611                                                          on the system's needs for ODT. */
3612         uint64_t dic                          : 2;  /**< Drive Strength Control:
3613                                                          DIC[0] is
3614                                                          loaded into the Extended Mode Register (EMRS) A1 bit
3615                                                          during initialization.
3616                                                              0 = Normal
3617                                                              1 = Reduced
3618                                                          DIC[1] is used to load into EMRS
3619                                                          bit 10 - DQSN Enable/Disable field. By default, we
3620                                                          program the DDR's to drive the DQSN also. Set it to
3621                                                          1 if DQSN should be Hi-Z.
3622                                                              0 - DQSN Enable
3623                                                              1 - DQSN Disable */
3624 #else
3625         uint64_t dic                          : 2;
3626         uint64_t qs_dic                       : 2;
3627         uint64_t tskw                         : 2;
3628         uint64_t sil_lat                      : 2;
3629         uint64_t bprch                        : 1;
3630         uint64_t fprch2                       : 1;
3631         uint64_t mode32b                      : 1;
3632         uint64_t dreset                       : 1;
3633         uint64_t inorder_mrf                  : 1;
3634         uint64_t inorder_mwf                  : 1;
3635         uint64_t r2r_slot                     : 1;
3636         uint64_t rdimm_ena                    : 1;
3637         uint64_t reserved_16_17               : 2;
3638         uint64_t max_write_batch              : 4;
3639         uint64_t xor_bank                     : 1;
3640         uint64_t slow_scf                     : 1;
3641         uint64_t ddr__pctl                    : 4;
3642         uint64_t ddr__nctl                    : 4;
3643         uint64_t reserved_32_63               : 32;
3644 #endif
3645         } cn52xx;
3646         struct cvmx_lmcx_ctl_cn52xx           cn52xxp1;
3647         struct cvmx_lmcx_ctl_cn52xx           cn56xx;
3648         struct cvmx_lmcx_ctl_cn52xx           cn56xxp1;
3649         struct cvmx_lmcx_ctl_cn58xx {
3650 #ifdef __BIG_ENDIAN_BITFIELD
3651         uint64_t reserved_32_63               : 32;
3652         uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
3653                                                          The encoded value on this will adjust the drive strength
3654                                                          of the DDR DQ pulldns. */
3655         uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
3656                                                          The encoded value on this will adjust the drive strength
3657                                                          of the DDR DQ pullup. */
3658         uint64_t slow_scf                     : 1;  /**< Should be cleared to zero */
3659         uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
3660                                                            bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
3661                                                          else
3662                                                            bank[n:0]=address[n+7:7]
3663                                                          where n=1 for a 4 bank part and n=2 for an 8 bank part */
3664         uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
3665                                                          allowing reads to interrupt. */
3666         uint64_t reserved_16_17               : 2;
3667         uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
3668                                                          of JEDEC Registered DIMMs which require Write
3669                                                          data to be registered in the controller. */
3670         uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
3671                                                          will slot an additional 1 cycle data bus bubble to
3672                                                          avoid DQ/DQS bus contention. This is only a CYA bit,
3673                                                          in case the "built-in" DIMM and RANK crossing logic
3674                                                          which should auto-detect and perfectly slot
3675                                                          read-to-reads to the same DIMM/RANK. */
3676         uint64_t inorder_mwf                  : 1;  /**< Reads as zero */
3677         uint64_t inorder_mrf                  : 1;  /**< Always clear to zero */
3678         uint64_t dreset                       : 1;  /**< Dclk domain reset.  The reset signal that is used by the
3679                                                          Dclk domain is (DRESET || ECLK_RESET). */
3680         uint64_t mode128b                     : 1;  /**< 128b data Path Mode
3681                                                          Set to 1 if we use all 128 DQ pins
3682                                                          0 for 64b DQ mode. */
3683         uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
3684                                                          time for the DDR_DQ/DQS drivers is 1 dclk earlier.
3685                                                          This bit should typically be set. */
3686         uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
3687                                                          the DDR_DQ/DQS drivers is delayed an additional DCLK
3688                                                          cycle. This should be set to one whenever both SILO_HC
3689                                                          and SILO_QC are set. */
3690         uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
3691                                                          dclks to wait (on top of TCL+1+TSKW) before pulling
3692                                                          data out of the pad silos.
3693                                                              - 00: illegal
3694                                                              - 01: 1 dclks
3695                                                              - 10: 2 dclks
3696                                                              - 11: illegal
3697                                                          This should always be set to 1. */
3698         uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
3699                                                          DELAY on DQ (used in the controller to determine the
3700                                                          R->W spacing to avoid DQS/DQ bus conflicts). Enter
3701                                                          the largest of the per byte Board delay
3702                                                              - 00: 0 dclk
3703                                                              - 01: 1 dclks
3704                                                              - 10: 2 dclks
3705                                                              - 11: 3 dclks */
3706         uint64_t qs_dic                       : 2;  /**< DDR2 Termination Resistor Setting
3707                                                          A non Zero value in this register
3708                                                          enables the On Die Termination (ODT) in DDR parts.
3709                                                          These two bits are loaded into the RTT
3710                                                          portion of the EMRS register bits A6 & A2. If DDR2's
3711                                                          termination (for the memory's DQ/DQS/DM pads) is not
3712                                                          desired, set it to 00. If it is, chose between
3713                                                          01 for 75 ohm and 10 for 150 ohm termination.
3714                                                              00 = ODT Disabled
3715                                                              01 = 75 ohm Termination
3716                                                              10 = 150 ohm Termination
3717                                                              11 = 50 ohm Termination
3718                                                          Octeon, on writes, by default, drives the 4/8 ODT
3719                                                          pins (64/128b mode) based on what the masks
3720                                                          (LMC_WODT_CTL) are programmed to.
3721                                                          LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
3722                                                          for READS. LMC_RODT_CTL needs to be programmed based
3723                                                          on the system's needs for ODT. */
3724         uint64_t dic                          : 2;  /**< Drive Strength Control:
3725                                                          DIC[0] is
3726                                                          loaded into the Extended Mode Register (EMRS) A1 bit
3727                                                          during initialization.
3728                                                              0 = Normal
3729                                                              1 = Reduced
3730                                                          DIC[1] is used to load into EMRS
3731                                                          bit 10 - DQSN Enable/Disable field. By default, we
3732                                                          program the DDR's to drive the DQSN also. Set it to
3733                                                          1 if DQSN should be Hi-Z.
3734                                                              0 - DQSN Enable
3735                                                              1 - DQSN Disable */
3736 #else
3737         uint64_t dic                          : 2;
3738         uint64_t qs_dic                       : 2;
3739         uint64_t tskw                         : 2;
3740         uint64_t sil_lat                      : 2;
3741         uint64_t bprch                        : 1;
3742         uint64_t fprch2                       : 1;
3743         uint64_t mode128b                     : 1;
3744         uint64_t dreset                       : 1;
3745         uint64_t inorder_mrf                  : 1;
3746         uint64_t inorder_mwf                  : 1;
3747         uint64_t r2r_slot                     : 1;
3748         uint64_t rdimm_ena                    : 1;
3749         uint64_t reserved_16_17               : 2;
3750         uint64_t max_write_batch              : 4;
3751         uint64_t xor_bank                     : 1;
3752         uint64_t slow_scf                     : 1;
3753         uint64_t ddr__pctl                    : 4;
3754         uint64_t ddr__nctl                    : 4;
3755         uint64_t reserved_32_63               : 32;
3756 #endif
3757         } cn58xx;
3758         struct cvmx_lmcx_ctl_cn58xx           cn58xxp1;
3759 };
3760 typedef union cvmx_lmcx_ctl cvmx_lmcx_ctl_t;
3761
/**
 * cvmx_lmc#_ctl1
 *
 * LMC_CTL1 = LMC Control1
 * This register is an assortment of various control fields needed by the memory controller
 */
union cvmx_lmcx_ctl1 {
        uint64_t u64;                               /**< Whole register as a raw 64-bit value */
        /* Superset layout: every field implemented by any supported chip.
         * Reserved fields pad the declared bits out to exactly 64. */
        struct cvmx_lmcx_ctl1_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_21_63               : 43;
        uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
                                                         0=disabled, 1=enabled */
        uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
                                                         having waited for 2^FORCEWRITE cycles.  0=disabled. */
        uint64_t idlepower                    : 3;  /**< Enter power-down mode after the memory controller has
                                                         been idle for 2^(2+IDLEPOWER) cycles.  0=disabled. */
        uint64_t sequence                     : 3;  /**< Instruction sequence that is run after a 0->1 transition
                                                         on LMC_MEM_CFG0[INIT_START].
                                                         0=DDR2 power-up/init, 1=read-leveling
                                                         2=self-refresh entry, 3=self-refresh exit,
                                                         4=power-down entry, 5=power-down exit, 6=7=illegal */
        uint64_t sil_mode                     : 1;  /**< Read Silo mode.  0=envelope, 1=self-timed. */
        uint64_t dcc_enable                   : 1;  /**< Duty Cycle Corrector Enable.
                                                         0=disable, 1=enable
                                                         If the memory part does not support DCC, then this bit
                                                         must be set to 0. */
        uint64_t reserved_2_7                 : 6;
        uint64_t data_layout                  : 2;  /**< Logical data layout per DQ byte lane:
                                                         In 32b mode, this setting has no effect and the data
                                                         layout DQ[35:0] is the following:
                                                             [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]]
                                                         In 16b mode, the DQ[35:0] layouts are the following:
                                                         0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]]
                                                         1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]]
                                                         2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]]
                                                         where E means ecc, D means data, and 0 means unused
                                                         (ignored on reads and written as 0 on writes) */
#else
        /* Little-endian compilers allocate bit-fields from the opposite
         * end, so the same fields are declared in reverse order to keep
         * the physical bit positions identical to the block above. */
        uint64_t data_layout                  : 2;
        uint64_t reserved_2_7                 : 6;
        uint64_t dcc_enable                   : 1;
        uint64_t sil_mode                     : 1;
        uint64_t sequence                     : 3;
        uint64_t idlepower                    : 3;
        uint64_t forcewrite                   : 4;
        uint64_t ecc_adr                      : 1;
        uint64_t reserved_21_63               : 43;
#endif
        } s;
        /* CN30XX implements only DATA_LAYOUT. */
        struct cvmx_lmcx_ctl1_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_2_63                : 62;
        uint64_t data_layout                  : 2;  /**< Logical data layout per DQ byte lane:
                                                         In 32b mode, this setting has no effect and the data
                                                         layout DQ[35:0] is the following:
                                                             [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]]
                                                         In 16b mode, the DQ[35:0] layouts are the following:
                                                         0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]]
                                                         1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]]
                                                         2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]]
                                                         where E means ecc, D means data, and 0 means unused
                                                         (ignored on reads and written as 0 on writes) */
#else
        uint64_t data_layout                  : 2;
        uint64_t reserved_2_63                : 62;
#endif
        } cn30xx;
        /* CN50XX adds SIL_MODE/DCC_ENABLE on top of DATA_LAYOUT. */
        struct cvmx_lmcx_ctl1_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_10_63               : 54;
        uint64_t sil_mode                     : 1;  /**< Read Silo mode.  0=envelope, 1=self-timed. */
        uint64_t dcc_enable                   : 1;  /**< Duty Cycle Corrector Enable.
                                                         0=disable, 1=enable
                                                         If the memory part does not support DCC, then this bit
                                                         must be set to 0. */
        uint64_t reserved_2_7                 : 6;
        uint64_t data_layout                  : 2;  /**< Logical data layout per DQ byte lane:
                                                         In 32b mode, this setting has no effect and the data
                                                         layout DQ[35:0] is the following:
                                                             [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]]
                                                         In 16b mode, the DQ[35:0] layouts are the following:
                                                         0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]]
                                                         1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]]
                                                         2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]]
                                                         where E means ecc, D means data, and 0 means unused
                                                         (ignored on reads and written as 0 on writes) */
#else
        uint64_t data_layout                  : 2;
        uint64_t reserved_2_7                 : 6;
        uint64_t dcc_enable                   : 1;
        uint64_t sil_mode                     : 1;
        uint64_t reserved_10_63               : 54;
#endif
        } cn50xx;
        /* CN52XX family: full field set minus DATA_LAYOUT (bits 0..7 reserved). */
        struct cvmx_lmcx_ctl1_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_21_63               : 43;
        uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
                                                         0=disabled, 1=enabled */
        uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
                                                         having waited for 2^FORCEWRITE cycles.  0=disabled. */
        uint64_t idlepower                    : 3;  /**< Enter power-down mode after the memory controller has
                                                         been idle for 2^(2+IDLEPOWER) cycles.  0=disabled. */
        uint64_t sequence                     : 3;  /**< Instruction sequence that is run after a 0->1 transition
                                                         on LMC_MEM_CFG0[INIT_START].
                                                         0=DDR2 power-up/init, 1=read-leveling
                                                         2=self-refresh entry, 3=self-refresh exit,
                                                         4=power-down entry, 5=power-down exit, 6=7=illegal */
        uint64_t sil_mode                     : 1;  /**< Read Silo mode.  0=envelope, 1=self-timed. */
        uint64_t dcc_enable                   : 1;  /**< Duty Cycle Corrector Enable.
                                                         0=disable, 1=enable
                                                         If the memory part does not support DCC, then this bit
                                                         must be set to 0. */
        uint64_t reserved_0_7                 : 8;
#else
        uint64_t reserved_0_7                 : 8;
        uint64_t dcc_enable                   : 1;
        uint64_t sil_mode                     : 1;
        uint64_t sequence                     : 3;
        uint64_t idlepower                    : 3;
        uint64_t forcewrite                   : 4;
        uint64_t ecc_adr                      : 1;
        uint64_t reserved_21_63               : 43;
#endif
        } cn52xx;
        struct cvmx_lmcx_ctl1_cn52xx          cn52xxp1; /**< same layout as cn52xx */
        struct cvmx_lmcx_ctl1_cn52xx          cn56xx;   /**< same layout as cn52xx */
        struct cvmx_lmcx_ctl1_cn52xx          cn56xxp1; /**< same layout as cn52xx */
        /* CN58XX: only SIL_MODE and DCC_ENABLE. */
        struct cvmx_lmcx_ctl1_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_10_63               : 54;
        uint64_t sil_mode                     : 1;  /**< Read Silo mode.  0=envelope, 1=self-timed. */
        uint64_t dcc_enable                   : 1;  /**< Duty Cycle Corrector Enable.
                                                         0=disable, 1=enable
                                                         If the memory part does not support DCC, then this bit
                                                         must be set to 0. */
        uint64_t reserved_0_7                 : 8;
#else
        uint64_t reserved_0_7                 : 8;
        uint64_t dcc_enable                   : 1;
        uint64_t sil_mode                     : 1;
        uint64_t reserved_10_63               : 54;
#endif
        } cn58xx;
        struct cvmx_lmcx_ctl1_cn58xx          cn58xxp1; /**< same layout as cn58xx */
};
typedef union cvmx_lmcx_ctl1 cvmx_lmcx_ctl1_t;
3910
3911 /**
3912  * cvmx_lmc#_dclk_cnt
3913  *
3914  * LMC_DCLK_CNT  = Performance Counters
3915  *
3916  */
3917 union cvmx_lmcx_dclk_cnt {
3918         uint64_t u64;
3919         struct cvmx_lmcx_dclk_cnt_s {
3920 #ifdef __BIG_ENDIAN_BITFIELD
3921         uint64_t dclkcnt                      : 64; /**< Performance Counter
3922                                                          64-bit counter that increments every CK cycle */
3923 #else
3924         uint64_t dclkcnt                      : 64;
3925 #endif
3926         } s;
3927         struct cvmx_lmcx_dclk_cnt_s           cn61xx;
3928         struct cvmx_lmcx_dclk_cnt_s           cn63xx;
3929         struct cvmx_lmcx_dclk_cnt_s           cn63xxp1;
3930         struct cvmx_lmcx_dclk_cnt_s           cn66xx;
3931         struct cvmx_lmcx_dclk_cnt_s           cn68xx;
3932         struct cvmx_lmcx_dclk_cnt_s           cn68xxp1;
3933         struct cvmx_lmcx_dclk_cnt_s           cnf71xx;
3934 };
3935 typedef union cvmx_lmcx_dclk_cnt cvmx_lmcx_dclk_cnt_t;
3936
3937 /**
3938  * cvmx_lmc#_dclk_cnt_hi
3939  *
3940  * LMC_DCLK_CNT_HI  = Performance Counters
3941  *
3942  */
3943 union cvmx_lmcx_dclk_cnt_hi {
3944         uint64_t u64;
3945         struct cvmx_lmcx_dclk_cnt_hi_s {
3946 #ifdef __BIG_ENDIAN_BITFIELD
3947         uint64_t reserved_32_63               : 32;
3948         uint64_t dclkcnt_hi                   : 32; /**< Performance Counter that counts dclks
3949                                                          Upper 32-bits of a 64-bit counter. */
3950 #else
3951         uint64_t dclkcnt_hi                   : 32;
3952         uint64_t reserved_32_63               : 32;
3953 #endif
3954         } s;
3955         struct cvmx_lmcx_dclk_cnt_hi_s        cn30xx;
3956         struct cvmx_lmcx_dclk_cnt_hi_s        cn31xx;
3957         struct cvmx_lmcx_dclk_cnt_hi_s        cn38xx;
3958         struct cvmx_lmcx_dclk_cnt_hi_s        cn38xxp2;
3959         struct cvmx_lmcx_dclk_cnt_hi_s        cn50xx;
3960         struct cvmx_lmcx_dclk_cnt_hi_s        cn52xx;
3961         struct cvmx_lmcx_dclk_cnt_hi_s        cn52xxp1;
3962         struct cvmx_lmcx_dclk_cnt_hi_s        cn56xx;
3963         struct cvmx_lmcx_dclk_cnt_hi_s        cn56xxp1;
3964         struct cvmx_lmcx_dclk_cnt_hi_s        cn58xx;
3965         struct cvmx_lmcx_dclk_cnt_hi_s        cn58xxp1;
3966 };
3967 typedef union cvmx_lmcx_dclk_cnt_hi cvmx_lmcx_dclk_cnt_hi_t;
3968
/**
 * cvmx_lmc#_dclk_cnt_lo
 *
 * LMC_DCLK_CNT_LO  = Performance Counters
 *
 * Lower half of the split 64-bit dclk counter; pairs with
 * LMC_DCLK_CNT_HI on the older chip models listed below.
 */
union cvmx_lmcx_dclk_cnt_lo {
        uint64_t u64;                               /**< Whole register as a raw 64-bit value */
        struct cvmx_lmcx_dclk_cnt_lo_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_32_63               : 32;
        uint64_t dclkcnt_lo                   : 32; /**< Performance Counter that counts dclks
                                                         Lower 32-bits of a 64-bit counter. */
#else
        uint64_t dclkcnt_lo                   : 32;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        /* All listed chip models share the superset layout. */
        struct cvmx_lmcx_dclk_cnt_lo_s        cn30xx;
        struct cvmx_lmcx_dclk_cnt_lo_s        cn31xx;
        struct cvmx_lmcx_dclk_cnt_lo_s        cn38xx;
        struct cvmx_lmcx_dclk_cnt_lo_s        cn38xxp2;
        struct cvmx_lmcx_dclk_cnt_lo_s        cn50xx;
        struct cvmx_lmcx_dclk_cnt_lo_s        cn52xx;
        struct cvmx_lmcx_dclk_cnt_lo_s        cn52xxp1;
        struct cvmx_lmcx_dclk_cnt_lo_s        cn56xx;
        struct cvmx_lmcx_dclk_cnt_lo_s        cn56xxp1;
        struct cvmx_lmcx_dclk_cnt_lo_s        cn58xx;
        struct cvmx_lmcx_dclk_cnt_lo_s        cn58xxp1;
};
typedef union cvmx_lmcx_dclk_cnt_lo cvmx_lmcx_dclk_cnt_lo_t;
4000
/**
 * cvmx_lmc#_dclk_ctl
 *
 * LMC_DCLK_CTL = LMC DCLK generation control
 *
 *
 * Notes:
 * This CSR is only relevant for LMC1. LMC0_DCLK_CTL is not used.
 */
union cvmx_lmcx_dclk_ctl {
        uint64_t u64;                               /**< Whole register as a raw 64-bit value */
        struct cvmx_lmcx_dclk_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_8_63                : 56;
        uint64_t off90_ena                    : 1;  /**< 0=use global DCLK (i.e. the PLL) directly for LMC1
                                                         1=use the 90 degree DCLK DLL to offset LMC1 DCLK */
        uint64_t dclk90_byp                   : 1;  /**< 0=90 degree DCLK DLL uses sampled delay from LMC0
                                                         1=90 degree DCLK DLL uses DCLK90_VLU
                                                         See DCLK90_VLU. */
        uint64_t dclk90_ld                    : 1;  /**< The 90 degree DCLK DLL samples the delay setting
                                                         from LMC0's DLL when this field transitions 0->1 */
        uint64_t dclk90_vlu                   : 5;  /**< Manual open-loop delay setting.
                                                         The LMC1 90 degree DCLK DLL uses DCLK90_VLU rather
                                                         than the delay setting sampled from LMC0 when
                                                         DCLK90_BYP=1. */
#else
        /* Little-endian mirror of the layout above. */
        uint64_t dclk90_vlu                   : 5;
        uint64_t dclk90_ld                    : 1;
        uint64_t dclk90_byp                   : 1;
        uint64_t off90_ena                    : 1;
        uint64_t reserved_8_63                : 56;
#endif
        } s;
        /* Only the CN56XX models implement this register. */
        struct cvmx_lmcx_dclk_ctl_s           cn56xx;
        struct cvmx_lmcx_dclk_ctl_s           cn56xxp1;
};
typedef union cvmx_lmcx_dclk_ctl cvmx_lmcx_dclk_ctl_t;
4039
4040 /**
4041  * cvmx_lmc#_ddr2_ctl
4042  *
4043  * LMC_DDR2_CTL = LMC DDR2 & DLL Control Register
4044  *
4045  */
4046 union cvmx_lmcx_ddr2_ctl {
4047         uint64_t u64;
4048         struct cvmx_lmcx_ddr2_ctl_s {
4049 #ifdef __BIG_ENDIAN_BITFIELD
4050         uint64_t reserved_32_63               : 32;
4051         uint64_t bank8                        : 1;  /**< For 8 bank DDR2 parts
4052                                                          1 - DDR2 parts have 8 internal banks (BA is 3 bits
4053                                                          wide).
4054                                                          0 - DDR2 parts have 4 internal banks (BA is 2 bits
4055                                                          wide). */
4056         uint64_t burst8                       : 1;  /**< 8-burst mode.
4057                                                          1 - DDR data transfer happens in burst of 8
4058                                                          0 - DDR data transfer happens in burst of 4
4059                                                          BURST8 should be set when DDR2T is set
4060                                                          to minimize the command bandwidth loss. */
4061         uint64_t addlat                       : 3;  /**< Additional Latency for posted CAS
4062                                                          When Posted CAS is on, this configures the additional
4063                                                          latency. This should be set to
4064                                                                 1 .. LMC_MEM_CFG1[TRCD]-2
4065                                                          (Note the implication that posted CAS should not
4066                                                          be used when tRCD is two.) */
4067         uint64_t pocas                        : 1;  /**< Enable the Posted CAS feature of DDR2. */
4068         uint64_t bwcnt                        : 1;  /**< Bus utilization counter Clear.
4069                                                          Clears the LMC_OPS_CNT_*, LMC_IFB_CNT_*, and
4070                                                          LMC_DCLK_CNT_* registers. SW should first write this
4071                                                          field to a one, then write this field to a zero to
4072                                                          clear the CSR's. */
4073         uint64_t twr                          : 3;  /**< DDR Write Recovery time (tWR). Last Wr Brst to Pre delay
4074                                                          This is not a direct encoding of the value. Its
4075                                                          programmed as below per DDR2 spec. The decimal number
4076                                                          on the right is RNDUP(tWR(ns) / tCYC(ns))
4077                                                           TYP=15ns
4078                                                              - 000: RESERVED
4079                                                              - 001: 2
4080                                                              - 010: 3
4081                                                              - 011: 4
4082                                                              - 100: 5
4083                                                              - 101: 6
4084                                                              - 110: 7
4085                                                              - 111: 8 */
4086         uint64_t silo_hc                      : 1;  /**< Delays the read sample window by a Half Cycle. */
4087         uint64_t ddr_eof                      : 4;  /**< Early Fill Counter Init.
4088                                                          L2 needs to know a few cycle before a fill completes so
4089                                                          it can get its Control pipe started (for better overall
4090                                                          performance). This counter contains  an init value which
4091                                                          is a function of Eclk/Dclk ratio to account for the
4092                                                          asynchronous boundary between L2 cache and the DRAM
4093                                                          controller. This init value will
4094                                                          determine when to safely let the L2 know that a fill
4095                                                          termination is coming up.
4096                                                          Set DDR_EOF according to the following rule:
4097                                                          eclkFreq/dclkFreq = dclkPeriod/eclkPeriod = RATIO
4098                                                                 RATIO < 6/6  -> illegal
4099                                                          6/6 <= RATIO < 6/5  -> DDR_EOF=3
4100                                                          6/5 <= RATIO < 6/4  -> DDR_EOF=3
4101                                                          6/4 <= RATIO < 6/3  -> DDR_EOF=2
4102                                                          6/3 <= RATIO < 6/2  -> DDR_EOF=1
4103                                                          6/2 <= RATIO < 6/1  -> DDR_EOF=0
4104                                                          6/1 <= RATIO        -> DDR_EOF=0 */
4105         uint64_t tfaw                         : 5;  /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1
4106                                                          Four Access Window time. Relevant only in DDR2 AND in
4107                                                          8-bank parts.
4108                                                              tFAW = 5'b0 in DDR2-4bank
4109                                                              tFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1
4110                                                                       in DDR2-8bank */
4111         uint64_t crip_mode                    : 1;  /**< Cripple Mode - When set, the LMC allows only
4112                                                          1 inflight transaction (.vs. 8 in normal mode).
4113                                                          This bit is ONLY to be set at power-on and
4114                                                          should not be set for normal use. */
4115         uint64_t ddr2t                        : 1;  /**< Turn on the DDR 2T mode. 2 cycle window for CMD and
4116                                                          address. This mode helps relieve setup time pressure
4117                                                          on the Address and command bus which nominally have
4118                                                          a very large fanout. Please refer to Micron's tech
4119                                                          note tn_47_01 titled "DDR2-533 Memory Design Guide
4120                                                          for Two Dimm Unbuffered Systems" for physical details.
4121                                                          BURST8 should be set when DDR2T is set to minimize
4122                                                          add/cmd loss. */
4123         uint64_t odt_ena                      : 1;  /**< Enable Obsolete ODT on Reads
4124                                                          Obsolete Read ODT wiggles DDR_ODT_* pins on reads.
4125                                                          Should normally be cleared to zero.
4126                                                          When this is on, the following fields must also be
4127                                                          programmed:
4128                                                              LMC_CTL->QS_DIC - programs the termination value
4129                                                              LMC_RODT_CTL - programs the ODT I/O mask for Reads */
4130         uint64_t qdll_ena                     : 1;  /**< DDR Quad DLL Enable: A 0->1 transition on this bit after
4131                                                          DCLK init sequence will reset the DDR 90 DLL. Should
4132                                                          happen at startup before any activity in DDR.
4133                                                          DRESET should be asserted before and for 10 usec
4134                                                          following the 0->1 transition on QDLL_ENA. */
4135         uint64_t dll90_vlu                    : 5;  /**< Contains the open loop setting value for the DDR90 delay
4136                                                          line. */
4137         uint64_t dll90_byp                    : 1;  /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be
4138                                                          bypassed and the setting is defined by DLL90_VLU */
4139         uint64_t rdqs                         : 1;  /**< DDR2 RDQS mode. When set, configures memory subsystem to
4140                                                          use unidirectional DQS pins. RDQS/DM - Rcv & DQS - Xmit */
4141         uint64_t ddr2                         : 1;  /**< Should be set */
4142 #else
4143         uint64_t ddr2                         : 1;
4144         uint64_t rdqs                         : 1;
4145         uint64_t dll90_byp                    : 1;
4146         uint64_t dll90_vlu                    : 5;
4147         uint64_t qdll_ena                     : 1;
4148         uint64_t odt_ena                      : 1;
4149         uint64_t ddr2t                        : 1;
4150         uint64_t crip_mode                    : 1;
4151         uint64_t tfaw                         : 5;
4152         uint64_t ddr_eof                      : 4;
4153         uint64_t silo_hc                      : 1;
4154         uint64_t twr                          : 3;
4155         uint64_t bwcnt                        : 1;
4156         uint64_t pocas                        : 1;
4157         uint64_t addlat                       : 3;
4158         uint64_t burst8                       : 1;
4159         uint64_t bank8                        : 1;
4160         uint64_t reserved_32_63               : 32;
4161 #endif
4162         } s;
4163         struct cvmx_lmcx_ddr2_ctl_cn30xx {
4164 #ifdef __BIG_ENDIAN_BITFIELD
4165         uint64_t reserved_32_63               : 32;
4166         uint64_t bank8                        : 1;  /**< For 8 bank DDR2 parts
4167                                                          1 - DDR2 parts have 8 internal banks (BA is 3 bits
4168                                                          wide).
4169                                                          0 - DDR2 parts have 4 internal banks (BA is 2 bits
4170                                                          wide). */
4171         uint64_t burst8                       : 1;  /**< 8-burst mode.
4172                                                          1 - DDR data transfer happens in burst of 8
4173                                                          0 - DDR data transfer happens in burst of 4
4174                                                          BURST8 should be set when DDR2T is set to minimize
4175                                                          add/cmd bandwidth loss. */
4176         uint64_t addlat                       : 3;  /**< Additional Latency for posted CAS
4177                                                          When Posted CAS is on, this configures the additional
4178                                                          latency. This should be set to
4179                                                                 1 .. LMC_MEM_CFG1[TRCD]-2
4180                                                          (Note the implication that posted CAS should not
4181                                                          be used when tRCD is two.) */
4182         uint64_t pocas                        : 1;  /**< Enable the Posted CAS feature of DDR2. */
4183         uint64_t bwcnt                        : 1;  /**< Bus utilization counter Clear.
4184                                                          Clears the LMC_OPS_CNT_*, LMC_IFB_CNT_*, and
4185                                                          LMC_DCLK_CNT_* registers. SW should first write this
4186                                                          field to a one, then write this field to a zero to
4187                                                          clear the CSR's. */
4188         uint64_t twr                          : 3;  /**< DDR Write Recovery time (tWR). Last Wr Brst to Pre delay
4189                                                          This is not a direct encoding of the value. Its
4190                                                          programmed as below per DDR2 spec. The decimal number
4191                                                          on the right is RNDUP(tWR(ns) / tCYC(ns))
4192                                                           TYP=15ns
4193                                                              - 000: RESERVED
4194                                                              - 001: 2
4195                                                              - 010: 3
4196                                                              - 011: 4
4197                                                              - 100: 5
4198                                                              - 101: 6
4199                                                              - 110-111: RESERVED */
4200         uint64_t silo_hc                      : 1;  /**< Delays the read sample window by a Half Cycle. */
4201         uint64_t ddr_eof                      : 4;  /**< Early Fill Counter Init.
4202                                                          L2 needs to know a few cycle before a fill completes so
4203                                                          it can get its Control pipe started (for better overall
4204                                                          performance). This counter contains  an init value which
4205                                                          is a function of Eclk/Dclk ratio to account for the
4206                                                          asynchronous boundary between L2 cache and the DRAM
4207                                                          controller. This init value will
4208                                                          determine when to safely let the L2 know that a fill
4209                                                          termination is coming up.
4210                                                          DDR_EOF = RNDUP (DCLK period/Eclk Period). If the ratio
4211                                                          is above 3, set DDR_EOF to 3.
4212                                                              DCLK/ECLK period         DDR_EOF
4213                                                                 Less than 1            1
4214                                                                 Less than 2            2
4215                                                                 More than 2            3 */
4216         uint64_t tfaw                         : 5;  /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1
4217                                                          Four Access Window time. Relevant only in
4218                                                          8-bank parts.
4219                                                              TFAW = 5'b0 for DDR2-4bank
4220                                                              TFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1 in DDR2-8bank */
4221         uint64_t crip_mode                    : 1;  /**< Cripple Mode - When set, the LMC allows only
4222                                                          1 inflight transaction (.vs. 8 in normal mode).
4223                                                          This bit is ONLY to be set at power-on and
4224                                                          should not be set for normal use. */
4225         uint64_t ddr2t                        : 1;  /**< Turn on the DDR 2T mode. 2 cycle window for CMD and
4226                                                          address. This mode helps relieve setup time pressure
4227                                                          on the Address and command bus which nominally have
4228                                                          a very large fanout. Please refer to Micron's tech
4229                                                          note tn_47_01 titled "DDR2-533 Memory Design Guide
4230                                                          for Two Dimm Unbuffered Systems" for physical details.
4231                                                          BURST8 should be used when DDR2T is set to minimize
4232                                                          add/cmd bandwidth loss. */
4233         uint64_t odt_ena                      : 1;  /**< Enable ODT for DDR2 on Reads
4234                                                          When this is on, the following fields must also be
4235                                                          programmed:
4236                                                              LMC_CTL->QS_DIC - programs the termination value
4237                                                              LMC_RODT_CTL - programs the ODT I/O mask for writes
4238                                                          Program as 0 for DDR1 mode and ODT needs to be off
4239                                                          on Octeon Reads */
4240         uint64_t qdll_ena                     : 1;  /**< DDR Quad DLL Enable: A 0->1 transition on this bit after
4241                                                          erst deassertion will reset the DDR 90 DLL. Should
4242                                                          happen at startup before any activity in DDR. */
4243         uint64_t dll90_vlu                    : 5;  /**< Contains the open loop setting value for the DDR90 delay
4244                                                          line. */
4245         uint64_t dll90_byp                    : 1;  /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be
4246                                                          bypassed and the setting is defined by DLL90_VLU */
4247         uint64_t reserved_1_1                 : 1;
4248         uint64_t ddr2                         : 1;  /**< DDR2 Enable: When set, configures memory subsystem for
4249                                                          DDR-II SDRAMs. */
4250 #else
4251         uint64_t ddr2                         : 1;
4252         uint64_t reserved_1_1                 : 1;
4253         uint64_t dll90_byp                    : 1;
4254         uint64_t dll90_vlu                    : 5;
4255         uint64_t qdll_ena                     : 1;
4256         uint64_t odt_ena                      : 1;
4257         uint64_t ddr2t                        : 1;
4258         uint64_t crip_mode                    : 1;
4259         uint64_t tfaw                         : 5;
4260         uint64_t ddr_eof                      : 4;
4261         uint64_t silo_hc                      : 1;
4262         uint64_t twr                          : 3;
4263         uint64_t bwcnt                        : 1;
4264         uint64_t pocas                        : 1;
4265         uint64_t addlat                       : 3;
4266         uint64_t burst8                       : 1;
4267         uint64_t bank8                        : 1;
4268         uint64_t reserved_32_63               : 32;
4269 #endif
4270         } cn30xx;
4271         struct cvmx_lmcx_ddr2_ctl_cn30xx      cn31xx;
4272         struct cvmx_lmcx_ddr2_ctl_s           cn38xx;
4273         struct cvmx_lmcx_ddr2_ctl_s           cn38xxp2;
4274         struct cvmx_lmcx_ddr2_ctl_s           cn50xx;
4275         struct cvmx_lmcx_ddr2_ctl_s           cn52xx;
4276         struct cvmx_lmcx_ddr2_ctl_s           cn52xxp1;
4277         struct cvmx_lmcx_ddr2_ctl_s           cn56xx;
4278         struct cvmx_lmcx_ddr2_ctl_s           cn56xxp1;
4279         struct cvmx_lmcx_ddr2_ctl_s           cn58xx;
4280         struct cvmx_lmcx_ddr2_ctl_s           cn58xxp1;
4281 };
4282 typedef union cvmx_lmcx_ddr2_ctl cvmx_lmcx_ddr2_ctl_t;
4283
4284 /**
4285  * cvmx_lmc#_ddr_pll_ctl
4286  *
4287  * LMC_DDR_PLL_CTL = LMC DDR PLL control
4288  *
4289  *
4290  * Notes:
4291  * DDR PLL Bringup sequence:
4292  * 1.  Write CLKF, DDR_PS_EN, DFM_PS_EN, DIFFAMP, CPS, CPB.
4293  *     If test mode is going to be activated, then also write jtg__ddr_pll_tm_en1, jtg__ddr_pll_tm_en2, jtg__ddr_pll_tm_en3,
4294  *     jtg__ddr_pll_tm_en4, jtg__dfa_pll_tm_en1, jtg__dfa_pll_tm_en2, jtg__dfa_pll_tm_en3, jtg__dfa_pll_tm_en4, JTAG_TEST_MODE
4295  * 2.  Wait 128 ref clock cycles (7680 rclk cycles)
4296  * 3.  Write 1 to RESET_N
4297  * 4.  Wait 1152 ref clocks (1152*16 rclk cycles)
4298  * 5.  Write 0 to  DDR_DIV_RESET and DFM_DIV_RESET
4299  * 6.  Wait 10 ref clock cycles (160 rclk cycles) before bringing up the DDR interface
4300  *     If test mode is going to be activated, wait an additional 8191 ref clocks (8191*16 rclk cycles) to allow PLL
4301  *     clock alignment
4302  */
4303 union cvmx_lmcx_ddr_pll_ctl {
4304         uint64_t u64;                               /**< Whole register as a single 64-bit value. */
4305         struct cvmx_lmcx_ddr_pll_ctl_s {
4306 #ifdef __BIG_ENDIAN_BITFIELD
4307         uint64_t reserved_27_63               : 37;
4308         uint64_t jtg_test_mode                : 1;  /**< JTAG Test Mode
4309                                                          Clock alignment between DCLK & REFCLK as well as FCLK &
4310                                                          REFCLK can only be performed after the ddr_pll_divider_reset
4311                                                          is deasserted. SW needs to wait at least 10 reference clock
4312                                                          cycles after deasserting pll_divider_reset before asserting
4313                                                          LMC(0)_DDR_PLL_CTL[JTG_TEST_MODE]. During alignment (which can
4314                                                          take up to 160 microseconds) DCLK and FCLK can exhibit some
4315                                                          high frequency pulses. Therefore, all bring up activities in
4316                                                          that clock domain need to be delayed (when the chip operates
4317                                                          in jtg_test_mode) by about 160 microseconds to ensure that
4318                                                          lock is achieved. */
4319         uint64_t dfm_div_reset                : 1;  /**< DFM postscalar divider reset */
4320         uint64_t dfm_ps_en                    : 3;  /**< DFM postscalar divide ratio
4321                                                          Determines the DFM CK speed.
4322                                                          0x0 : Divide LMC+DFM PLL output by 1
4323                                                          0x1 : Divide LMC+DFM PLL output by 2
4324                                                          0x2 : Divide LMC+DFM PLL output by 3
4325                                                          0x3 : Divide LMC+DFM PLL output by 4
4326                                                          0x4 : Divide LMC+DFM PLL output by 6
4327                                                          0x5 : Divide LMC+DFM PLL output by 8
4328                                                          0x6 : Divide LMC+DFM PLL output by 12
4329                                                          0x7 : Divide LMC+DFM PLL output by 12
4330                                                          DFM_PS_EN is not used when DFM_DIV_RESET = 1 */
4331         uint64_t ddr_div_reset                : 1;  /**< DDR postscalar divider reset */
4332         uint64_t ddr_ps_en                    : 3;  /**< DDR postscalar divide ratio
4333                                                          Determines the LMC CK speed.
4334                                                          0x0 : Divide LMC+DFM PLL output by 1
4335                                                          0x1 : Divide LMC+DFM PLL output by 2
4336                                                          0x2 : Divide LMC+DFM PLL output by 3
4337                                                          0x3 : Divide LMC+DFM PLL output by 4
4338                                                          0x4 : Divide LMC+DFM PLL output by 6
4339                                                          0x5 : Divide LMC+DFM PLL output by 8
4340                                                          0x6 : Divide LMC+DFM PLL output by 12
4341                                                          0x7 : Divide LMC+DFM PLL output by 12
4342                                                          DDR_PS_EN is not used when DDR_DIV_RESET = 1 */
4343         uint64_t diffamp                      : 4;  /**< PLL diffamp input transconductance */
4344         uint64_t cps                          : 3;  /**< PLL charge-pump current */
4345         uint64_t cpb                          : 3;  /**< PLL charge-pump current */
4346         uint64_t reset_n                      : 1;  /**< PLL reset */
4347         uint64_t clkf                         : 7;  /**< Multiply reference by CLKF
4348                                                          32 <= CLKF <= 64
4349                                                          LMC+DFM PLL frequency = 50 * CLKF
4350                                                          min = 1.6 GHz, max = 3.2 GHz */
4351 #else   /* Little-endian bitfield order: same fields as above, reversed. */
4352         uint64_t clkf                         : 7;
4353         uint64_t reset_n                      : 1;
4354         uint64_t cpb                          : 3;
4355         uint64_t cps                          : 3;
4356         uint64_t diffamp                      : 4;
4357         uint64_t ddr_ps_en                    : 3;
4358         uint64_t ddr_div_reset                : 1;
4359         uint64_t dfm_ps_en                    : 3;
4360         uint64_t dfm_div_reset                : 1;
4361         uint64_t jtg_test_mode                : 1;
4362         uint64_t reserved_27_63               : 37;
4363 #endif
4364         } s;
4365         struct cvmx_lmcx_ddr_pll_ctl_s        cn61xx;
4366         struct cvmx_lmcx_ddr_pll_ctl_s        cn63xx;
4367         struct cvmx_lmcx_ddr_pll_ctl_s        cn63xxp1;
4368         struct cvmx_lmcx_ddr_pll_ctl_s        cn66xx;
4369         struct cvmx_lmcx_ddr_pll_ctl_s        cn68xx;
4370         struct cvmx_lmcx_ddr_pll_ctl_s        cn68xxp1;
4371         struct cvmx_lmcx_ddr_pll_ctl_s        cnf71xx;
4372 };
4373 typedef union cvmx_lmcx_ddr_pll_ctl cvmx_lmcx_ddr_pll_ctl_t;
4374
4375 /**
4376  * cvmx_lmc#_delay_cfg
4377  *
4378  * LMC_DELAY_CFG = Open-loop delay line settings
4379  *
4380  *
4381  * Notes:
4382  * The DQ bits add OUTGOING delay only to dq, dqs_[p,n], cb, cbs_[p,n], dqm.  Delay is approximately
4383  * 50-80ps per setting depending on process/voltage.  There is no need to add incoming delay since by
4384  * default all strobe bits are delayed internally by 90 degrees (as was always the case in previous
4385  * passes and past chips).
4386  *
4387  * The CMD bits add delay to all command bits DDR_RAS, DDR_CAS, DDR_A<15:0>, DDR_BA<2:0>, DDR_n_CS<1:0>_L,
4388  * DDR_WE, DDR_CKE and DDR_ODT_<7:0>. Again, delay is 50-80ps per tap.
4389  *
4390  * The CLK bits add delay to all clock signals DDR_CK_<5:0>_P and DDR_CK_<5:0>_N.  Again, delay is
4391  * 50-80ps per tap.
4392  *
4393  * The usage scenario is the following: There is too much delay on command signals and setup on command
4394  * is not met. The user can then delay the clock until setup is met.
4395  *
4396  * At the same time though, dq/dqs should be delayed because there is also a DDR spec tying dqs with
4397  * clock. If clock is too much delayed with respect to dqs, writes will start to fail.
4398  *
4399  * This scheme should eliminate the board need of adding routing delay to clock signals to make high
4400  * frequencies work.
4401  */
4402 union cvmx_lmcx_delay_cfg {
4403         uint64_t u64;                               /**< Whole register as a single 64-bit value. */
4404         struct cvmx_lmcx_delay_cfg_s {
4405 #ifdef __BIG_ENDIAN_BITFIELD
4406         uint64_t reserved_15_63               : 49;
4407         uint64_t dq                           : 5;  /**< Setting for DQ  delay line */
4408         uint64_t cmd                          : 5;  /**< Setting for CMD delay line */
4409         uint64_t clk                          : 5;  /**< Setting for CLK delay line */
4410 #else   /* Little-endian bitfield order: same fields as above, reversed. */
4411         uint64_t clk                          : 5;
4412         uint64_t cmd                          : 5;
4413         uint64_t dq                           : 5;
4414         uint64_t reserved_15_63               : 49;
4415 #endif
4416         } s;
4417         struct cvmx_lmcx_delay_cfg_s          cn30xx;
4418         struct cvmx_lmcx_delay_cfg_cn38xx {   /* cn38xx-family layout: 4-bit settings with reserved gap bits */
4419 #ifdef __BIG_ENDIAN_BITFIELD
4420         uint64_t reserved_14_63               : 50;
4421         uint64_t dq                           : 4;  /**< Setting for DQ  delay line */
4422         uint64_t reserved_9_9                 : 1;
4423         uint64_t cmd                          : 4;  /**< Setting for CMD delay line */
4424         uint64_t reserved_4_4                 : 1;
4425         uint64_t clk                          : 4;  /**< Setting for CLK delay line */
4426 #else   /* Little-endian bitfield order: same fields as above, reversed. */
4427         uint64_t clk                          : 4;
4428         uint64_t reserved_4_4                 : 1;
4429         uint64_t cmd                          : 4;
4430         uint64_t reserved_9_9                 : 1;
4431         uint64_t dq                           : 4;
4432         uint64_t reserved_14_63               : 50;
4433 #endif
4434         } cn38xx;
4435         struct cvmx_lmcx_delay_cfg_cn38xx     cn50xx;
4436         struct cvmx_lmcx_delay_cfg_cn38xx     cn52xx;
4437         struct cvmx_lmcx_delay_cfg_cn38xx     cn52xxp1;
4438         struct cvmx_lmcx_delay_cfg_cn38xx     cn56xx;
4439         struct cvmx_lmcx_delay_cfg_cn38xx     cn56xxp1;
4440         struct cvmx_lmcx_delay_cfg_cn38xx     cn58xx;
4441         struct cvmx_lmcx_delay_cfg_cn38xx     cn58xxp1;
4442 };
4443 typedef union cvmx_lmcx_delay_cfg cvmx_lmcx_delay_cfg_t;
4444
4445 /**
4446  * cvmx_lmc#_dimm#_params
4447  *
4448  * LMC_DIMMX_PARAMS = LMC DIMMX Params
4449  * This register contains values to be programmed into each control word in the corresponding (registered) DIMM. The control words allow
4450  * optimization of the device properties for different raw card designs.
4451  *
4452  * Notes:
4453  * LMC only uses this CSR when LMC*_CONTROL[RDIMM_ENA]=1. During a power-up/init sequence, LMC writes
4454  * these fields into the control words in the JEDEC standard SSTE32882 registering clock driver on an
4455  * RDIMM when corresponding LMC*_DIMM_CTL[DIMM*_WMASK] bits are set.
4456  */
4457 union cvmx_lmcx_dimmx_params {
4458         uint64_t u64;                               /**< Whole register as a single 64-bit value. */
4459         struct cvmx_lmcx_dimmx_params_s {
4460 #ifdef __BIG_ENDIAN_BITFIELD
4461         uint64_t rc15                         : 4;  /**< RC15, Reserved */
4462         uint64_t rc14                         : 4;  /**< RC14, Reserved */
4463         uint64_t rc13                         : 4;  /**< RC13, Reserved */
4464         uint64_t rc12                         : 4;  /**< RC12, Reserved */
4465         uint64_t rc11                         : 4;  /**< RC11, Encoding for RDIMM Operating VDD */
4466         uint64_t rc10                         : 4;  /**< RC10, Encoding for RDIMM Operating Speed */
4467         uint64_t rc9                          : 4;  /**< RC9 , Power Savings Settings Control Word */
4468         uint64_t rc8                          : 4;  /**< RC8 , Additional IBT Settings Control Word */
4469         uint64_t rc7                          : 4;  /**< RC7 , Reserved */
4470         uint64_t rc6                          : 4;  /**< RC6 , Reserved */
4471         uint64_t rc5                          : 4;  /**< RC5 , CK Driver Characteristics Control Word */
4472         uint64_t rc4                          : 4;  /**< RC4 , Control Signals Driver Characteristics Control Word */
4473         uint64_t rc3                          : 4;  /**< RC3 , CA Signals Driver Characteristics Control Word */
4474         uint64_t rc2                          : 4;  /**< RC2 , Timing Control Word */
4475         uint64_t rc1                          : 4;  /**< RC1 , Clock Driver Enable Control Word */
4476         uint64_t rc0                          : 4;  /**< RC0 , Global Features Control Word */
4477 #else   /* Little-endian bitfield order: same fields as above, reversed. */
4478         uint64_t rc0                          : 4;
4479         uint64_t rc1                          : 4;
4480         uint64_t rc2                          : 4;
4481         uint64_t rc3                          : 4;
4482         uint64_t rc4                          : 4;
4483         uint64_t rc5                          : 4;
4484         uint64_t rc6                          : 4;
4485         uint64_t rc7                          : 4;
4486         uint64_t rc8                          : 4;
4487         uint64_t rc9                          : 4;
4488         uint64_t rc10                         : 4;
4489         uint64_t rc11                         : 4;
4490         uint64_t rc12                         : 4;
4491         uint64_t rc13                         : 4;
4492         uint64_t rc14                         : 4;
4493         uint64_t rc15                         : 4;
4494 #endif
4495         } s;
4496         struct cvmx_lmcx_dimmx_params_s       cn61xx;
4497         struct cvmx_lmcx_dimmx_params_s       cn63xx;
4498         struct cvmx_lmcx_dimmx_params_s       cn63xxp1;
4499         struct cvmx_lmcx_dimmx_params_s       cn66xx;
4500         struct cvmx_lmcx_dimmx_params_s       cn68xx;
4501         struct cvmx_lmcx_dimmx_params_s       cn68xxp1;
4502         struct cvmx_lmcx_dimmx_params_s       cnf71xx;
4503 };
4504 typedef union cvmx_lmcx_dimmx_params cvmx_lmcx_dimmx_params_t;
4505
4506 /**
4507  * cvmx_lmc#_dimm_ctl
4508  *
4509  * LMC_DIMM_CTL = LMC DIMM Control
4510  *
4511  *
4512  * Notes:
4513  * This CSR is only used when LMC*_CONTROL[RDIMM_ENA]=1. During a power-up/init sequence, this CSR
4514  * controls LMC's writes to the control words in the JEDEC standard SSTE32882 registering clock driver
4515  * on an RDIMM.
4516  */
4517 union cvmx_lmcx_dimm_ctl {
4518         uint64_t u64;                               /**< Whole register as a single 64-bit value. */
4519         struct cvmx_lmcx_dimm_ctl_s {
4520 #ifdef __BIG_ENDIAN_BITFIELD
4521         uint64_t reserved_46_63               : 18;
4522         uint64_t parity                       : 1;  /**< Parity
4523                                                          The PAR_IN input of a registered DIMM should be
4524                                                          tied off. LMC adjusts the value of the DDR_WE_L (DWE#)
4525                                                          pin during DDR3 register part control word writes to
4526                                                          ensure the parity is observed correctly by the receiving
4527                                                          SSTE32882 register part.
4528                                                          When PAR_IN is grounded, PARITY should be cleared to 0. */
4529         uint64_t tcws                         : 13; /**< LMC waits for this time period before and after a RDIMM
4530                                                          Control Word Access during a power-up/init SEQUENCE.
4531                                                          TCWS is in multiples of 8 CK cycles.
4532                                                          Set TCWS (CSR field) = RNDUP[tcws(ns)/(8*tCYC(ns))],
4533                                                          where tCWS is the desired time (ns), and tCYC(ns)
4534                                                          is the DDR clock frequency (not data rate).
4535                                                          TYP=0x4e0 (equivalent to 15us) when changing
4536                                                          clock timing (RC2.DBA1, RC6.DA4, RC10.DA3, RC10.DA4,
4537                                                          RC11.DA3, and RC11.DA4)
4538                                                          TYP=0x8, otherwise
4539                                                          0x0 = Reserved */
4540         uint64_t dimm1_wmask                  : 16; /**< DIMM1 Write Mask
4541                                                          if (DIMM1_WMASK[n] = 1)
4542                                                              Write DIMM1.RCn */
4543         uint64_t dimm0_wmask                  : 16; /**< DIMM0 Write Mask
4544                                                          if (DIMM0_WMASK[n] = 1)
4545                                                              Write DIMM0.RCn */
4546 #else   /* Little-endian bitfield order: same fields as above, reversed. */
4547         uint64_t dimm0_wmask                  : 16;
4548         uint64_t dimm1_wmask                  : 16;
4549         uint64_t tcws                         : 13;
4550         uint64_t parity                       : 1;
4551         uint64_t reserved_46_63               : 18;
4552 #endif
4553         } s;
4554         struct cvmx_lmcx_dimm_ctl_s           cn61xx;
4555         struct cvmx_lmcx_dimm_ctl_s           cn63xx;
4556         struct cvmx_lmcx_dimm_ctl_s           cn63xxp1;
4557         struct cvmx_lmcx_dimm_ctl_s           cn66xx;
4558         struct cvmx_lmcx_dimm_ctl_s           cn68xx;
4559         struct cvmx_lmcx_dimm_ctl_s           cn68xxp1;
4560         struct cvmx_lmcx_dimm_ctl_s           cnf71xx;
4561 };
4562 typedef union cvmx_lmcx_dimm_ctl cvmx_lmcx_dimm_ctl_t;
4563
4564 /**
4565  * cvmx_lmc#_dll_ctl
4566  *
4567  * LMC_DLL_CTL = LMC DLL control and DCLK reset
4568  *
4569  */
4570 union cvmx_lmcx_dll_ctl {
4571         uint64_t u64;                               /**< Whole register as a single 64-bit value. */
4572         struct cvmx_lmcx_dll_ctl_s {
4573 #ifdef __BIG_ENDIAN_BITFIELD
4574         uint64_t reserved_8_63                : 56;
4575         uint64_t dreset                       : 1;  /**< Dclk domain reset.  The reset signal that is used by the
4576                                                          Dclk domain is (DRESET || ECLK_RESET). */
4577         uint64_t dll90_byp                    : 1;  /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be
4578                                                          bypassed and the setting is defined by DLL90_VLU */
4579         uint64_t dll90_ena                    : 1;  /**< DDR Quad DLL Enable: A 0->1 transition on this bit after
4580                                                          DCLK init sequence resets the DDR 90 DLL. Should
4581                                                          happen at startup before any activity in DDR. QDLL_ENA
4582                                                          must not transition 1->0 outside of a DRESET sequence
4583                                                          (i.e. it must remain 1 until the next DRESET).
4584                                                          DRESET should be asserted before and for 10 usec
4585                                                          following the 0->1 transition on QDLL_ENA.
4586                                                          (NOTE(review): "QDLL_ENA" appears to refer to this
4587                                                          DLL90_ENA field -- confirm against the HRM.) */
4588         uint64_t dll90_vlu                    : 5;  /**< Contains the open loop setting value for the DDR90 delay
4589                                                          line. */
4590 #else   /* Little-endian bitfield order: same fields as above, reversed. */
4591         uint64_t dll90_vlu                    : 5;
4592         uint64_t dll90_ena                    : 1;
4593         uint64_t dll90_byp                    : 1;
4594         uint64_t dreset                       : 1;
4595         uint64_t reserved_8_63                : 56;
4596 #endif
4597         } s;
4598         struct cvmx_lmcx_dll_ctl_s            cn52xx;
4599         struct cvmx_lmcx_dll_ctl_s            cn52xxp1;
4600         struct cvmx_lmcx_dll_ctl_s            cn56xx;
4601         struct cvmx_lmcx_dll_ctl_s            cn56xxp1;
4602 };
4603 typedef union cvmx_lmcx_dll_ctl cvmx_lmcx_dll_ctl_t;
4602
4603 /**
4604  * cvmx_lmc#_dll_ctl2
4605  *
4606  * LMC_DLL_CTL2 = LMC (Octeon) DLL control and DCLK reset
4607  *
4608  *
4609  * Notes:
4610  * DLL Bringup sequence:
4611  * 1. If not done already, set LMC*_DLL_CTL2 = 0, except when LMC*_DLL_CTL2[DRESET] = 1.
4612  * 2. Write 1 to LMC*_DLL_CTL2[DLL_BRINGUP]
4613  * 3. Wait for 10 CK cycles, then write 1 to LMC*_DLL_CTL2[QUAD_DLL_ENA]. It may not be feasible to count 10 CK cycles, but the
4614  *    idea is to configure the delay line into DLL mode by asserting DLL_BRING_UP earlier than [QUAD_DLL_ENA], even if it is one
4615  *    cycle early. LMC*_DLL_CTL2[QUAD_DLL_ENA] must not change after this point without restarting the LMC and/or DRESET initialization
4616  *    sequence.
4617  * 4. Read L2D_BST0 and wait for the result. (L2D_BST0 is subject to change depending on how it is called in o63. It is still ok to go
4618  *    without step 4, since step 5 has enough time)
4619  * 5. Wait 10 us.
4620  * 6. Write 0 to LMC*_DLL_CTL2[DLL_BRINGUP]. LMC*_DLL_CTL2[DLL_BRINGUP] must not change after this point without restarting the LMC
4621  *    and/or DRESET initialization sequence.
4622  * 7. Read L2D_BST0 and wait for the result. (same as step 4, but the idea here is to wait some time before going to step 8; even a
4623  *    wait of one cycle is fine)
4624  * 8. Write 0 to LMC*_DLL_CTL2[DRESET].  LMC*_DLL_CTL2[DRESET] must not change after this point without restarting the LMC and/or
4625  *    DRESET initialization sequence.
4626  */
4627 union cvmx_lmcx_dll_ctl2 {
4628         uint64_t u64;                               /**< Whole register as a single 64-bit value. */
4629         struct cvmx_lmcx_dll_ctl2_s {
4630 #ifdef __BIG_ENDIAN_BITFIELD
4631         uint64_t reserved_16_63               : 48;
4632         uint64_t intf_en                      : 1;  /**< Interface Enable */
4633         uint64_t dll_bringup                  : 1;  /**< DLL Bringup */
4634         uint64_t dreset                       : 1;  /**< Dclk domain reset.  The reset signal that is used by the
4635                                                          Dclk domain is (DRESET || ECLK_RESET). */
4636         uint64_t quad_dll_ena                 : 1;  /**< DLL Enable */
4637         uint64_t byp_sel                      : 4;  /**< Bypass select
4638                                                          0000 : no byte
4639                                                          0001 : byte 0
4640                                                          - ...
4641                                                          1001 : byte 8
4642                                                          1010 : all bytes
4643                                                          1011-1111 : Reserved */
4644         uint64_t byp_setting                  : 8;  /**< Bypass setting
4645                                                          DDR3-1600: 00100010
4646                                                          DDR3-1333: 00110010
4647                                                          DDR3-1066: 01001011
4648                                                          DDR3-800 : 01110101
4649                                                          DDR3-667 : 10010110
4650                                                          DDR3-600 : 10101100 */
4651 #else   /* Little-endian bitfield order: same fields as above, reversed. */
4652         uint64_t byp_setting                  : 8;
4653         uint64_t byp_sel                      : 4;
4654         uint64_t quad_dll_ena                 : 1;
4655         uint64_t dreset                       : 1;
4656         uint64_t dll_bringup                  : 1;
4657         uint64_t intf_en                      : 1;
4658         uint64_t reserved_16_63               : 48;
4659 #endif
4660         } s;
4661         struct cvmx_lmcx_dll_ctl2_s           cn61xx;
4662         struct cvmx_lmcx_dll_ctl2_cn63xx {    /* cn63xx-family layout: no INTF_EN field (bits 15..63 reserved) */
4663 #ifdef __BIG_ENDIAN_BITFIELD
4664         uint64_t reserved_15_63               : 49;
4665         uint64_t dll_bringup                  : 1;  /**< DLL Bringup */
4666         uint64_t dreset                       : 1;  /**< Dclk domain reset.  The reset signal that is used by the
4667                                                          Dclk domain is (DRESET || ECLK_RESET). */
4668         uint64_t quad_dll_ena                 : 1;  /**< DLL Enable */
4669         uint64_t byp_sel                      : 4;  /**< Bypass select
4670                                                          0000 : no byte
4671                                                          0001 : byte 0
4672                                                          - ...
4673                                                          1001 : byte 8
4674                                                          1010 : all bytes
4675                                                          1011-1111 : Reserved */
4676         uint64_t byp_setting                  : 8;  /**< Bypass setting
4677                                                          DDR3-1600: 00100010
4678                                                          DDR3-1333: 00110010
4679                                                          DDR3-1066: 01001011
4680                                                          DDR3-800 : 01110101
4681                                                          DDR3-667 : 10010110
4682                                                          DDR3-600 : 10101100 */
4683 #else   /* Little-endian bitfield order: same fields as above, reversed. */
4684         uint64_t byp_setting                  : 8;
4685         uint64_t byp_sel                      : 4;
4686         uint64_t quad_dll_ena                 : 1;
4687         uint64_t dreset                       : 1;
4688         uint64_t dll_bringup                  : 1;
4689         uint64_t reserved_15_63               : 49;
4690 #endif
4691         } cn63xx;
4692         struct cvmx_lmcx_dll_ctl2_cn63xx      cn63xxp1;
4693         struct cvmx_lmcx_dll_ctl2_cn63xx      cn66xx;
4694         struct cvmx_lmcx_dll_ctl2_s           cn68xx;
4695         struct cvmx_lmcx_dll_ctl2_s           cn68xxp1;
4696         struct cvmx_lmcx_dll_ctl2_s           cnf71xx;
4697 };
4698 typedef union cvmx_lmcx_dll_ctl2 cvmx_lmcx_dll_ctl2_t;
4699
/**
 * cvmx_lmc#_dll_ctl3
 *
 * LMC_DLL_CTL3 = LMC DLL control and DCLK reset
 *
 */
union cvmx_lmcx_dll_ctl3 {
        uint64_t u64;
        struct cvmx_lmcx_dll_ctl3_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_41_63               : 23;
        uint64_t dclk90_fwd                   : 1;  /**< Forward setting
                                                         0 : disable
                                                         1 : forward (generates a 1 cycle pulse to forward setting)
                                                         This register is oneshot and clears itself each time
                                                         it is set */
        uint64_t ddr_90_dly_byp               : 1;  /**< Bypass DDR90_DLY in Clock Tree */
        uint64_t dclk90_recal_dis             : 1;  /**< Disable periodic recalibration of DDR90 Delay Line in */
        uint64_t dclk90_byp_sel               : 1;  /**< Bypass Setting Select for DDR90 Delay Line */
        uint64_t dclk90_byp_setting           : 8;  /**< Bypass Setting for DDR90 Delay Line */
        uint64_t dll_fast                     : 1;  /**< DLL lock
                                                         0 = DLL locked */
        uint64_t dll90_setting                : 8;  /**< Encoded DLL settings. Works in conjunction with
                                                         DLL90_BYTE_SEL */
        uint64_t fine_tune_mode               : 1;  /**< DLL Fine Tune Mode
                                                         0 = disabled
                                                         1 = enable.
                                                         When enabled, calibrate internal PHY DLL every
                                                         LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
        uint64_t dll_mode                     : 1;  /**< DLL Mode */
        uint64_t dll90_byte_sel               : 4;  /**< Observe DLL settings for selected byte
                                                         0001 : byte 0
                                                         - ...
                                                         1001 : byte 8
                                                         0000,1010-1111 : Reserved */
        uint64_t offset_ena                   : 1;  /**< Offset enable
                                                         0 = disable
                                                         1 = enable */
        uint64_t load_offset                  : 1;  /**< Load offset
                                                         0 : disable
                                                         1 : load (generates a 1 cycle pulse to the PHY)
                                                         This register is oneshot and clears itself each time
                                                         it is set */
        uint64_t mode_sel                     : 2;  /**< Mode select
                                                         00 : reset
                                                         01 : write
                                                         10 : read
                                                         11 : write & read */
        uint64_t byte_sel                     : 4;  /**< Byte select
                                                         0000 : no byte
                                                         0001 : byte 0
                                                         - ...
                                                         1001 : byte 8
                                                         1010 : all bytes
                                                         1011-1111 : Reserved */
        uint64_t offset                       : 6;  /**< Write/read offset setting
                                                         [4:0] : offset
                                                         [5]   : 0 = increment, 1 = decrement
                                                         Not a 2's complement value */
#else
        uint64_t offset                       : 6;
        uint64_t byte_sel                     : 4;
        uint64_t mode_sel                     : 2;
        uint64_t load_offset                  : 1;
        uint64_t offset_ena                   : 1;
        uint64_t dll90_byte_sel               : 4;
        uint64_t dll_mode                     : 1;
        uint64_t fine_tune_mode               : 1;
        uint64_t dll90_setting                : 8;
        uint64_t dll_fast                     : 1;
        uint64_t dclk90_byp_setting           : 8;
        uint64_t dclk90_byp_sel               : 1;
        uint64_t dclk90_recal_dis             : 1;
        uint64_t ddr_90_dly_byp               : 1;
        uint64_t dclk90_fwd                   : 1;
        uint64_t reserved_41_63               : 23;
#endif
        } s;
        struct cvmx_lmcx_dll_ctl3_s           cn61xx;
        struct cvmx_lmcx_dll_ctl3_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_29_63               : 35;
        uint64_t dll_fast                     : 1;  /**< DLL lock
                                                         0 = DLL locked */
        uint64_t dll90_setting                : 8;  /**< Encoded DLL settings. Works in conjunction with
                                                         DLL90_BYTE_SEL */
        uint64_t fine_tune_mode               : 1;  /**< DLL Fine Tune Mode
                                                         0 = disabled
                                                         1 = enable.
                                                         When enabled, calibrate internal PHY DLL every
                                                         LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
        uint64_t dll_mode                     : 1;  /**< DLL Mode */
        uint64_t dll90_byte_sel               : 4;  /**< Observe DLL settings for selected byte
                                                         0001 : byte 0
                                                         - ...
                                                         1001 : byte 8
                                                         0000,1010-1111 : Reserved */
        uint64_t offset_ena                   : 1;  /**< Offset enable
                                                         0 = disable
                                                         1 = enable */
        uint64_t load_offset                  : 1;  /**< Load offset
                                                         0 : disable
                                                         1 : load (generates a 1 cycle pulse to the PHY)
                                                         This register is oneshot and clears itself each time
                                                         it is set */
        uint64_t mode_sel                     : 2;  /**< Mode select
                                                         00 : reset
                                                         01 : write
                                                         10 : read
                                                         11 : write & read */
        uint64_t byte_sel                     : 4;  /**< Byte select
                                                         0000 : no byte
                                                         0001 : byte 0
                                                         - ...
                                                         1001 : byte 8
                                                         1010 : all bytes
                                                         1011-1111 : Reserved */
        uint64_t offset                       : 6;  /**< Write/read offset setting
                                                         [4:0] : offset
                                                         [5]   : 0 = increment, 1 = decrement
                                                         Not a 2's complement value */
#else
        uint64_t offset                       : 6;
        uint64_t byte_sel                     : 4;
        uint64_t mode_sel                     : 2;
        uint64_t load_offset                  : 1;
        uint64_t offset_ena                   : 1;
        uint64_t dll90_byte_sel               : 4;
        uint64_t dll_mode                     : 1;
        uint64_t fine_tune_mode               : 1;
        uint64_t dll90_setting                : 8;
        uint64_t dll_fast                     : 1;
        uint64_t reserved_29_63               : 35;
#endif
        } cn63xx;
        struct cvmx_lmcx_dll_ctl3_cn63xx      cn63xxp1;
        struct cvmx_lmcx_dll_ctl3_cn63xx      cn66xx;
        struct cvmx_lmcx_dll_ctl3_s           cn68xx;
        struct cvmx_lmcx_dll_ctl3_s           cn68xxp1;
        struct cvmx_lmcx_dll_ctl3_s           cnf71xx;
};
typedef union cvmx_lmcx_dll_ctl3 cvmx_lmcx_dll_ctl3_t;
4842
/**
 * cvmx_lmc#_dual_memcfg
 *
 * LMC_DUAL_MEMCFG = LMC Dual Memory Configuration Register
 *
 * This register controls certain parameters of Dual Memory Configuration
 *
 * Notes:
 * This register enables the design to have two, separate memory configurations, selected dynamically
 * by the reference address.  Note however, that both configurations share
 * LMC*_CONTROL[XOR_BANK], LMC*_CONFIG[PBANK_LSB], LMC*_CONFIG[RANK_ENA], and all timing parameters.
 * In this description, "config0" refers to the normal memory configuration that is defined by the
 * LMC*_CONFIG[ROW_LSB] parameters and "config1" refers to the dual (or second)
 * memory configuration that is defined by this register.
 *
 * Enable mask to chip select mapping is shown below:
 *   CS_MASK[3] -> DIMM1_CS_<1>
 *   CS_MASK[2] -> DIMM1_CS_<0>
 *
 *   CS_MASK[1] -> DIMM0_CS_<1>
 *   CS_MASK[0] -> DIMM0_CS_<0>
 *
 *  DIMM n uses the pair of chip selects DIMMn_CS_<1:0>.
 *
 *  Programming restrictions for CS_MASK:
 *    when LMC*_CONFIG[RANK_ENA] == 0, CS_MASK[2n + 1] = CS_MASK[2n]
 */
union cvmx_lmcx_dual_memcfg {
        uint64_t u64;
        struct cvmx_lmcx_dual_memcfg_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_20_63               : 44;
        uint64_t bank8                        : 1;  /**< See LMC_DDR2_CTL[BANK8] */
        uint64_t row_lsb                      : 3;  /**< See LMC*_CONFIG[ROW_LSB] */
        uint64_t reserved_8_15                : 8;
        uint64_t cs_mask                      : 8;  /**< Chip select mask.
                                                         This mask corresponds to the 8 chip selects for a memory
                                                         configuration.  Each reference address will assert one of
                                                         the chip selects.  If that chip select has its
                                                         corresponding CS_MASK bit set, then the "config1"
                                                         parameters are used, otherwise the "config0" parameters
                                                         are used.  See additional notes below.
                                                         [7:4] *UNUSED IN 6xxx* */
#else
        uint64_t cs_mask                      : 8;
        uint64_t reserved_8_15                : 8;
        uint64_t row_lsb                      : 3;
        uint64_t bank8                        : 1;
        uint64_t reserved_20_63               : 44;
#endif
        } s;
        struct cvmx_lmcx_dual_memcfg_s        cn50xx;
        struct cvmx_lmcx_dual_memcfg_s        cn52xx;
        struct cvmx_lmcx_dual_memcfg_s        cn52xxp1;
        struct cvmx_lmcx_dual_memcfg_s        cn56xx;
        struct cvmx_lmcx_dual_memcfg_s        cn56xxp1;
        struct cvmx_lmcx_dual_memcfg_s        cn58xx;
        struct cvmx_lmcx_dual_memcfg_s        cn58xxp1;
        struct cvmx_lmcx_dual_memcfg_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_19_63               : 45;
        uint64_t row_lsb                      : 3;  /**< See LMC*_CONFIG[ROW_LSB] */
        uint64_t reserved_8_15                : 8;
        uint64_t cs_mask                      : 8;  /**< Chip select mask.
                                                         This mask corresponds to the 8 chip selects for a memory
                                                         configuration.  Each reference address will assert one of
                                                         the chip selects.  If that chip select has its
                                                         corresponding CS_MASK bit set, then the "config1"
                                                         parameters are used, otherwise the "config0" parameters
                                                         are used.  See additional notes below.
                                                         [7:4] *UNUSED IN 6xxx* */
#else
        uint64_t cs_mask                      : 8;
        uint64_t reserved_8_15                : 8;
        uint64_t row_lsb                      : 3;
        uint64_t reserved_19_63               : 45;
#endif
        } cn61xx;
        struct cvmx_lmcx_dual_memcfg_cn61xx   cn63xx;
        struct cvmx_lmcx_dual_memcfg_cn61xx   cn63xxp1;
        struct cvmx_lmcx_dual_memcfg_cn61xx   cn66xx;
        struct cvmx_lmcx_dual_memcfg_cn61xx   cn68xx;
        struct cvmx_lmcx_dual_memcfg_cn61xx   cn68xxp1;
        struct cvmx_lmcx_dual_memcfg_cn61xx   cnf71xx;
};
typedef union cvmx_lmcx_dual_memcfg cvmx_lmcx_dual_memcfg_t;
4929
/**
 * cvmx_lmc#_ecc_synd
 *
 * LMC_ECC_SYND = MRD ECC Syndromes
 *
 */
union cvmx_lmcx_ecc_synd {
        uint64_t u64;
        struct cvmx_lmcx_ecc_synd_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_32_63               : 32;
        uint64_t mrdsyn3                      : 8;  /**< MRD ECC Syndrome Quad3
                                                         MRDSYN3 corresponds to DQ[63:0]_c1_p1
                                                         In 32b mode, ecc is calculated on 4 cycle worth of data
                                                         MRDSYN3 corresponds to [DQ[31:0]_c3_p1, DQ[31:0]_c3_p0]
                                                           where _cC_pP denotes cycle C and phase P */
        uint64_t mrdsyn2                      : 8;  /**< MRD ECC Syndrome Quad2
                                                         MRDSYN2 corresponds to DQ[63:0]_c1_p0
                                                         In 32b mode, ecc is calculated on 4 cycle worth of data
                                                         MRDSYN2 corresponds to [DQ[31:0]_c2_p1, DQ[31:0]_c2_p0]
                                                           where _cC_pP denotes cycle C and phase P */
        uint64_t mrdsyn1                      : 8;  /**< MRD ECC Syndrome Quad1
                                                         MRDSYN1 corresponds to DQ[63:0]_c0_p1
                                                         In 32b mode, ecc is calculated on 4 cycle worth of data
                                                         MRDSYN1 corresponds to [DQ[31:0]_c1_p1, DQ[31:0]_c1_p0]
                                                           where _cC_pP denotes cycle C and phase P */
        uint64_t mrdsyn0                      : 8;  /**< MRD ECC Syndrome Quad0
                                                         MRDSYN0 corresponds to DQ[63:0]_c0_p0
                                                         In 32b mode, ecc is calculated on 4 cycle worth of data
                                                         MRDSYN0 corresponds to [DQ[31:0]_c0_p1, DQ[31:0]_c0_p0]
                                                           where _cC_pP denotes cycle C and phase P */
#else
        uint64_t mrdsyn0                      : 8;
        uint64_t mrdsyn1                      : 8;
        uint64_t mrdsyn2                      : 8;
        uint64_t mrdsyn3                      : 8;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        struct cvmx_lmcx_ecc_synd_s           cn30xx;
        struct cvmx_lmcx_ecc_synd_s           cn31xx;
        struct cvmx_lmcx_ecc_synd_s           cn38xx;
        struct cvmx_lmcx_ecc_synd_s           cn38xxp2;
        struct cvmx_lmcx_ecc_synd_s           cn50xx;
        struct cvmx_lmcx_ecc_synd_s           cn52xx;
        struct cvmx_lmcx_ecc_synd_s           cn52xxp1;
        struct cvmx_lmcx_ecc_synd_s           cn56xx;
        struct cvmx_lmcx_ecc_synd_s           cn56xxp1;
        struct cvmx_lmcx_ecc_synd_s           cn58xx;
        struct cvmx_lmcx_ecc_synd_s           cn58xxp1;
        struct cvmx_lmcx_ecc_synd_s           cn61xx;
        struct cvmx_lmcx_ecc_synd_s           cn63xx;
        struct cvmx_lmcx_ecc_synd_s           cn63xxp1;
        struct cvmx_lmcx_ecc_synd_s           cn66xx;
        struct cvmx_lmcx_ecc_synd_s           cn68xx;
        struct cvmx_lmcx_ecc_synd_s           cn68xxp1;
        struct cvmx_lmcx_ecc_synd_s           cnf71xx;
};
typedef union cvmx_lmcx_ecc_synd cvmx_lmcx_ecc_synd_t;
4989
/**
 * cvmx_lmc#_fadr
 *
 * LMC_FADR = LMC Failing Address Register (SEC/DED/NXM)
 *
 * This register only captures the first transaction with ecc/nxm errors. A DED/NXM error can
 * over-write this register with its failing addresses if the first error was a SEC. If you write
 * LMC*_CONFIG->SEC_ERR/DED_ERR/NXM_ERR then it will clear the error bits and capture the
 * next failing address.
 *
 * If FDIMM is 2 that means the error is in the higher bits DIMM.
 *
 * Notes:
 * LMC*_FADR captures the failing pre-scrambled address location (split into dimm, bunk, bank, etc). If
 * scrambling is off, then LMC*_FADR will also capture the failing physical location in the DRAM parts.
 *
 * LMC*_SCRAMBLED_FADR captures the actual failing address location in the physical DRAM parts, i.e.,
 * a. if scrambling is on, LMC*_SCRAMBLE_FADR contains the failing physical location in the DRAM parts (split
 *    into dimm, bunk, bank, etc)
 * b. if scrambling is off, the pre-scramble and post-scramble addresses are the same, and so the contents of
 *    LMC*_SCRAMBLED_FADR match the contents of LMC*_FADR
 */
union cvmx_lmcx_fadr {
        uint64_t u64;
        struct cvmx_lmcx_fadr_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_0_63                : 64;
#else
        uint64_t reserved_0_63                : 64;
#endif
        } s;
        struct cvmx_lmcx_fadr_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_32_63               : 32;
        uint64_t fdimm                        : 2;  /**< Failing DIMM# */
        uint64_t fbunk                        : 1;  /**< Failing Rank */
        uint64_t fbank                        : 3;  /**< Failing Bank[2:0] */
        uint64_t frow                         : 14; /**< Failing Row Address[13:0] */
        uint64_t fcol                         : 12; /**< Failing Column Start Address[11:0]
                                                         Represents the Failing read's starting column address
                                                         (and not the exact column address in which the SEC/DED
                                                         was detected) */
#else
        uint64_t fcol                         : 12;
        uint64_t frow                         : 14;
        uint64_t fbank                        : 3;
        uint64_t fbunk                        : 1;
        uint64_t fdimm                        : 2;
        uint64_t reserved_32_63               : 32;
#endif
        } cn30xx;
        struct cvmx_lmcx_fadr_cn30xx          cn31xx;
        struct cvmx_lmcx_fadr_cn30xx          cn38xx;
        struct cvmx_lmcx_fadr_cn30xx          cn38xxp2;
        struct cvmx_lmcx_fadr_cn30xx          cn50xx;
        struct cvmx_lmcx_fadr_cn30xx          cn52xx;
        struct cvmx_lmcx_fadr_cn30xx          cn52xxp1;
        struct cvmx_lmcx_fadr_cn30xx          cn56xx;
        struct cvmx_lmcx_fadr_cn30xx          cn56xxp1;
        struct cvmx_lmcx_fadr_cn30xx          cn58xx;
        struct cvmx_lmcx_fadr_cn30xx          cn58xxp1;
        struct cvmx_lmcx_fadr_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_36_63               : 28;
        uint64_t fdimm                        : 2;  /**< Failing DIMM# */
        uint64_t fbunk                        : 1;  /**< Failing Rank */
        uint64_t fbank                        : 3;  /**< Failing Bank[2:0] */
        uint64_t frow                         : 16; /**< Failing Row Address[15:0] */
        uint64_t fcol                         : 14; /**< Failing Column Address[13:0]
                                                         Technically, represents the address of the 128b data
                                                         that had an ecc error, i.e., fcol[0] is always 0. Can
                                                         be used in conjunction with LMC*_CONFIG[DED_ERR] to
                                                         isolate the 64b chunk of data in error */
#else
        uint64_t fcol                         : 14;
        uint64_t frow                         : 16;
        uint64_t fbank                        : 3;
        uint64_t fbunk                        : 1;
        uint64_t fdimm                        : 2;
        uint64_t reserved_36_63               : 28;
#endif
        } cn61xx;
        struct cvmx_lmcx_fadr_cn61xx          cn63xx;
        struct cvmx_lmcx_fadr_cn61xx          cn63xxp1;
        struct cvmx_lmcx_fadr_cn61xx          cn66xx;
        struct cvmx_lmcx_fadr_cn61xx          cn68xx;
        struct cvmx_lmcx_fadr_cn61xx          cn68xxp1;
        struct cvmx_lmcx_fadr_cn61xx          cnf71xx;
};
typedef union cvmx_lmcx_fadr cvmx_lmcx_fadr_t;
5080
/**
 * cvmx_lmc#_ifb_cnt
 *
 * LMC_IFB_CNT  = Performance Counters
 *
 */
union cvmx_lmcx_ifb_cnt {
        uint64_t u64;
        struct cvmx_lmcx_ifb_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t ifbcnt                       : 64; /**< Performance Counter
                                                         64-bit counter that increments every
                                                         CK cycle there is something in the in-flight buffer. */
#else
        uint64_t ifbcnt                       : 64;
#endif
        } s;
        struct cvmx_lmcx_ifb_cnt_s            cn61xx;
        struct cvmx_lmcx_ifb_cnt_s            cn63xx;
        struct cvmx_lmcx_ifb_cnt_s            cn63xxp1;
        struct cvmx_lmcx_ifb_cnt_s            cn66xx;
        struct cvmx_lmcx_ifb_cnt_s            cn68xx;
        struct cvmx_lmcx_ifb_cnt_s            cn68xxp1;
        struct cvmx_lmcx_ifb_cnt_s            cnf71xx;
};
typedef union cvmx_lmcx_ifb_cnt cvmx_lmcx_ifb_cnt_t;
5107
/**
 * cvmx_lmc#_ifb_cnt_hi
 *
 * LMC_IFB_CNT_HI  = Performance Counters
 *
 */
union cvmx_lmcx_ifb_cnt_hi {
        uint64_t u64;
        struct cvmx_lmcx_ifb_cnt_hi_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_32_63               : 32;
        uint64_t ifbcnt_hi                    : 32; /**< Performance Counter to measure Bus Utilization
                                                         Upper 32-bits of 64-bit counter that increments every
                                                         cycle there is something in the in-flight buffer. */
#else
        uint64_t ifbcnt_hi                    : 32;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        struct cvmx_lmcx_ifb_cnt_hi_s         cn30xx;
        struct cvmx_lmcx_ifb_cnt_hi_s         cn31xx;
        struct cvmx_lmcx_ifb_cnt_hi_s         cn38xx;
        struct cvmx_lmcx_ifb_cnt_hi_s         cn38xxp2;
        struct cvmx_lmcx_ifb_cnt_hi_s         cn50xx;
        struct cvmx_lmcx_ifb_cnt_hi_s         cn52xx;
        struct cvmx_lmcx_ifb_cnt_hi_s         cn52xxp1;
        struct cvmx_lmcx_ifb_cnt_hi_s         cn56xx;
        struct cvmx_lmcx_ifb_cnt_hi_s         cn56xxp1;
        struct cvmx_lmcx_ifb_cnt_hi_s         cn58xx;
        struct cvmx_lmcx_ifb_cnt_hi_s         cn58xxp1;
};
typedef union cvmx_lmcx_ifb_cnt_hi cvmx_lmcx_ifb_cnt_hi_t;
5140
/**
 * cvmx_lmc#_ifb_cnt_lo
 *
 * LMC_IFB_CNT_LO  = Performance Counters
 *
 */
union cvmx_lmcx_ifb_cnt_lo {
        uint64_t u64;
        struct cvmx_lmcx_ifb_cnt_lo_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_32_63               : 32;
        uint64_t ifbcnt_lo                    : 32; /**< Performance Counter
                                                         Low 32-bits of 64-bit counter that increments every
                                                         cycle there is something in the in-flight buffer. */
#else
        uint64_t ifbcnt_lo                    : 32;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        struct cvmx_lmcx_ifb_cnt_lo_s         cn30xx;
        struct cvmx_lmcx_ifb_cnt_lo_s         cn31xx;
        struct cvmx_lmcx_ifb_cnt_lo_s         cn38xx;
        struct cvmx_lmcx_ifb_cnt_lo_s         cn38xxp2;
        struct cvmx_lmcx_ifb_cnt_lo_s         cn50xx;
        struct cvmx_lmcx_ifb_cnt_lo_s         cn52xx;
        struct cvmx_lmcx_ifb_cnt_lo_s         cn52xxp1;
        struct cvmx_lmcx_ifb_cnt_lo_s         cn56xx;
        struct cvmx_lmcx_ifb_cnt_lo_s         cn56xxp1;
        struct cvmx_lmcx_ifb_cnt_lo_s         cn58xx;
        struct cvmx_lmcx_ifb_cnt_lo_s         cn58xxp1;
};
typedef union cvmx_lmcx_ifb_cnt_lo cvmx_lmcx_ifb_cnt_lo_t;
5173
/**
 * cvmx_lmc#_int
 *
 * LMC_INT = LMC Interrupt Register
 *
 */
union cvmx_lmcx_int {
        uint64_t u64;
        struct cvmx_lmcx_int_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_9_63                : 55;
        uint64_t ded_err                      : 4;  /**< Double Error detected (DED) of Rd Data
                                                         [0] corresponds to DQ[63:0]_c0_p0
                                                         [1] corresponds to DQ[63:0]_c0_p1
                                                         [2] corresponds to DQ[63:0]_c1_p0
                                                         [3] corresponds to DQ[63:0]_c1_p1
                                                         In 32b mode, ecc is calculated on 4 cycle worth of data
                                                         [0] corresponds to [DQ[31:0]_c0_p1, DQ[31:0]_c0_p0]
                                                         [1] corresponds to [DQ[31:0]_c1_p1, DQ[31:0]_c1_p0]
                                                         [2] corresponds to [DQ[31:0]_c2_p1, DQ[31:0]_c2_p0]
                                                         [3] corresponds to [DQ[31:0]_c3_p1, DQ[31:0]_c3_p0]
                                                          where _cC_pP denotes cycle C and phase P
                                                          Write of 1 will clear the corresponding error bit */
        uint64_t sec_err                      : 4;  /**< Single Error (corrected) of Rd Data
                                                         [0] corresponds to DQ[63:0]_c0_p0
                                                         [1] corresponds to DQ[63:0]_c0_p1
                                                         [2] corresponds to DQ[63:0]_c1_p0
                                                         [3] corresponds to DQ[63:0]_c1_p1
                                                         In 32b mode, ecc is calculated on 4 cycle worth of data
                                                         [0] corresponds to [DQ[31:0]_c0_p1, DQ[31:0]_c0_p0]
                                                         [1] corresponds to [DQ[31:0]_c1_p1, DQ[31:0]_c1_p0]
                                                         [2] corresponds to [DQ[31:0]_c2_p1, DQ[31:0]_c2_p0]
                                                         [3] corresponds to [DQ[31:0]_c3_p1, DQ[31:0]_c3_p0]
                                                          where _cC_pP denotes cycle C and phase P
                                                          Write of 1 will clear the corresponding error bit */
        uint64_t nxm_wr_err                   : 1;  /**< Write to non-existent memory
                                                         Write of 1 will clear the corresponding error bit */
#else
        uint64_t nxm_wr_err                   : 1;
        uint64_t sec_err                      : 4;
        uint64_t ded_err                      : 4;
        uint64_t reserved_9_63                : 55;
#endif
        } s;
        struct cvmx_lmcx_int_s                cn61xx;
        struct cvmx_lmcx_int_s                cn63xx;
        struct cvmx_lmcx_int_s                cn63xxp1;
        struct cvmx_lmcx_int_s                cn66xx;
        struct cvmx_lmcx_int_s                cn68xx;
        struct cvmx_lmcx_int_s                cn68xxp1;
        struct cvmx_lmcx_int_s                cnf71xx;
};
typedef union cvmx_lmcx_int cvmx_lmcx_int_t;
5227
5228 /**
5229  * cvmx_lmc#_int_en
5230  *
5231  * LMC_INT_EN = LMC Interrupt Enable Register
5232  *
5233  */
5234 union cvmx_lmcx_int_en {
5235         uint64_t u64;
5236         struct cvmx_lmcx_int_en_s {
5237 #ifdef __BIG_ENDIAN_BITFIELD
5238         uint64_t reserved_3_63                : 61;
5239         uint64_t intr_ded_ena                 : 1;  /**< ECC Double Error Detect(DED) Interrupt Enable bit
5240                                                          When set, the memory controller raises a processor
5241                                                          interrupt on detecting an uncorrectable Dbl Bit ECC
5242                                                          error. */
5243         uint64_t intr_sec_ena                 : 1;  /**< ECC Single Error Correct(SEC) Interrupt Enable bit
5244                                                          When set, the memory controller raises a processor
5245                                                          interrupt on detecting a correctable Single Bit ECC
5246                                                          error. */
5247         uint64_t intr_nxm_wr_ena              : 1;  /**< Non-existent memory Write Error Interrupt Enable bit
5248                                                          When set, the memory controller raises a processor
5249                                                          interrupt on detecting a non-existent memory write */
5250 #else
5251         uint64_t intr_nxm_wr_ena              : 1;
5252         uint64_t intr_sec_ena                 : 1;
5253         uint64_t intr_ded_ena                 : 1;
5254         uint64_t reserved_3_63                : 61;
5255 #endif
5256         } s;
5257         struct cvmx_lmcx_int_en_s             cn61xx;
5258         struct cvmx_lmcx_int_en_s             cn63xx;
5259         struct cvmx_lmcx_int_en_s             cn63xxp1;
5260         struct cvmx_lmcx_int_en_s             cn66xx;
5261         struct cvmx_lmcx_int_en_s             cn68xx;
5262         struct cvmx_lmcx_int_en_s             cn68xxp1;
5263         struct cvmx_lmcx_int_en_s             cnf71xx;
5264 };
5265 typedef union cvmx_lmcx_int_en cvmx_lmcx_int_en_t;
5266
5267 /**
5268  * cvmx_lmc#_mem_cfg0
5269  *
5270  * Specify the RSL base addresses for the block
5271  *
5272  *                  LMC_MEM_CFG0 = LMC Memory Configuration Register0
5273  *
5274  * This register controls certain parameters of Memory Configuration
5275  */
5276 union cvmx_lmcx_mem_cfg0 {
5277         uint64_t u64;
5278         struct cvmx_lmcx_mem_cfg0_s {
5279 #ifdef __BIG_ENDIAN_BITFIELD
5280         uint64_t reserved_32_63               : 32;
5281         uint64_t reset                        : 1;  /**< Reset oneshot pulse for refresh counter,
5282                                                          and LMC_OPS_CNT_*, LMC_IFB_CNT_*, and LMC_DCLK_CNT_*
5283                                                          CSR's. SW should write this to a one, then re-write
5284                                                          it to a zero to cause the reset. */
5285         uint64_t silo_qc                      : 1;  /**< Adds a Quarter Cycle granularity to generate
5286                                                          dqs pulse generation for silo.
5287                                                          Combination of Silo_HC and Silo_QC gives the
5288                                                          ability to position the read enable with quarter
5289                                                          cycle resolution. This is applied on all the bytes
5290                                                          uniformly. */
5291         uint64_t bunk_ena                     : 1;  /**< Bunk Enable aka RANK ena (for use with dual-rank DIMMs)
5292                                                          For dual-rank DIMMs, the bunk_ena bit will enable
5293                                                          the drive of the CS_N[1:0] pins based on the
5294                                                          (pbank_lsb-1) address bit.
5295                                                          Write 0 for SINGLE ranked DIMM's. */
5296         uint64_t ded_err                      : 4;  /**< Double Error detected (DED) of Rd Data
5297                                                          In 128b mode, ecc is calculated on 1 cycle worth of data
5298                                                          [25] corresponds to DQ[63:0], Phase0
5299                                                          [26] corresponds to DQ[127:64], Phase0
5300                                                          [27] corresponds to DQ[63:0], Phase1
5301                                                          [28] corresponds to DQ[127:64], Phase1
5302                                                          In 64b mode, ecc is calculated on 2 cycle worth of data
5303                                                          [25] corresponds to DQ[63:0], Phase0, cycle0
5304                                                          [26] corresponds to DQ[63:0], Phase0, cycle1
5305                                                          [27] corresponds to DQ[63:0], Phase1, cycle0
5306                                                          [28] corresponds to DQ[63:0], Phase1, cycle1
5307                                                          Write of 1 will clear the corresponding error bit */
5308         uint64_t sec_err                      : 4;  /**< Single Error (corrected) of Rd Data
5309                                                          In 128b mode, ecc is calculated on 1 cycle worth of data
5310                                                          [21] corresponds to DQ[63:0], Phase0
5311                                                          [22] corresponds to DQ[127:64], Phase0
5312                                                          [23] corresponds to DQ[63:0], Phase1
5313                                                          [24] corresponds to DQ[127:64], Phase1
5314                                                          In 64b mode, ecc is calculated on 2 cycle worth of data
5315                                                          [21] corresponds to DQ[63:0], Phase0, cycle0
5316                                                          [22] corresponds to DQ[63:0], Phase0, cycle1
5317                                                          [23] corresponds to DQ[63:0], Phase1, cycle0
5318                                                          [24] corresponds to DQ[63:0], Phase1, cycle1
5319                                                          Write of 1 will clear the corresponding error bit */
5320         uint64_t intr_ded_ena                 : 1;  /**< ECC Double Error Detect(DED) Interrupt Enable bit
5321                                                          When set, the memory controller raises a processor
5322                                                          interrupt on detecting an uncorrectable Dbl Bit ECC
5323                                                          error. */
5324         uint64_t intr_sec_ena                 : 1;  /**< ECC Single Error Correct(SEC) Interrupt Enable bit
5325                                                          When set, the memory controller raises a processor
5326                                                          interrupt on detecting a correctable Single Bit ECC
5327                                                          error. */
5328         uint64_t tcl                          : 4;  /**< This register is not used */
5329         uint64_t ref_int                      : 6;  /**< Refresh interval represented in \#of 512 dclk increments.
5330                                                          Program this to RND-DN(tREFI/clkPeriod/512)
5331                                                             - 000000: RESERVED
5332                                                             - 000001: 1 * 512  = 512 dclks
5333                                                              - ...
5334                                                             - 111111: 63 * 512 = 32256 dclks */
5335         uint64_t pbank_lsb                    : 4;  /**< Physical Bank address select
5336                                                                                  Reverting to the explanation for ROW_LSB,
5337                                                                                  PBank_LSB would be Row_LSB bit + \#rowbits
5338                                                                                  + \#rankbits
5339                                                                                  In the 512MB DIMM Example, assuming no rank bits:
5340                                                                                  pbank_lsb=mem_addr[15+13] for 64 b mode
5341                                                                                           =mem_addr[16+13] for 128b mode
5342                                                                                  Hence the parameter
5343                                                          0000:pbank[1:0] = mem_adr[28:27]    / rank = mem_adr[26] (if bunk_ena)
5344                                                          0001:pbank[1:0] = mem_adr[29:28]    / rank = mem_adr[27]      "
5345                                                          0010:pbank[1:0] = mem_adr[30:29]    / rank = mem_adr[28]      "
5346                                                          0011:pbank[1:0] = mem_adr[31:30]    / rank = mem_adr[29]      "
5347                                                          0100:pbank[1:0] = mem_adr[32:31]    / rank = mem_adr[30]      "
5348                                                          0101:pbank[1:0] = mem_adr[33:32]    / rank = mem_adr[31]      "
5349                                                          0110:pbank[1:0] =[1'b0,mem_adr[33]] / rank = mem_adr[32]      "
5350                                                          0111:pbank[1:0] =[2'b0]             / rank = mem_adr[33]      "
5351                                                          1000-1111: RESERVED */
5352         uint64_t row_lsb                      : 3;  /**< Encoding used to determine which memory address
5353                                                          bit position represents the low order DDR ROW address.
5354                                                          The processor's memory address[33:7] needs to be
5355                                                          translated to DRAM addresses (bnk,row,col,rank and dimm)
5356                                                          and that is a function of the following:
5357                                                          1. \# Banks (4 or 8) - spec'd by BANK8
5358                                                          2. Datapath Width(64 or 128) - MODE128b
5359                                                          3. \# Ranks in a DIMM - spec'd by BUNK_ENA
5360                                                          4. \# DIMM's in the system
5361                                                          5. \# Column Bits of the memory part - spec'd indirectly
5362                                                          by this register.
5363                                                          6. \# Row Bits of the memory part - spec'd indirectly
5364                                                          by the register below (PBANK_LSB).
5365                                                          Illustration: For Micron's MT18HTF6472A,512MB DDR2
5366                                                          Unbuffered DIMM which uses 256Mb parts (8M x 8 x 4),
5367                                                          \# Banks = 4 -> 2 bits of BA
5368                                                          \# Columns = 1K -> 10 bits of Col
5369                                                          \# Rows = 8K -> 13 bits of Row
5370                                                          Assuming that the total Data width is 128, this is how
5371                                                          we arrive at row_lsb:
5372                                                          Col Address starts from mem_addr[4] for 128b (16Bytes)
5373                                                          dq width or from mem_addr[3] for 64b (8Bytes) dq width
5374                                                          \# col + \# bank = 12. Hence row_lsb is mem_adr[15] for
5375                                                          64bmode or mem_adr[16] for 128b mode. Hence row_lsb
5376                                                          parameter should be set to 001 (64b) or 010 (128b).
5377                                                               - 000: row_lsb = mem_adr[14]
5378                                                               - 001: row_lsb = mem_adr[15]
5379                                                               - 010: row_lsb = mem_adr[16]
5380                                                               - 011: row_lsb = mem_adr[17]
5381                                                               - 100: row_lsb = mem_adr[18]
5382                                                               - 101-111:row_lsb = RESERVED */
5383         uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 8b ECC
5384                                                          check/correct logic. Should be 1 when used with DIMMs
5385                                                          with ECC. 0, otherwise.
5386                                                          When this mode is turned on, DQ[71:64] and DQ[143:137]
5387                                                          on writes, will contain the ECC code generated for
5388                                                          the lower 64 and upper 64 bits of data which will
5389                                                          be written in the memory and then later on reads, used
5390                                                          to check for Single bit error (which will be auto-
5391                                                          corrected) and Double Bit error (which will be
5392                                                          reported). When not turned on, DQ[71:64] and DQ[143:137]
5393                                                          are driven to 0.  Please refer to SEC_ERR, DED_ERR,
5394                                                          LMC_FADR, and LMC_ECC_SYND registers
5395                                                          for diagnostics information when there is an error. (NOTE(review): "DQ[143:137]" is only 7 bits; with 8b ECC this is presumably DQ[143:136] -- confirm against the hardware reference manual) */
5396         uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory initialization
5397                                                          sequence. */
5398 #else
5399         uint64_t init_start                   : 1;
5400         uint64_t ecc_ena                      : 1;
5401         uint64_t row_lsb                      : 3;
5402         uint64_t pbank_lsb                    : 4;
5403         uint64_t ref_int                      : 6;
5404         uint64_t tcl                          : 4;
5405         uint64_t intr_sec_ena                 : 1;
5406         uint64_t intr_ded_ena                 : 1;
5407         uint64_t sec_err                      : 4;
5408         uint64_t ded_err                      : 4;
5409         uint64_t bunk_ena                     : 1;
5410         uint64_t silo_qc                      : 1;
5411         uint64_t reset                        : 1;
5412         uint64_t reserved_32_63               : 32;
5413 #endif
5414         } s;
5415         struct cvmx_lmcx_mem_cfg0_s           cn30xx;
5416         struct cvmx_lmcx_mem_cfg0_s           cn31xx;
5417         struct cvmx_lmcx_mem_cfg0_s           cn38xx;
5418         struct cvmx_lmcx_mem_cfg0_s           cn38xxp2;
5419         struct cvmx_lmcx_mem_cfg0_s           cn50xx;
5420         struct cvmx_lmcx_mem_cfg0_s           cn52xx;
5421         struct cvmx_lmcx_mem_cfg0_s           cn52xxp1;
5422         struct cvmx_lmcx_mem_cfg0_s           cn56xx;
5423         struct cvmx_lmcx_mem_cfg0_s           cn56xxp1;
5424         struct cvmx_lmcx_mem_cfg0_s           cn58xx;
5425         struct cvmx_lmcx_mem_cfg0_s           cn58xxp1;
5426 };
5427 typedef union cvmx_lmcx_mem_cfg0 cvmx_lmcx_mem_cfg0_t;
5428
5429 /**
5430  * cvmx_lmc#_mem_cfg1
5431  *
5432  * LMC_MEM_CFG1 = LMC Memory Configuration Register1
5433  *
5434  * This register controls the External Memory Configuration Timing Parameters. Please refer to the
5435  * appropriate DDR part spec from your memory vendor for the various values in this CSR.
5436  * The details of each of these timing parameters can be found in the JEDEC spec or the vendor
5437  * spec of the memory parts.
5438  */
5439 union cvmx_lmcx_mem_cfg1 {
5440         uint64_t u64;
5441         struct cvmx_lmcx_mem_cfg1_s {
5442 #ifdef __BIG_ENDIAN_BITFIELD
5443         uint64_t reserved_32_63               : 32;
5444         uint64_t comp_bypass                  : 1;  /**< Compensation bypass. */
5445         uint64_t trrd                         : 3;  /**< tRRD cycles: ACT-ACT timing parameter for different
5446                                                          banks. (Represented in tCYC cycles == 1dclks)
5447                                                          TYP=15ns (66MHz=1,167MHz=3,200MHz=3)
5448                                                          For DDR2, TYP=7.5ns
5449                                                             - 000: RESERVED
5450                                                             - 001: 1 tCYC
5451                                                             - 010: 2 tCYC
5452                                                             - 011: 3 tCYC
5453                                                             - 100: 4 tCYC
5454                                                             - 101: 5 tCYC
5455                                                             - 110: 6 tCYC
5456                                                             - 111: 7 tCYC */
5457         uint64_t caslat                       : 3;  /**< CAS Latency Encoding which is loaded into each DDR
5458                                                          SDRAM device (MRS[6:4]) upon power-up (INIT_START=1).
5459                                                          (Represented in tCYC cycles == 1 dclks)
5460                                                             000 RESERVED
5461                                                             001 RESERVED
5462                                                             010 2.0 tCYC
5463                                                             011 3.0 tCYC
5464                                                             100 4.0 tCYC
5465                                                             101 5.0 tCYC
5466                                                             110 6.0 tCYC
5467                                                             111 RESERVED
5468                                                          eg). The parameters TSKW, SILO_HC, and SILO_QC can
5469                                                          account for 1/4 cycle granularity in board/etch delays. */
5470         uint64_t tmrd                         : 3;  /**< tMRD Cycles
5471                                                          (Represented in dclk tCYC)
5472                                                          For DDR2, its TYP 2*tCYC)
5473                                                              - 000: RESERVED
5474                                                              - 001: 1
5475                                                              - 010: 2
5476                                                              - 011: 3
5477                                                              - 100: 4
5478                                                              - 101-111: RESERVED */
5479         uint64_t trfc                         : 5;  /**< Indicates tRFC constraints.
5480                                                          Set TRFC (CSR field) = RNDUP[tRFC(ns)/4*tcyc(ns)],
5481                                                          where tRFC is from the DDR2 spec, and tcyc(ns)
5482                                                          is the DDR clock frequency (not data rate).
5483                                                          For example, with 2Gb, DDR2-667 parts,
5484                                                          typ tRFC=195ns, so TRFC (CSR field) = 0x11.
5485                                                              TRFC (binary): Corresponding tRFC Cycles
5486                                                              ----------------------------------------
5487                                                              - 00000-00001: RESERVED
5488                                                              - 00010: 0-8
5489                                                              - 00011: 9-12
5490                                                              - 00100: 13-16
5491                                                              - ...
5492                                                              - 11110: 117-120
5493                                                              - 11111: 121-124 */
5494         uint64_t trp                          : 4;  /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)]
5495                                                          (Represented in tCYC cycles == 1dclk)
5496                                                          TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
5497                                                              - 0000: RESERVED
5498                                                              - 0001: 1
5499                                                              - ...
5500                                                              - 1001: 9
5501                                                              - 1010-1111: RESERVED
5502                                                          When using parts with 8 banks (LMC_DDR2_CTL->BANK8
5503                                                          is 1), load tRP cycles + 1 into this register. */
5504         uint64_t twtr                         : 4;  /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)]
5505                                                          Last Wr Data to Rd Command time.
5506                                                          (Represented in tCYC cycles == 1dclks)
5507                                                          TYP=15ns (66MHz=1,167MHz=3,400MHz=6, for TYP)
5508                                                              - 0000: RESERVED
5509                                                              - 0001: 1
5510                                                              - ...
5511                                                              - 0111: 7
5512                                                              - 1000-1111: RESERVED */
5513         uint64_t trcd                         : 4;  /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)]
5514                                                          (Represented in tCYC cycles == 1dclk)
5515                                                          TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
5516                                                              - 0000: RESERVED
5517                                                              - 0001: 2 (2 is the smallest value allowed)
5518                                                              - 0002: 2
5519                                                              - ...
5520                                                              - 1001: 9
5521                                                              - 1010-1111: RESERVED
5522                                                          In 2T mode, make this register TRCD-1, not going
5523                                                          below 2. */
5524         uint64_t tras                         : 5;  /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)]
5525                                                          (Represented in tCYC cycles == 1 dclk)
5526                                                              - 00000-0001: RESERVED
5527                                                              - 00010: 2
5528                                                              - ...
5529                                                              - 11111: 31 */
5530 #else
5531         uint64_t tras                         : 5;
5532         uint64_t trcd                         : 4;
5533         uint64_t twtr                         : 4;
5534         uint64_t trp                          : 4;
5535         uint64_t trfc                         : 5;
5536         uint64_t tmrd                         : 3;
5537         uint64_t caslat                       : 3;
5538         uint64_t trrd                         : 3;
5539         uint64_t comp_bypass                  : 1;
5540         uint64_t reserved_32_63               : 32;
5541 #endif
5542         } s;
5543         struct cvmx_lmcx_mem_cfg1_s           cn30xx;
5544         struct cvmx_lmcx_mem_cfg1_s           cn31xx;
5545         struct cvmx_lmcx_mem_cfg1_cn38xx {
5546 #ifdef __BIG_ENDIAN_BITFIELD
5547         uint64_t reserved_31_63               : 33;
5548         uint64_t trrd                         : 3;  /**< tRRD cycles: ACT-ACT timing parameter for different
5549                                                          banks. (Represented in tCYC cycles == 1dclks)
5550                                                          TYP=15ns (66MHz=1,167MHz=3,200MHz=3)
5551                                                          For DDR2, TYP=7.5ns
5552                                                             - 000: RESERVED
5553                                                             - 001: 1 tCYC
5554                                                             - 010: 2 tCYC
5555                                                             - 011: 3 tCYC
5556                                                             - 100: 4 tCYC
5557                                                             - 101: 5 tCYC
5558                                                             - 110-111: RESERVED */
5559         uint64_t caslat                       : 3;  /**< CAS Latency Encoding which is loaded into each DDR
5560                                                          SDRAM device (MRS[6:4]) upon power-up (INIT_START=1).
5561                                                          (Represented in tCYC cycles == 1 dclks)
5562                                                             000 RESERVED
5563                                                             001 RESERVED
5564                                                             010 2.0 tCYC
5565                                                             011 3.0 tCYC
5566                                                             100 4.0 tCYC
5567                                                             101 5.0 tCYC
5568                                                             110 6.0 tCYC (DDR2)
5569                                                                 2.5 tCYC (DDR1)
5570                                                             111 RESERVED
5571                                                          eg). The parameters TSKW, SILO_HC, and SILO_QC can
5572                                                          account for 1/4 cycle granularity in board/etch delays. */
5573         uint64_t tmrd                         : 3;  /**< tMRD Cycles
5574                                                          (Represented in dclk tCYC)
5575                                                          For DDR2, its TYP 2*tCYC)
5576                                                              - 000: RESERVED
5577                                                              - 001: 1
5578                                                              - 010: 2
5579                                                              - 011: 3
5580                                                              - 100: 4
5581                                                              - 101-111: RESERVED */
5582         uint64_t trfc                         : 5;  /**< Indicates tRFC constraints.
5583                                                          Set TRFC (CSR field) = RNDUP[tRFC(ns)/4*tcyc(ns)],
5584                                                          where tRFC is from the DDR2 spec, and tcyc(ns)
5585                                                          is the DDR clock frequency (not data rate).
5586                                                          For example, with 2Gb, DDR2-667 parts,
5587                                                          typ tRFC=195ns, so TRFC (CSR field) = 0x11.
5588                                                              TRFC (binary): Corresponding tRFC Cycles
5589                                                              ----------------------------------------
5590                                                              - 00000-00001: RESERVED
5591                                                              - 00010: 0-8
5592                                                              - 00011: 9-12
5593                                                              - 00100: 13-16
5594                                                              - ...
5595                                                              - 11110: 117-120
5596                                                              - 11111: 121-124 */
5597         uint64_t trp                          : 4;  /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)]
5598                                                          (Represented in tCYC cycles == 1dclk)
5599                                                          TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
5600                                                              - 0000: RESERVED
5601                                                              - 0001: 1
5602                                                              - ...
5603                                                              - 0111: 7
5604                                                              - 1000-1111: RESERVED
5605                                                          When using parts with 8 banks (LMC_DDR2_CTL->BANK8
5606                                                          is 1), load tRP cycles + 1 into this register. */
5607         uint64_t twtr                         : 4;  /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)]
5608                                                          Last Wr Data to Rd Command time.
5609                                                          (Represented in tCYC cycles == 1dclks)
5610                                                          TYP=15ns (66MHz=1,167MHz=3,400MHz=6, for TYP)
5611                                                              - 0000: RESERVED
5612                                                              - 0001: 1
5613                                                              - ...
5614                                                              - 0111: 7
5615                                                              - 1000-1111: RESERVED */
5616         uint64_t trcd                         : 4;  /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)]
5617                                                          (Represented in tCYC cycles == 1dclk)
5618                                                          TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
5619                                                              - 0000: RESERVED
5620                                                              - 0001: 2 (2 is the smallest value allowed)
5621                                                              - 0002: 2
5622                                                              - ...
5623                                                              - 0111: 7
5624                                                              - 1110-1111: RESERVED
5625                                                          In 2T mode, make this register TRCD-1, not going
5626                                                          below 2. */
5627         uint64_t tras                         : 5;  /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)]
5628                                                          (Represented in tCYC cycles == 1 dclk)
5629                                                          For DDR-I mode:
5630                                                          TYP=45ns (66MHz=3,167MHz=8,400MHz=18
5631                                                              - 00000-0001: RESERVED
5632                                                              - 00010: 2
5633                                                              - ...
5634                                                              - 10100: 20
5635                                                              - 10101-11111: RESERVED */
5636 #else
5637         uint64_t tras                         : 5;
5638         uint64_t trcd                         : 4;
5639         uint64_t twtr                         : 4;
5640         uint64_t trp                          : 4;
5641         uint64_t trfc                         : 5;
5642         uint64_t tmrd                         : 3;
5643         uint64_t caslat                       : 3;
5644         uint64_t trrd                         : 3;
5645         uint64_t reserved_31_63               : 33;
5646 #endif
5647         } cn38xx;
5648         struct cvmx_lmcx_mem_cfg1_cn38xx      cn38xxp2;
5649         struct cvmx_lmcx_mem_cfg1_s           cn50xx;
5650         struct cvmx_lmcx_mem_cfg1_cn38xx      cn52xx;
5651         struct cvmx_lmcx_mem_cfg1_cn38xx      cn52xxp1;
5652         struct cvmx_lmcx_mem_cfg1_cn38xx      cn56xx;
5653         struct cvmx_lmcx_mem_cfg1_cn38xx      cn56xxp1;
5654         struct cvmx_lmcx_mem_cfg1_cn38xx      cn58xx;
5655         struct cvmx_lmcx_mem_cfg1_cn38xx      cn58xxp1;
5656 };
5657 typedef union cvmx_lmcx_mem_cfg1 cvmx_lmcx_mem_cfg1_t;
5658
5659 /**
5660  * cvmx_lmc#_modereg_params0
5661  *
5662  * Notes:
5663  * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
5664  * (Each field documents which mode register it targets and during which instruction sequences LMC writes it.)
5665  */
5666 union cvmx_lmcx_modereg_params0 {
5667         uint64_t u64;
5668         struct cvmx_lmcx_modereg_params0_s {
5669 #ifdef __BIG_ENDIAN_BITFIELD
5670         uint64_t reserved_25_63               : 39;
5671         uint64_t ppd                          : 1;  /**< DLL Control for precharge powerdown
5672                                                          0 = Slow exit (DLL off)
5673                                                          1 = Fast exit (DLL on)
5674                                                          LMC writes this value to MR0[PPD] in the selected DDR3 parts
5675                                                          during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5676                                                          self-refresh exit instruction sequences.
5677                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
5678                                                          This value must equal the MR0[PPD] value in all the DDR3
5679                                                          parts attached to all ranks during normal operation. */
5680         uint64_t wrp                          : 3;  /**< Write recovery for auto precharge
5681                                                          Should be programmed to be equal to or greater than
5682                                                          RNDUP[tWR(ns)/tCYC(ns)]
5683                                                          000 = 5
5684                                                          001 = 5
5685                                                          010 = 6
5686                                                          011 = 7
5687                                                          100 = 8
5688                                                          101 = 10
5689                                                          110 = 12
5690                                                          111 = 14
5691                                                          LMC writes this value to MR0[WR] in the selected DDR3 parts
5692                                                          during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5693                                                          self-refresh exit instruction sequences.
5694                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
5695                                                          This value must equal the MR0[WR] value in all the DDR3
5696                                                          parts attached to all ranks during normal operation. */
5697         uint64_t dllr                         : 1;  /**< DLL Reset
5698                                                          LMC writes this value to MR0[DLL] in the selected DDR3 parts
5699                                                          during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5700                                                          self-refresh exit instruction sequences.
5701                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
5702                                                          The MR0[DLL] value must be 0 in all the DDR3
5703                                                          parts attached to all ranks during normal operation. */
5704         uint64_t tm                           : 1;  /**< Test Mode
5705                                                          LMC writes this value to MR0[TM] in the selected DDR3 parts
5706                                                          during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5707                                                          self-refresh exit instruction sequences.
5708                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
5709                                                          The MR0[TM] value must be 0 in all the DDR3
5710                                                          parts attached to all ranks during normal operation. */
5711         uint64_t rbt                          : 1;  /**< Read Burst Type
5712                                                          1 = interleaved (fixed)
5713                                                          LMC writes this value to MR0[RBT] in the selected DDR3 parts
5714                                                          during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5715                                                          self-refresh exit instruction sequences.
5716                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
5717                                                          The MR0[RBT] value must be 1 in all the DDR3
5718                                                          parts attached to all ranks during normal operation. */
5719         uint64_t cl                           : 4;  /**< CAS Latency
5720                                                          0010 = 5
5721                                                          0100 = 6
5722                                                          0110 = 7
5723                                                          1000 = 8
5724                                                          1010 = 9
5725                                                          1100 = 10
5726                                                          1110 = 11
5727                                                          0001 = 12
5728                                                          0011 = 13
5729                                                          0101 = 14
5730                                                          0111 = 15
5731                                                          1001 = 16
5732                                                          0000, 1011, 1101, 1111 = Reserved
5733                                                          LMC writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts
5734                                                          during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5735                                                          self-refresh exit instruction sequences.
5736                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
5737                                                          This value must equal the MR0[CAS Latency / CL] value in all the DDR3
5738                                                          parts attached to all ranks during normal operation. */
5739         uint64_t bl                           : 2;  /**< Burst Length
5740                                                          0 = 8 (fixed)
5741                                                          LMC writes this value to MR0[BL] in the selected DDR3 parts
5742                                                          during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5743                                                          self-refresh exit instruction sequences.
5744                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
5745                                                          The MR0[BL] value must be 0 in all the DDR3
5746                                                          parts attached to all ranks during normal operation. */
5747         uint64_t qoff                         : 1;  /**< Qoff Enable
5748                                                          0 = enable
5749                                                          1 = disable
5750                                                          LMC writes this value to MR1[Qoff] in the DDR3 parts in the selected ranks
5751                                                          during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5752                                                          self-refresh entry and exit instruction sequences.
5753                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK,INIT_STATUS] and
5754                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
5755                                                          The MR1[Qoff] value must be 0 in all the DDR3
5756                                                          parts attached to all ranks during normal operation. */
5757         uint64_t tdqs                         : 1;  /**< TDQS Enable
5758                                                          0 = disable
5759                                                          LMC writes this value to MR1[TDQS] in the DDR3 parts in the selected ranks
5760                                                          during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5761                                                          self-refresh entry and exit instruction sequences.
5762                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK,INIT_STATUS] and
5763                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5764         uint64_t wlev                         : 1;  /**< Write Leveling Enable
5765                                                          0 = disable
5766                                                          LMC writes MR1[Level]=0 in the DDR3 parts in the selected ranks
5767                                                          during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5768                                                          self-refresh entry and exit instruction sequences.
5769                                                          (Write-leveling can only be initiated via the
5770                                                          write-leveling instruction sequence.)
5771                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK,INIT_STATUS] and
5772                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5773         uint64_t al                           : 2;  /**< Additive Latency
5774                                                          00 = 0
5775                                                          01 = CL-1
5776                                                          10 = CL-2
5777                                                          11 = Reserved
5778                                                          LMC writes this value to MR1[AL] in the selected DDR3 parts
5779                                                          during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5780                                                          self-refresh entry and exit instruction sequences.
5781                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5782                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
5783                                                          This value must equal the MR1[AL] value in all the DDR3
5784                                                          parts attached to all ranks during normal operation.
5785                                                          See also LMC*_CONTROL[POCAS]. */
5786         uint64_t dll                          : 1;  /**< DLL Enable
5787                                                          0 = enable
5788                                                          1 = disable.
5789                                                          LMC writes this value to MR1[DLL] in the selected DDR3 parts
5790                                                          during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5791                                                          self-refresh entry and exit instruction sequences.
5792                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5793                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
5794                                                          This value must equal the MR1[DLL] value in all the DDR3
5795                                                          parts attached to all ranks during normal operation.
5796                                                          In dll-off mode, CL/CWL must be programmed
5797                                                          equal to 6/6, respectively, as per the DDR3 specifications. */
5798         uint64_t mpr                          : 1;  /**< MPR
5799                                                          LMC writes this value to MR3[MPR] in the selected DDR3 parts
5800                                                          during power-up/init, read-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5801                                                          self-refresh exit instruction sequences.
5802                                                          (LMC also writes MR3[MPR]=1 at the beginning of the
5803                                                          read-leveling instruction sequence. Read-leveling should only be initiated via the
5804                                                          read-leveling instruction sequence.)
5805                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
5806                                                          The MR3[MPR] value must be 0 in all the DDR3
5807                                                          parts attached to all ranks during normal operation. */
5808         uint64_t mprloc                       : 2;  /**< MPR Location
5809                                                          LMC writes this value to MR3[MPRLoc] in the selected DDR3 parts
5810                                                          during power-up/init, read-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5811                                                          self-refresh exit instruction sequences.
5812                                                          (LMC also writes MR3[MPRLoc]=0 at the beginning of the
5813                                                          read-leveling instruction sequence.)
5814                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
5815                                                          The MR3[MPRLoc] value must be 0 in all the DDR3
5816                                                          parts attached to all ranks during normal operation. */
5817         uint64_t cwl                          : 3;  /**< CAS Write Latency
5818                                                          - 000: 5
5819                                                          - 001: 6
5820                                                          - 010: 7
5821                                                          - 011: 8
5822                                                          - 100: 9
5823                                                          - 101: 10
5824                                                          - 110: 11
5825                                                          - 111: 12
5826                                                          LMC writes this value to MR2[CWL] in the selected DDR3 parts
5827                                                          during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5828                                                          self-refresh entry and exit instruction sequences.
5829                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5830                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
5831                                                          This value must equal the MR2[CWL] value in all the DDR3
5832                                                          parts attached to all ranks during normal operation. */
5833 #else /* little-endian bitfields: identical fields and widths, declared in reverse order */
5834         uint64_t cwl                          : 3;
5835         uint64_t mprloc                       : 2;
5836         uint64_t mpr                          : 1;
5837         uint64_t dll                          : 1;
5838         uint64_t al                           : 2;
5839         uint64_t wlev                         : 1;
5840         uint64_t tdqs                         : 1;
5841         uint64_t qoff                         : 1;
5842         uint64_t bl                           : 2;
5843         uint64_t cl                           : 4;
5844         uint64_t rbt                          : 1;
5845         uint64_t tm                           : 1;
5846         uint64_t dllr                         : 1;
5847         uint64_t wrp                          : 3;
5848         uint64_t ppd                          : 1;
5849         uint64_t reserved_25_63               : 39;
5850 #endif
5851         } s;
5852         struct cvmx_lmcx_modereg_params0_s    cn61xx;
5853         struct cvmx_lmcx_modereg_params0_s    cn63xx;
5854         struct cvmx_lmcx_modereg_params0_s    cn63xxp1;
5855         struct cvmx_lmcx_modereg_params0_s    cn66xx;
5856         struct cvmx_lmcx_modereg_params0_s    cn68xx;
5857         struct cvmx_lmcx_modereg_params0_s    cn68xxp1;
5858         struct cvmx_lmcx_modereg_params0_s    cnf71xx;
5859 };
5860 typedef union cvmx_lmcx_modereg_params0 cvmx_lmcx_modereg_params0_t;
5861
5862 /**
5863  * cvmx_lmc#_modereg_params1
5864  *
5865  * Notes:
5866  * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
5867  *
5868  */
5869 union cvmx_lmcx_modereg_params1 {
5870         uint64_t u64;
5871         struct cvmx_lmcx_modereg_params1_s {
5872 #ifdef __BIG_ENDIAN_BITFIELD
5873         uint64_t reserved_48_63               : 16;
5874         uint64_t rtt_nom_11                   : 3;  /**< RTT_NOM Rank 3
5875                                                          LMC writes this value to MR1[Rtt_Nom] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
5876                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5877                                                          self-refresh entry and exit instruction sequences.
5878                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5879                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
5880                                                          Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
5881                                                          only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
5882                                                          Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
5883         uint64_t dic_11                       : 2;  /**< Output Driver Impedance Control Rank 3
5884                                                          LMC writes this value to MR1[D.I.C.] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
5885                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5886                                                          self-refresh entry and exit instruction sequences.
5887                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5888                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5889         uint64_t rtt_wr_11                    : 2;  /**< RTT_WR Rank 3
5890                                                          LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
5891                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5892                                                          self-refresh entry and exit instruction sequences.
5893                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5894                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5895         uint64_t srt_11                       : 1;  /**< Self-refresh temperature range Rank 3
5896                                                          LMC writes this value to MR2[SRT] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
5897                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5898                                                          self-refresh entry and exit instruction sequences.
5899                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5900                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5901         uint64_t asr_11                       : 1;  /**< Auto self-refresh Rank 3
5902                                                          LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
5903                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5904                                                          self-refresh entry and exit instruction sequences.
5905                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5906                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5907         uint64_t pasr_11                      : 3;  /**< Partial array self-refresh Rank 3
5908                                                          LMC writes this value to MR2[PASR] in the rank 3 (i.e. DIMM1_CS1) DDR3 parts
5909                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5910                                                          self-refresh entry and exit instruction sequences.
5911                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5912                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5913         uint64_t rtt_nom_10                   : 3;  /**< RTT_NOM Rank 2
5914                                                          LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
5915                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5916                                                          self-refresh entry and exit instruction sequences.
5917                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5918                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
5919                                                          Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
5920                                                          only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
5921                                                          Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
5922         uint64_t dic_10                       : 2;  /**< Output Driver Impedance Control Rank 2
5923                                                          LMC writes this value to MR1[D.I.C.] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
5924                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5925                                                          self-refresh entry and exit instruction sequences.
5926                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5927                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5928         uint64_t rtt_wr_10                    : 2;  /**< RTT_WR Rank 2
5929                                                          LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
5930                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5931                                                          self-refresh entry and exit instruction sequences.
5932                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5933                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5934         uint64_t srt_10                       : 1;  /**< Self-refresh temperature range Rank 2
5935                                                          LMC writes this value to MR2[SRT] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
5936                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5937                                                          self-refresh entry and exit instruction sequences.
5938                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5939                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5940         uint64_t asr_10                       : 1;  /**< Auto self-refresh Rank 2
5941                                                          LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
5942                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5943                                                          self-refresh entry and exit instruction sequences.
5944                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5945                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5946         uint64_t pasr_10                      : 3;  /**< Partial array self-refresh Rank 2
5947                                                          LMC writes this value to MR2[PASR] in the rank 2 (i.e. DIMM1_CS0) DDR3 parts
5948                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5949                                                          self-refresh entry and exit instruction sequences.
5950                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5951                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5952         uint64_t rtt_nom_01                   : 3;  /**< RTT_NOM Rank 1
5953                                                          LMC writes this value to MR1[Rtt_Nom] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
5954                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5955                                                          self-refresh entry and exit instruction sequences.
5956                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5957                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
5958                                                          Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
5959                                                          only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
5960                                                          Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
5961         uint64_t dic_01                       : 2;  /**< Output Driver Impedance Control Rank 1
5962                                                          LMC writes this value to MR1[D.I.C.] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
5963                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5964                                                          self-refresh entry and exit instruction sequences.
5965                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5966                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5967         uint64_t rtt_wr_01                    : 2;  /**< RTT_WR Rank 1
5968                                                          LMC writes this value to MR2[Rtt_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
5969                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5970                                                          self-refresh entry and exit instruction sequences.
5971                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5972                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5973         uint64_t srt_01                       : 1;  /**< Self-refresh temperature range Rank 1
5974                                                          LMC writes this value to MR2[SRT] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
5975                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5976                                                          self-refresh entry and exit instruction sequences.
5977                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5978                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5979         uint64_t asr_01                       : 1;  /**< Auto self-refresh Rank 1
5980                                                          LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
5981                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5982                                                          self-refresh entry and exit instruction sequences.
5983                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5984                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5985         uint64_t pasr_01                      : 3;  /**< Partial array self-refresh Rank 1
5986                                                          LMC writes this value to MR2[PASR] in the rank 1 (i.e. DIMM0_CS1) DDR3 parts
5987                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5988                                                          self-refresh entry and exit instruction sequences.
5989                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5990                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
5991         uint64_t rtt_nom_00                   : 3;  /**< RTT_NOM Rank 0
5992                                                          LMC writes this value to MR1[Rtt_Nom] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
5993                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
5994                                                          self-refresh entry and exit instruction sequences.
5995                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
5996                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
5997                                                          Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
5998                                                          only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
5999                                                          Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
6000         uint64_t dic_00                       : 2;  /**< Output Driver Impedance Control Rank 0
6001                                                          LMC writes this value to MR1[D.I.C.] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
6002                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
6003                                                          self-refresh entry and exit instruction sequences.
6004                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
6005                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
6006         uint64_t rtt_wr_00                    : 2;  /**< RTT_WR Rank 0
6007                                                          LMC writes this value to MR2[Rtt_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
6008                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
6009                                                          self-refresh entry and exit instruction sequences.
6010                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
6011                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
6012         uint64_t srt_00                       : 1;  /**< Self-refresh temperature range Rank 0
6013                                                          LMC writes this value to MR2[SRT] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
6014                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
6015                                                          self-refresh entry and exit instruction sequences.
6016                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
6017                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
6018         uint64_t asr_00                       : 1;  /**< Auto self-refresh Rank 0
6019                                                          LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
6020                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
6021                                                          self-refresh entry and exit instruction sequences.
6022                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
6023                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
6024         uint64_t pasr_00                      : 3;  /**< Partial array self-refresh Rank 0
6025                                                          LMC writes this value to MR2[PASR] in the rank 0 (i.e. DIMM0_CS0) DDR3 parts
6026                                                          when selected during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
6027                                                          self-refresh entry and exit instruction sequences.
6028                                                          See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
6029                                                          LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
6030 #else
6031         uint64_t pasr_00                      : 3;
6032         uint64_t asr_00                       : 1;
6033         uint64_t srt_00                       : 1;
6034         uint64_t rtt_wr_00                    : 2;
6035         uint64_t dic_00                       : 2;
6036         uint64_t rtt_nom_00                   : 3;
6037         uint64_t pasr_01                      : 3;
6038         uint64_t asr_01                       : 1;
6039         uint64_t srt_01                       : 1;
6040         uint64_t rtt_wr_01                    : 2;
6041         uint64_t dic_01                       : 2;
6042         uint64_t rtt_nom_01                   : 3;
6043         uint64_t pasr_10                      : 3;
6044         uint64_t asr_10                       : 1;
6045         uint64_t srt_10                       : 1;
6046         uint64_t rtt_wr_10                    : 2;
6047         uint64_t dic_10                       : 2;
6048         uint64_t rtt_nom_10                   : 3;
6049         uint64_t pasr_11                      : 3;
6050         uint64_t asr_11                       : 1;
6051         uint64_t srt_11                       : 1;
6052         uint64_t rtt_wr_11                    : 2;
6053         uint64_t dic_11                       : 2;
6054         uint64_t rtt_nom_11                   : 3;
6055         uint64_t reserved_48_63               : 16;
6056 #endif
6057         } s;
6058         struct cvmx_lmcx_modereg_params1_s    cn61xx;
6059         struct cvmx_lmcx_modereg_params1_s    cn63xx;
6060         struct cvmx_lmcx_modereg_params1_s    cn63xxp1;
6061         struct cvmx_lmcx_modereg_params1_s    cn66xx;
6062         struct cvmx_lmcx_modereg_params1_s    cn68xx;
6063         struct cvmx_lmcx_modereg_params1_s    cn68xxp1;
6064         struct cvmx_lmcx_modereg_params1_s    cnf71xx;
6065 };
6066 typedef union cvmx_lmcx_modereg_params1 cvmx_lmcx_modereg_params1_t;
6067
6068 /**
6069  * cvmx_lmc#_nxm
6070  *
6071  * LMC_NXM = LMC non-existent memory
6072  *
6073  *
6074  * Notes:
6075  * Decoding for mem_msb/rank
6076  *      - 0000: mem_msb = mem_adr[25]
6077  *      - 0001: mem_msb = mem_adr[26]
6078  *      - 0010: mem_msb = mem_adr[27]
6079  *      - 0011: mem_msb = mem_adr[28]
6080  *      - 0100: mem_msb = mem_adr[29]
6081  *      - 0101: mem_msb = mem_adr[30]
6082  *      - 0110: mem_msb = mem_adr[31]
6083  *      - 0111: mem_msb = mem_adr[32]
6084  *      - 1000: mem_msb = mem_adr[33]
6085  *      - 1001: mem_msb = mem_adr[34]
6086  *      1010-1111 = Reserved
6087  * For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
6088  * DDR3 parts, the column address width = 10, so with
6089  * 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] and
6090  * mem_msb = 4
6091  *
6092  * Note also that addresses greater than the max defined space (pbank_msb) are also treated
6093  * as NXM accesses
6094  */
6095 union cvmx_lmcx_nxm {
6096         uint64_t u64; /**< Whole register as a single 64-bit value */
6097         struct cvmx_lmcx_nxm_s {
6098 #ifdef __BIG_ENDIAN_BITFIELD
6099         uint64_t reserved_40_63               : 24;
6100         uint64_t mem_msb_d3_r1                : 4;  /**< Max Row MSB for DIMM3, RANK1/DIMM3 in Single Ranked
6101                                                          *UNUSED IN 6xxx* */
6102         uint64_t mem_msb_d3_r0                : 4;  /**< Max Row MSB for DIMM3, RANK0
6103                                                          *UNUSED IN 6xxx* */
6104         uint64_t mem_msb_d2_r1                : 4;  /**< Max Row MSB for DIMM2, RANK1/DIMM2 in Single Ranked
6105                                                          *UNUSED IN 6xxx* */
6106         uint64_t mem_msb_d2_r0                : 4;  /**< Max Row MSB for DIMM2, RANK0
6107                                                          *UNUSED IN 6xxx* */
6108         uint64_t mem_msb_d1_r1                : 4;  /**< Max Row MSB for DIMM1, RANK1/DIMM1 in Single Ranked */
6109         uint64_t mem_msb_d1_r0                : 4;  /**< Max Row MSB for DIMM1, RANK0 */
6110         uint64_t mem_msb_d0_r1                : 4;  /**< Max Row MSB for DIMM0, RANK1/DIMM0 in Single Ranked */
6111         uint64_t mem_msb_d0_r0                : 4;  /**< Max Row MSB for DIMM0, RANK0 */
6112         uint64_t cs_mask                      : 8;  /**< Chip select mask.
6113                                                          This mask corresponds to the 8 chip selects for a memory
6114                                                          configuration.  If LMC*_CONFIG[RANK_ENA]==0 then this
6115                                                          mask must be set in pairs because each reference address
6116                                                          will assert a pair of chip selects.  If the chip
6117                                                          select(s) have a corresponding CS_MASK bit set, then the
6118                                                          reference is to non-existent memory (NXM).  LMC will alias a
6119                                                          NXM read reference to use the lowest, legal chip select(s)
6120                                                          and return 0's. LMC normally discards NXM writes, but will
6121                                                          also alias them when LMC*_CONTROL[NXM_WRITE_EN]=1.
6122                                                          CS_MASK<7:4> MBZ in 6xxx */
6123 #else /* little-endian: identical fields, reverse declaration order */
6124         uint64_t cs_mask                      : 8;
6125         uint64_t mem_msb_d0_r0                : 4;
6126         uint64_t mem_msb_d0_r1                : 4;
6127         uint64_t mem_msb_d1_r0                : 4;
6128         uint64_t mem_msb_d1_r1                : 4;
6129         uint64_t mem_msb_d2_r0                : 4;
6130         uint64_t mem_msb_d2_r1                : 4;
6131         uint64_t mem_msb_d3_r0                : 4;
6132         uint64_t mem_msb_d3_r1                : 4;
6133         uint64_t reserved_40_63               : 24;
6134 #endif
6135         } s; /* generic layout */
6136         struct cvmx_lmcx_nxm_cn52xx {
6137 #ifdef __BIG_ENDIAN_BITFIELD
6138         uint64_t reserved_8_63                : 56;
6139         uint64_t cs_mask                      : 8;  /**< Chip select mask.
6140                                                          This mask corresponds to the 8 chip selects for a memory
6141                                                          configuration.  If LMC_MEM_CFG0[BUNK_ENA]==0 then this
6142                                                          mask must be set in pairs because each reference address
6143                                                          will assert a pair of chip selects.  If the chip
6144                                                          select(s) have a corresponding CS_MASK bit set, then the
6145                                                          reference is to non-existent memory.  LMC will alias the
6146                                                          reference to use the lowest, legal chip select(s) in
6147                                                          that case. */
6148 #else /* little-endian: identical fields, reverse declaration order */
6149         uint64_t cs_mask                      : 8;
6150         uint64_t reserved_8_63                : 56;
6151 #endif
6152         } cn52xx; /* older chips: cs_mask only, no per-DIMM mem_msb fields */
6153         struct cvmx_lmcx_nxm_cn52xx           cn56xx;
6154         struct cvmx_lmcx_nxm_cn52xx           cn58xx;
6155         struct cvmx_lmcx_nxm_s                cn61xx;
6156         struct cvmx_lmcx_nxm_s                cn63xx;
6157         struct cvmx_lmcx_nxm_s                cn63xxp1;
6158         struct cvmx_lmcx_nxm_s                cn66xx;
6159         struct cvmx_lmcx_nxm_s                cn68xx;
6160         struct cvmx_lmcx_nxm_s                cn68xxp1;
6161         struct cvmx_lmcx_nxm_s                cnf71xx;
6162 };
6163 typedef union cvmx_lmcx_nxm cvmx_lmcx_nxm_t;
6164
6165 /**
6166  * cvmx_lmc#_ops_cnt
6167  *
6168  * LMC_OPS_CNT  = Performance Counters
6169  *
6170  */
6171 union cvmx_lmcx_ops_cnt {
6172         uint64_t u64; /**< Whole register as a single 64-bit value */
6173         struct cvmx_lmcx_ops_cnt_s {
6174 #ifdef __BIG_ENDIAN_BITFIELD
6175         uint64_t opscnt                       : 64; /**< Performance Counter
6176                                                          64-bit counter that increments when the DDR3 data bus
6177                                                          is being used
6178                                                          DRAM bus utilization = LMC*_OPS_CNT/LMC*_DCLK_CNT */
6179 #else /* little-endian: identical single field */
6180         uint64_t opscnt                       : 64;
6181 #endif
6182         } s;
6183         struct cvmx_lmcx_ops_cnt_s            cn61xx;
6184         struct cvmx_lmcx_ops_cnt_s            cn63xx;
6185         struct cvmx_lmcx_ops_cnt_s            cn63xxp1;
6186         struct cvmx_lmcx_ops_cnt_s            cn66xx;
6187         struct cvmx_lmcx_ops_cnt_s            cn68xx;
6188         struct cvmx_lmcx_ops_cnt_s            cn68xxp1;
6189         struct cvmx_lmcx_ops_cnt_s            cnf71xx;
6190 };
6191 typedef union cvmx_lmcx_ops_cnt cvmx_lmcx_ops_cnt_t;
6192
6193 /**
6194  * cvmx_lmc#_ops_cnt_hi
6195  *
6196  * LMC_OPS_CNT_HI  = Performance Counters
6197  *
6198  */
6199 union cvmx_lmcx_ops_cnt_hi {
6200         uint64_t u64; /**< Whole register as a single 64-bit value */
6201         struct cvmx_lmcx_ops_cnt_hi_s {
6202 #ifdef __BIG_ENDIAN_BITFIELD
6203         uint64_t reserved_32_63               : 32;
6204         uint64_t opscnt_hi                    : 32; /**< Performance Counter to measure Bus Utilization
6205                                                          Upper 32-bits of 64-bit counter
6206                                                            DRAM bus utilization = LMC_OPS_CNT_* /LMC_DCLK_CNT_* */
6207 #else /* little-endian: identical fields, reverse declaration order */
6208         uint64_t opscnt_hi                    : 32;
6209         uint64_t reserved_32_63               : 32;
6210 #endif
6211         } s;
6212         struct cvmx_lmcx_ops_cnt_hi_s         cn30xx;
6213         struct cvmx_lmcx_ops_cnt_hi_s         cn31xx;
6214         struct cvmx_lmcx_ops_cnt_hi_s         cn38xx;
6215         struct cvmx_lmcx_ops_cnt_hi_s         cn38xxp2;
6216         struct cvmx_lmcx_ops_cnt_hi_s         cn50xx;
6217         struct cvmx_lmcx_ops_cnt_hi_s         cn52xx;
6218         struct cvmx_lmcx_ops_cnt_hi_s         cn52xxp1;
6219         struct cvmx_lmcx_ops_cnt_hi_s         cn56xx;
6220         struct cvmx_lmcx_ops_cnt_hi_s         cn56xxp1;
6221         struct cvmx_lmcx_ops_cnt_hi_s         cn58xx;
6222         struct cvmx_lmcx_ops_cnt_hi_s         cn58xxp1;
6223 };
6224 typedef union cvmx_lmcx_ops_cnt_hi cvmx_lmcx_ops_cnt_hi_t;
6225
6226 /**
6227  * cvmx_lmc#_ops_cnt_lo
6228  *
6229  * LMC_OPS_CNT_LO  = Performance Counters
6230  *
6231  */
6232 union cvmx_lmcx_ops_cnt_lo {
6233         uint64_t u64; /**< Whole register as a single 64-bit value */
6234         struct cvmx_lmcx_ops_cnt_lo_s {
6235 #ifdef __BIG_ENDIAN_BITFIELD
6236         uint64_t reserved_32_63               : 32;
6237         uint64_t opscnt_lo                    : 32; /**< Performance Counter
6238                                                          Low 32-bits of 64-bit counter
6239                                                            DRAM bus utilization = LMC_OPS_CNT_* /LMC_DCLK_CNT_* */
6240 #else /* little-endian: identical fields, reverse declaration order */
6241         uint64_t opscnt_lo                    : 32;
6242         uint64_t reserved_32_63               : 32;
6243 #endif
6244         } s;
6245         struct cvmx_lmcx_ops_cnt_lo_s         cn30xx;
6246         struct cvmx_lmcx_ops_cnt_lo_s         cn31xx;
6247         struct cvmx_lmcx_ops_cnt_lo_s         cn38xx;
6248         struct cvmx_lmcx_ops_cnt_lo_s         cn38xxp2;
6249         struct cvmx_lmcx_ops_cnt_lo_s         cn50xx;
6250         struct cvmx_lmcx_ops_cnt_lo_s         cn52xx;
6251         struct cvmx_lmcx_ops_cnt_lo_s         cn52xxp1;
6252         struct cvmx_lmcx_ops_cnt_lo_s         cn56xx;
6253         struct cvmx_lmcx_ops_cnt_lo_s         cn56xxp1;
6254         struct cvmx_lmcx_ops_cnt_lo_s         cn58xx;
6255         struct cvmx_lmcx_ops_cnt_lo_s         cn58xxp1;
6256 };
6257 typedef union cvmx_lmcx_ops_cnt_lo cvmx_lmcx_ops_cnt_lo_t;
6258
6259 /**
6260  * cvmx_lmc#_phy_ctl
6261  *
6262  * LMC_PHY_CTL = LMC PHY Control
6263  *
6264  */
6265 union cvmx_lmcx_phy_ctl {
6266         uint64_t u64; /**< Whole register as a single 64-bit value */
6267         struct cvmx_lmcx_phy_ctl_s {
6268 #ifdef __BIG_ENDIAN_BITFIELD
6269         uint64_t reserved_15_63               : 49;
6270         uint64_t rx_always_on                 : 1;  /**< Disable dynamic DDR3 IO Rx power gating */
6271         uint64_t lv_mode                      : 1;  /**< Low Voltage Mode (1.35V) */
6272         uint64_t ck_tune1                     : 1;  /**< Clock Tune */
6273         uint64_t ck_dlyout1                   : 4;  /**< Clock delay out setting */
6274         uint64_t ck_tune0                     : 1;  /**< Clock Tune */
6275         uint64_t ck_dlyout0                   : 4;  /**< Clock delay out setting */
6276         uint64_t loopback                     : 1;  /**< Loopback enable */
6277         uint64_t loopback_pos                 : 1;  /**< Loopback pos mode */
6278         uint64_t ts_stagger                   : 1;  /**< TS Stagger mode
6279                                                          This mode configures output drivers with 2-stage drive
6280                                                          strength to avoid undershoot issues on the bus when strong
6281                                                          drivers are suddenly turned on. When this mode is asserted,
6282                                                          Octeon will configure output drivers to be weak drivers
6283                                                          (60 ohm output impedance) at the first CK cycle, and
6284                                                          change drivers to the designated drive strengths specified
6285                                                          in $LMC(0)_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
6286                                                          at the following cycle */
6287 #else /* little-endian: identical fields, reverse declaration order */
6288         uint64_t ts_stagger                   : 1;
6289         uint64_t loopback_pos                 : 1;
6290         uint64_t loopback                     : 1;
6291         uint64_t ck_dlyout0                   : 4;
6292         uint64_t ck_tune0                     : 1;
6293         uint64_t ck_dlyout1                   : 4;
6294         uint64_t ck_tune1                     : 1;
6295         uint64_t lv_mode                      : 1;
6296         uint64_t rx_always_on                 : 1;
6297         uint64_t reserved_15_63               : 49;
6298 #endif
6299         } s;
6300         struct cvmx_lmcx_phy_ctl_s            cn61xx;
6301         struct cvmx_lmcx_phy_ctl_s            cn63xx;
6302         struct cvmx_lmcx_phy_ctl_cn63xxp1 {
6303 #ifdef __BIG_ENDIAN_BITFIELD
6304         uint64_t reserved_14_63               : 50;
6305         uint64_t lv_mode                      : 1;  /**< Low Voltage Mode (1.35V) */
6306         uint64_t ck_tune1                     : 1;  /**< Clock Tune */
6307         uint64_t ck_dlyout1                   : 4;  /**< Clock delay out setting */
6308         uint64_t ck_tune0                     : 1;  /**< Clock Tune */
6309         uint64_t ck_dlyout0                   : 4;  /**< Clock delay out setting */
6310         uint64_t loopback                     : 1;  /**< Loopback enable */
6311         uint64_t loopback_pos                 : 1;  /**< Loopback pos mode */
6312         uint64_t ts_stagger                   : 1;  /**< TS Stagger mode
6313                                                          This mode configures output drivers with 2-stage drive
6314                                                          strength to avoid undershoot issues on the bus when strong
6315                                                          drivers are suddenly turned on. When this mode is asserted,
6316                                                          Octeon will configure output drivers to be weak drivers
6317                                                          (60 ohm output impedance) at the first CK cycle, and
6318                                                          change drivers to the designated drive strengths specified
6319                                                          in $LMC(0)_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
6320                                                          at the following cycle */
6321 #else /* little-endian: identical fields, reverse declaration order */
6322         uint64_t ts_stagger                   : 1;
6323         uint64_t loopback_pos                 : 1;
6324         uint64_t loopback                     : 1;
6325         uint64_t ck_dlyout0                   : 4;
6326         uint64_t ck_tune0                     : 1;
6327         uint64_t ck_dlyout1                   : 4;
6328         uint64_t ck_tune1                     : 1;
6329         uint64_t lv_mode                      : 1;
6330         uint64_t reserved_14_63               : 50;
6331 #endif
6332         } cn63xxp1; /* 63xx pass 1: like s but without rx_always_on */
6333         struct cvmx_lmcx_phy_ctl_s            cn66xx;
6334         struct cvmx_lmcx_phy_ctl_s            cn68xx;
6335         struct cvmx_lmcx_phy_ctl_s            cn68xxp1;
6336         struct cvmx_lmcx_phy_ctl_s            cnf71xx;
6337 };
6338 typedef union cvmx_lmcx_phy_ctl cvmx_lmcx_phy_ctl_t;
6339
6340 /**
6341  * cvmx_lmc#_pll_bwctl
6342  *
6343  * LMC_PLL_BWCTL  = DDR PLL Bandwidth Control Register
6344  *
6345  */
6346 union cvmx_lmcx_pll_bwctl {
6347         uint64_t u64; /**< Whole register as a single 64-bit value */
6348         struct cvmx_lmcx_pll_bwctl_s {
6349 #ifdef __BIG_ENDIAN_BITFIELD
6350         uint64_t reserved_5_63                : 59;
6351         uint64_t bwupd                        : 1;  /**< Load this Bandwidth Register value into the PLL */
6352         uint64_t bwctl                        : 4;  /**< Bandwidth Control Register for DDR PLL */
6353 #else /* little-endian: identical fields, reverse declaration order */
6354         uint64_t bwctl                        : 4;
6355         uint64_t bwupd                        : 1;
6356         uint64_t reserved_5_63                : 59;
6357 #endif
6358         } s;
6359         struct cvmx_lmcx_pll_bwctl_s          cn30xx;
6360         struct cvmx_lmcx_pll_bwctl_s          cn31xx;
6361         struct cvmx_lmcx_pll_bwctl_s          cn38xx;
6362         struct cvmx_lmcx_pll_bwctl_s          cn38xxp2;
6363 };
6364 typedef union cvmx_lmcx_pll_bwctl cvmx_lmcx_pll_bwctl_t;
6365
6366 /**
6367  * cvmx_lmc#_pll_ctl
6368  *
6369  * LMC_PLL_CTL = LMC pll control
6370  *
6371  *
6372  * Notes:
6373  * This CSR is only relevant for LMC0. LMC1_PLL_CTL is not used.
6374  *
6375  * Exactly one of EN2, EN4, EN6, EN8, EN12, EN16 must be set.
6376  *
6377  * The resultant DDR_CK frequency is the DDR2_REF_CLK
6378  * frequency multiplied by:
6379  *
6380  *     (CLKF + 1) / ((CLKR + 1) * EN(2,4,6,8,12,16))
6381  *
6382  * The PLL frequency, which is:
6383  *
6384  *     (DDR2_REF_CLK freq) * ((CLKF + 1) / (CLKR + 1))
6385  *
6386  * must reside between 1.2 and 2.5 GHz. A faster PLL frequency is desirable if there is a choice.
6387  */
6388 union cvmx_lmcx_pll_ctl {
6389         uint64_t u64;
6390         struct cvmx_lmcx_pll_ctl_s {
6391 #ifdef __BIG_ENDIAN_BITFIELD
6392         uint64_t reserved_30_63               : 34;
6393         uint64_t bypass                       : 1;  /**< PLL Bypass */
6394         uint64_t fasten_n                     : 1;  /**< Should be set, especially when CLKF > ~80 */
6395         uint64_t div_reset                    : 1;  /**< Analog pll divider reset
6396                                                          De-assert at least 500*(CLKR+1) reference clock
6397                                                          cycles following RESET_N de-assertion. */
6398         uint64_t reset_n                      : 1;  /**< Analog pll reset
6399                                                          De-assert at least 5 usec after CLKF, CLKR,
6400                                                          and EN* are set up. */
6401         uint64_t clkf                         : 12; /**< Multiply reference by CLKF + 1
6402                                                          CLKF must be <= 128 */
6403         uint64_t clkr                         : 6;  /**< Divide reference by CLKR + 1 */
6404         uint64_t reserved_6_7                 : 2;
6405         uint64_t en16                         : 1;  /**< Divide output by 16 */
6406         uint64_t en12                         : 1;  /**< Divide output by 12 */
6407         uint64_t en8                          : 1;  /**< Divide output by 8 */
6408         uint64_t en6                          : 1;  /**< Divide output by 6 */
6409         uint64_t en4                          : 1;  /**< Divide output by 4 */
6410         uint64_t en2                          : 1;  /**< Divide output by 2 */
6411 #else
6412         uint64_t en2                          : 1;
6413         uint64_t en4                          : 1;
6414         uint64_t en6                          : 1;
6415         uint64_t en8                          : 1;
6416         uint64_t en12                         : 1;
6417         uint64_t en16                         : 1;
6418         uint64_t reserved_6_7                 : 2;
6419         uint64_t clkr                         : 6;
6420         uint64_t clkf                         : 12;
6421         uint64_t reset_n                      : 1;
6422         uint64_t div_reset                    : 1;
6423         uint64_t fasten_n                     : 1;
6424         uint64_t bypass                       : 1;
6425         uint64_t reserved_30_63               : 34;
6426 #endif
6427         } s;
6428         struct cvmx_lmcx_pll_ctl_cn50xx {
6429 #ifdef __BIG_ENDIAN_BITFIELD
6430         uint64_t reserved_29_63               : 35;
6431         uint64_t fasten_n                     : 1;  /**< Should be set, especially when CLKF > ~80 */
6432         uint64_t div_reset                    : 1;  /**< Analog pll divider reset
6433                                                          De-assert at least 500*(CLKR+1) reference clock
6434                                                          cycles following RESET_N de-assertion. */
6435         uint64_t reset_n                      : 1;  /**< Analog pll reset
6436                                                          De-assert at least 5 usec after CLKF, CLKR,
6437                                                          and EN* are set up. */
6438         uint64_t clkf                         : 12; /**< Multiply reference by CLKF + 1
6439                                                          CLKF must be <= 256 */
6440         uint64_t clkr                         : 6;  /**< Divide reference by CLKR + 1 */
6441         uint64_t reserved_6_7                 : 2;
6442         uint64_t en16                         : 1;  /**< Divide output by 16 */
6443         uint64_t en12                         : 1;  /**< Divide output by 12 */
6444         uint64_t en8                          : 1;  /**< Divide output by 8 */
6445         uint64_t en6                          : 1;  /**< Divide output by 6 */
6446         uint64_t en4                          : 1;  /**< Divide output by 4 */
6447         uint64_t en2                          : 1;  /**< Divide output by 2 */
6448 #else
6449         uint64_t en2                          : 1;
6450         uint64_t en4                          : 1;
6451         uint64_t en6                          : 1;
6452         uint64_t en8                          : 1;
6453         uint64_t en12                         : 1;
6454         uint64_t en16                         : 1;
6455         uint64_t reserved_6_7                 : 2;
6456         uint64_t clkr                         : 6;
6457         uint64_t clkf                         : 12;
6458         uint64_t reset_n                      : 1;
6459         uint64_t div_reset                    : 1;
6460         uint64_t fasten_n                     : 1;
6461         uint64_t reserved_29_63               : 35;
6462 #endif
6463         } cn50xx;
6464         struct cvmx_lmcx_pll_ctl_s            cn52xx;
6465         struct cvmx_lmcx_pll_ctl_s            cn52xxp1;
6466         struct cvmx_lmcx_pll_ctl_cn50xx       cn56xx;
6467         struct cvmx_lmcx_pll_ctl_cn56xxp1 {
6468 #ifdef __BIG_ENDIAN_BITFIELD
6469         uint64_t reserved_28_63               : 36;
6470         uint64_t div_reset                    : 1;  /**< Analog pll divider reset
6471                                                          De-assert at least 500*(CLKR+1) reference clock
6472                                                          cycles following RESET_N de-assertion. */
6473         uint64_t reset_n                      : 1;  /**< Analog pll reset
6474                                                          De-assert at least 5 usec after CLKF, CLKR,
6475                                                          and EN* are set up. */
6476         uint64_t clkf                         : 12; /**< Multiply reference by CLKF + 1
6477                                                          CLKF must be <= 128 */
6478         uint64_t clkr                         : 6;  /**< Divide reference by CLKR + 1 */
6479         uint64_t reserved_6_7                 : 2;
6480         uint64_t en16                         : 1;  /**< Divide output by 16 */
6481         uint64_t en12                         : 1;  /**< Divide output by 12 */
6482         uint64_t en8                          : 1;  /**< Divide output by 8 */
6483         uint64_t en6                          : 1;  /**< Divide output by 6 */
6484         uint64_t en4                          : 1;  /**< Divide output by 4 */
6485         uint64_t en2                          : 1;  /**< Divide output by 2 */
6486 #else
6487         uint64_t en2                          : 1;
6488         uint64_t en4                          : 1;
6489         uint64_t en6                          : 1;
6490         uint64_t en8                          : 1;
6491         uint64_t en12                         : 1;
6492         uint64_t en16                         : 1;
6493         uint64_t reserved_6_7                 : 2;
6494         uint64_t clkr                         : 6;
6495         uint64_t clkf                         : 12;
6496         uint64_t reset_n                      : 1;
6497         uint64_t div_reset                    : 1;
6498         uint64_t reserved_28_63               : 36;
6499 #endif
6500         } cn56xxp1;
6501         struct cvmx_lmcx_pll_ctl_cn56xxp1     cn58xx;
6502         struct cvmx_lmcx_pll_ctl_cn56xxp1     cn58xxp1;
6503 };
6504 typedef union cvmx_lmcx_pll_ctl cvmx_lmcx_pll_ctl_t;
6505
/**
 * cvmx_lmc#_pll_status
 *
 * LMC_PLL_STATUS = LMC pll status
 *
 */
union cvmx_lmcx_pll_status {
        uint64_t u64;
        struct cvmx_lmcx_pll_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_32_63               : 32;
        uint64_t ddr__nctl                    : 5;  /**< DDR nctl from compensation circuit */
        uint64_t ddr__pctl                    : 5;  /**< DDR pctl from compensation circuit */
        uint64_t reserved_2_21                : 20;
        uint64_t rfslip                       : 1;  /**< Reference clock slip */
        uint64_t fbslip                       : 1;  /**< Feedback clock slip */
#else
        uint64_t fbslip                       : 1;
        uint64_t rfslip                       : 1;
        uint64_t reserved_2_21                : 20;
        uint64_t ddr__pctl                    : 5;
        uint64_t ddr__nctl                    : 5;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        struct cvmx_lmcx_pll_status_s         cn50xx;
        struct cvmx_lmcx_pll_status_s         cn52xx;
        struct cvmx_lmcx_pll_status_s         cn52xxp1;
        struct cvmx_lmcx_pll_status_s         cn56xx;
        struct cvmx_lmcx_pll_status_s         cn56xxp1;
        struct cvmx_lmcx_pll_status_s         cn58xx;
        struct cvmx_lmcx_pll_status_cn58xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_2_63                : 62;
        uint64_t rfslip                       : 1;  /**< Reference clock slip */
        uint64_t fbslip                       : 1;  /**< Feedback clock slip */
#else
        uint64_t fbslip                       : 1;
        uint64_t rfslip                       : 1;
        uint64_t reserved_2_63                : 62;
#endif
        } cn58xxp1;
};
typedef union cvmx_lmcx_pll_status cvmx_lmcx_pll_status_t;
6550
/**
 * cvmx_lmc#_read_level_ctl
 *
 * Notes:
 * The HW writes and reads the cache block selected by ROW, COL, BNK and the rank as part of a read-leveling sequence for a rank.
 * A cache block write is 16 72-bit words. PATTERN selects the write value. For the first 8
 * words, the write value is the bit PATTERN<i> duplicated into a 72-bit vector. The write value of
 * the last 8 words is the inverse of the write value of the first 8 words.
 * See LMC*_READ_LEVEL_RANK*.
 */
union cvmx_lmcx_read_level_ctl {
        uint64_t u64;
        struct cvmx_lmcx_read_level_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_44_63               : 20;
        uint64_t rankmask                     : 4;  /**< Selects ranks to be leveled
                                                         to read-level rank i, set RANKMASK<i> */
        uint64_t pattern                      : 8;  /**< All DQ driven to PATTERN[burst], 0 <= burst <= 7
                                                         All DQ driven to ~PATTERN[burst-8], 8 <= burst <= 15 */
        uint64_t row                          : 16; /**< Row    address used to write/read data pattern */
        uint64_t col                          : 12; /**< Column address used to write/read data pattern */
        uint64_t reserved_3_3                 : 1;
        uint64_t bnk                          : 3;  /**< Bank   address used to write/read data pattern */
#else
        uint64_t bnk                          : 3;
        uint64_t reserved_3_3                 : 1;
        uint64_t col                          : 12;
        uint64_t row                          : 16;
        uint64_t pattern                      : 8;
        uint64_t rankmask                     : 4;
        uint64_t reserved_44_63               : 20;
#endif
        } s;
        struct cvmx_lmcx_read_level_ctl_s     cn52xx;
        struct cvmx_lmcx_read_level_ctl_s     cn52xxp1;
        struct cvmx_lmcx_read_level_ctl_s     cn56xx;
        struct cvmx_lmcx_read_level_ctl_s     cn56xxp1;
};
typedef union cvmx_lmcx_read_level_ctl cvmx_lmcx_read_level_ctl_t;
6590
/**
 * cvmx_lmc#_read_level_dbg
 *
 * Notes:
 * A given read of LMC*_READ_LEVEL_DBG returns the read-leveling pass/fail results for all possible
 * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled.
 * LMC*_READ_LEVEL_DBG[BYTE] selects the particular byte.
 * To get these pass/fail results for another different rank, you must run the hardware read-leveling
 * again. For example, it is possible to get the BITMASK results for every byte of every rank
 * if you run read-leveling separately for each rank, probing LMC*_READ_LEVEL_DBG between each
 * read-leveling.
 */
union cvmx_lmcx_read_level_dbg {
        uint64_t u64;
        struct cvmx_lmcx_read_level_dbg_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_32_63               : 32;
        uint64_t bitmask                      : 16; /**< Bitmask generated during deskew settings sweep
                                                         BITMASK[n]=0 means deskew setting n failed
                                                         BITMASK[n]=1 means deskew setting n passed
                                                         for 0 <= n <= 15 */
        uint64_t reserved_4_15                : 12;
        uint64_t byte                         : 4;  /**< 0 <= BYTE <= 8 */
#else
        uint64_t byte                         : 4;
        uint64_t reserved_4_15                : 12;
        uint64_t bitmask                      : 16;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        struct cvmx_lmcx_read_level_dbg_s     cn52xx;
        struct cvmx_lmcx_read_level_dbg_s     cn52xxp1;
        struct cvmx_lmcx_read_level_dbg_s     cn56xx;
        struct cvmx_lmcx_read_level_dbg_s     cn56xxp1;
};
typedef union cvmx_lmcx_read_level_dbg cvmx_lmcx_read_level_dbg_t;
6627
/**
 * cvmx_lmc#_read_level_rank#
 *
 * Notes:
 * This is four CSRs per LMC, one per each rank.
 * Each CSR is written by HW during a read-leveling sequence for the rank. (HW sets STATUS==3 after HW read-leveling completes for the rank.)
 * Each CSR may also be written by SW, but not while a read-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
 * Deskew setting is measured in units of 1/4 DCLK, so the above BYTE* values can range over 4 DCLKs.
 * SW initiates a HW read-leveling sequence by programming LMC*_READ_LEVEL_CTL and writing INIT_START=1 with SEQUENCE=1.
 * See LMC*_READ_LEVEL_CTL.
 */
union cvmx_lmcx_read_level_rankx {
        uint64_t u64;
        struct cvmx_lmcx_read_level_rankx_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_38_63               : 26;
        uint64_t status                       : 2;  /**< Indicates status of the read-levelling and where
                                                         the BYTE* programmings in <35:0> came from:
                                                         0 = BYTE* values are their reset value
                                                         1 = BYTE* values were set via a CSR write to this register
                                                         2 = read-leveling sequence currently in progress (BYTE* values are unpredictable)
                                                         3 = BYTE* values came from a complete read-leveling sequence */
        uint64_t byte8                        : 4;  /**< Deskew setting */
        uint64_t byte7                        : 4;  /**< Deskew setting */
        uint64_t byte6                        : 4;  /**< Deskew setting */
        uint64_t byte5                        : 4;  /**< Deskew setting */
        uint64_t byte4                        : 4;  /**< Deskew setting */
        uint64_t byte3                        : 4;  /**< Deskew setting */
        uint64_t byte2                        : 4;  /**< Deskew setting */
        uint64_t byte1                        : 4;  /**< Deskew setting */
        uint64_t byte0                        : 4;  /**< Deskew setting */
#else
        uint64_t byte0                        : 4;
        uint64_t byte1                        : 4;
        uint64_t byte2                        : 4;
        uint64_t byte3                        : 4;
        uint64_t byte4                        : 4;
        uint64_t byte5                        : 4;
        uint64_t byte6                        : 4;
        uint64_t byte7                        : 4;
        uint64_t byte8                        : 4;
        uint64_t status                       : 2;
        uint64_t reserved_38_63               : 26;
#endif
        } s;
        struct cvmx_lmcx_read_level_rankx_s   cn52xx;
        struct cvmx_lmcx_read_level_rankx_s   cn52xxp1;
        struct cvmx_lmcx_read_level_rankx_s   cn56xx;
        struct cvmx_lmcx_read_level_rankx_s   cn56xxp1;
};
typedef union cvmx_lmcx_read_level_rankx cvmx_lmcx_read_level_rankx_t;
6679
/**
 * cvmx_lmc#_reset_ctl
 *
 * Specify the RSL base addresses for the block
 *
 *
 * Notes:
 * DDR3RST - DDR3 DRAM parts have a new RESET#
 *   pin that wasn't present in DDR2 parts. The
 *   DDR3RST CSR field controls the assertion of
 *   the new 6xxx pin that attaches to RESET#.
 *   When DDR3RST is set, 6xxx asserts RESET#.
 *   When DDR3RST is clear, 6xxx de-asserts
 *   RESET#.
 *
 *   DDR3RST is set on a cold reset. Warm and
 *   soft chip resets do not affect the DDR3RST
 *   value. Outside of cold reset, only software
 *   CSR writes change the DDR3RST value.
 *
 * DDR3PWARM - Enables preserve mode during a warm
 *   reset. When set, the DDR3 controller hardware
 *   automatically puts the attached DDR3 DRAM parts
 *   into self refresh (see LMC*CONFIG[SEQUENCE] below) at the beginning of a warm
 *   reset sequence, provided that the DDR3 controller
 *   is up. When clear, the DDR3 controller hardware
 *   does not put the attached DDR3 DRAM parts into
 *   self-refresh during a warm reset sequence.
 *
 *   DDR3PWARM is cleared on a cold reset. Warm and
 *   soft chip resets do not affect the DDR3PWARM
 *   value. Outside of cold reset, only software
 *   CSR writes change the DDR3PWARM value.
 *
 *   Note that if a warm reset follows a soft reset,
 *   DDR3PWARM has no effect, as the DDR3 controller
 *   is no longer up after any cold/warm/soft
 *   reset sequence.
 *
 * DDR3PSOFT - Enables preserve mode during a soft
 *   reset. When set, the DDR3 controller hardware
 *   automatically puts the attached DDR3 DRAM parts
 *   into self refresh (see LMC*CONFIG[SEQUENCE] below) at the beginning of a soft
 *   reset sequence, provided that the DDR3 controller
 *   is up. When clear, the DDR3 controller hardware
 *   does not put the attached DDR3 DRAM parts into
 *   self-refresh during a soft reset sequence.
 *
 *   DDR3PSOFT is cleared on a cold reset. Warm and
 *   soft chip resets do not affect the DDR3PSOFT
 *   value. Outside of cold reset, only software
 *   CSR writes change the DDR3PSOFT value.
 *
 * DDR3PSV - May be useful for system software to
 *   determine when the DDR3 contents have been
 *   preserved.
 *
 *   Cleared by hardware during a cold reset. Never
 *   cleared by hardware during a warm/soft reset.
 *   Set by hardware during a warm/soft reset if
 *   the hardware automatically put the DDR3 DRAM
 *   into self-refresh during the reset sequence.
 *
 *   Can also be written by software (to any value).
 */
union cvmx_lmcx_reset_ctl {
        uint64_t u64;
        struct cvmx_lmcx_reset_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_4_63                : 60;
        uint64_t ddr3psv                      : 1;  /**< Memory Reset
                                                         1 = DDR contents preserved */
        uint64_t ddr3psoft                    : 1;  /**< Memory Reset
                                                         1 = Enable Preserve mode during soft reset */
        uint64_t ddr3pwarm                    : 1;  /**< Memory Reset
                                                         1 = Enable Preserve mode during warm reset */
        uint64_t ddr3rst                      : 1;  /**< Memory Reset
                                                         0 = Reset asserted
                                                         1 = Reset de-asserted */
#else
        uint64_t ddr3rst                      : 1;
        uint64_t ddr3pwarm                    : 1;
        uint64_t ddr3psoft                    : 1;
        uint64_t ddr3psv                      : 1;
        uint64_t reserved_4_63                : 60;
#endif
        } s;
        struct cvmx_lmcx_reset_ctl_s          cn61xx;
        struct cvmx_lmcx_reset_ctl_s          cn63xx;
        struct cvmx_lmcx_reset_ctl_s          cn63xxp1;
        struct cvmx_lmcx_reset_ctl_s          cn66xx;
        struct cvmx_lmcx_reset_ctl_s          cn68xx;
        struct cvmx_lmcx_reset_ctl_s          cn68xxp1;
        struct cvmx_lmcx_reset_ctl_s          cnf71xx;
};
typedef union cvmx_lmcx_reset_ctl cvmx_lmcx_reset_ctl_t;
6776
/**
 * cvmx_lmc#_rlevel_ctl
 */
union cvmx_lmcx_rlevel_ctl {
        uint64_t u64;
        struct cvmx_lmcx_rlevel_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_22_63               : 42;
        uint64_t delay_unload_3               : 1;  /**< When set, unload the PHY silo one cycle later
                                                         during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 3
                                                         DELAY_UNLOAD_3 should normally be set, particularly at higher speeds. */
        uint64_t delay_unload_2               : 1;  /**< When set, unload the PHY silo one cycle later
                                                         during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 2
                                                         DELAY_UNLOAD_2 should normally not be set. */
        uint64_t delay_unload_1               : 1;  /**< When set, unload the PHY silo one cycle later
                                                         during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 1
                                                         DELAY_UNLOAD_1 should normally not be set. */
        uint64_t delay_unload_0               : 1;  /**< When set, unload the PHY silo one cycle later
                                                         during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 0
                                                         DELAY_UNLOAD_0 should normally not be set. */
        uint64_t bitmask                      : 8;  /**< Mask to select bit lanes on which read-leveling
                                                         feedback is returned when OR_DIS is set to 1 */
        uint64_t or_dis                       : 1;  /**< Disable or'ing of bits in a byte lane when computing
                                                         the read-leveling bitmask
                                                         OR_DIS should normally not be set. */
        uint64_t offset_en                    : 1;  /**< When set, LMC attempts to select the read-leveling
                                                         setting that is LMC*RLEVEL_CTL[OFFSET] settings earlier than the
                                                         last passing read-leveling setting in the largest
                                                         contiguous sequence of passing settings.
                                                         When clear, or if the setting selected by LMC*RLEVEL_CTL[OFFSET]
                                                         did not pass, LMC selects the middle setting in the
                                                         largest contiguous sequence of passing settings,
                                                         rounding earlier when necessary. */
        uint64_t offset                       : 4;  /**< The offset used when LMC*RLEVEL_CTL[OFFSET] is set */
        uint64_t byte                         : 4;  /**< 0 <= BYTE <= 8
                                                         Byte index for which bitmask results are saved
                                                         in LMC*_RLEVEL_DBG */
#else
        uint64_t byte                         : 4;
        uint64_t offset                       : 4;
        uint64_t offset_en                    : 1;
        uint64_t or_dis                       : 1;
        uint64_t bitmask                      : 8;
        uint64_t delay_unload_0               : 1;
        uint64_t delay_unload_1               : 1;
        uint64_t delay_unload_2               : 1;
        uint64_t delay_unload_3               : 1;
        uint64_t reserved_22_63               : 42;
#endif
        } s;
        struct cvmx_lmcx_rlevel_ctl_s         cn61xx;
        struct cvmx_lmcx_rlevel_ctl_s         cn63xx;
        struct cvmx_lmcx_rlevel_ctl_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_9_63                : 55;
        uint64_t offset_en                    : 1;  /**< When set, LMC attempts to select the read-leveling
                                                         setting that is LMC*RLEVEL_CTL[OFFSET] settings earlier than the
                                                         last passing read-leveling setting in the largest
                                                         contiguous sequence of passing settings.
                                                         When clear, or if the setting selected by LMC*RLEVEL_CTL[OFFSET]
                                                         did not pass, LMC selects the middle setting in the
                                                         largest contiguous sequence of passing settings,
                                                         rounding earlier when necessary. */
        uint64_t offset                       : 4;  /**< The offset used when LMC*RLEVEL_CTL[OFFSET] is set */
        uint64_t byte                         : 4;  /**< 0 <= BYTE <= 8
                                                         Byte index for which bitmask results are saved
                                                         in LMC*_RLEVEL_DBG */
#else
        uint64_t byte                         : 4;
        uint64_t offset                       : 4;
        uint64_t offset_en                    : 1;
        uint64_t reserved_9_63                : 55;
#endif
        } cn63xxp1;
        struct cvmx_lmcx_rlevel_ctl_s         cn66xx;
        struct cvmx_lmcx_rlevel_ctl_s         cn68xx;
        struct cvmx_lmcx_rlevel_ctl_s         cn68xxp1;
        struct cvmx_lmcx_rlevel_ctl_s         cnf71xx;
};
typedef union cvmx_lmcx_rlevel_ctl cvmx_lmcx_rlevel_ctl_t;
6857
/**
 * cvmx_lmc#_rlevel_dbg
 *
 * Notes:
 * A given read of LMC*_RLEVEL_DBG returns the read-leveling pass/fail results for all possible
 * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled.
 * LMC*_RLEVEL_CTL[BYTE] selects the particular byte.
 *
 * To get these pass/fail results for another different rank, you must run the hardware read-leveling
 * again. For example, it is possible to get the BITMASK results for every byte of every rank
 * if you run read-leveling separately for each rank, probing LMC*_RLEVEL_DBG between each
 * read-leveling.
 */
union cvmx_lmcx_rlevel_dbg {
        uint64_t u64;
        struct cvmx_lmcx_rlevel_dbg_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t bitmask                      : 64; /**< Bitmask generated during deskew settings sweep
                                                         BITMASK[n]=0 means deskew setting n failed
                                                         BITMASK[n]=1 means deskew setting n passed
                                                         for 0 <= n <= 63 */
#else
        uint64_t bitmask                      : 64;
#endif
        } s;
        struct cvmx_lmcx_rlevel_dbg_s         cn61xx;
        struct cvmx_lmcx_rlevel_dbg_s         cn63xx;
        struct cvmx_lmcx_rlevel_dbg_s         cn63xxp1;
        struct cvmx_lmcx_rlevel_dbg_s         cn66xx;
        struct cvmx_lmcx_rlevel_dbg_s         cn68xx;
        struct cvmx_lmcx_rlevel_dbg_s         cn68xxp1;
        struct cvmx_lmcx_rlevel_dbg_s         cnf71xx;
};
typedef union cvmx_lmcx_rlevel_dbg cvmx_lmcx_rlevel_dbg_t;
6892
/**
 * cvmx_lmc#_rlevel_rank#
 *
 * Notes:
 * This is four CSRs per LMC, one per each rank.
 *
 * Deskew setting is measured in units of 1/4 CK, so the above BYTE* values can range over 16 CKs.
 *
 * Each CSR is written by HW during a read-leveling sequence for the rank. (HW sets STATUS==3 after HW read-leveling completes for the rank.)
 * If HW is unable to find a match per LMC*_RLEVEL_CTL[OFFSET_ENA] and LMC*_RLEVEL_CTL[OFFSET], then HW will set LMC*_RLEVEL_RANKi[BYTE*<5:0>]
 * to  0.
 *
 * Each CSR may also be written by SW, but not while a read-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
 *
 * SW initiates a HW read-leveling sequence by programming LMC*_RLEVEL_CTL and writing INIT_START=1 with SEQUENCE=1.
 * See LMC*_RLEVEL_CTL.
 *
 * LMC*_RLEVEL_RANKi values for ranks i without attached DRAM should be set such that
 * they do not increase the range of possible BYTE values for any byte
 * lane. The easiest way to do this is to set
 *     LMC*_RLEVEL_RANKi = LMC*_RLEVEL_RANKj,
 * where j is some rank with attached DRAM whose LMC*_RLEVEL_RANKj is already fully initialized.
 */
union cvmx_lmcx_rlevel_rankx {
        uint64_t u64;
        struct cvmx_lmcx_rlevel_rankx_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_56_63               : 8;
        uint64_t status                       : 2;  /**< Indicates status of the read-levelling and where
                                                         the BYTE* programmings in <35:0> came from:
                                                         0 = BYTE* values are their reset value
                                                         1 = BYTE* values were set via a CSR write to this register
                                                         2 = read-leveling sequence currently in progress (BYTE* values are unpredictable)
                                                         3 = BYTE* values came from a complete read-leveling sequence */
        uint64_t byte8                        : 6;  /**< Deskew setting
                                                         When ECC DRAM is not present (i.e. when DRAM is not
                                                         attached to chip signals DDR_CBS_0_* and DDR_CB[7:0]),
                                                         SW should write BYTE8 to a value that does
                                                         not increase the range of possible BYTE* values. The
                                                         easiest way to do this is to set
                                                         LMC*_RLEVEL_RANK*[BYTE8] = LMC*_RLEVEL_RANK*[BYTE0]
                                                         when there is no ECC DRAM, using the final BYTE0 value. */
        uint64_t byte7                        : 6;  /**< Deskew setting */
        uint64_t byte6                        : 6;  /**< Deskew setting */
        uint64_t byte5                        : 6;  /**< Deskew setting */
        uint64_t byte4                        : 6;  /**< Deskew setting */
        uint64_t byte3                        : 6;  /**< Deskew setting */
        uint64_t byte2                        : 6;  /**< Deskew setting */
        uint64_t byte1                        : 6;  /**< Deskew setting */
        uint64_t byte0                        : 6;  /**< Deskew setting */
#else
        uint64_t byte0                        : 6;
        uint64_t byte1                        : 6;
        uint64_t byte2                        : 6;
        uint64_t byte3                        : 6;
        uint64_t byte4                        : 6;
        uint64_t byte5                        : 6;
        uint64_t byte6                        : 6;
        uint64_t byte7                        : 6;
        uint64_t byte8                        : 6;
        uint64_t status                       : 2;
        uint64_t reserved_56_63               : 8;
#endif
        } s;
        struct cvmx_lmcx_rlevel_rankx_s       cn61xx;
        struct cvmx_lmcx_rlevel_rankx_s       cn63xx;
        struct cvmx_lmcx_rlevel_rankx_s       cn63xxp1;
        struct cvmx_lmcx_rlevel_rankx_s       cn66xx;
        struct cvmx_lmcx_rlevel_rankx_s       cn68xx;
        struct cvmx_lmcx_rlevel_rankx_s       cn68xxp1;
        struct cvmx_lmcx_rlevel_rankx_s       cnf71xx;
};
typedef union cvmx_lmcx_rlevel_rankx cvmx_lmcx_rlevel_rankx_t;
6966
/**
 * cvmx_lmc#_rodt_comp_ctl
 *
 * LMC_RODT_COMP_CTL = LMC Compensation control
 *
 */
union cvmx_lmcx_rodt_comp_ctl {
        uint64_t u64;
        struct cvmx_lmcx_rodt_comp_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_17_63               : 47;
        uint64_t enable                       : 1;  /**< 0=not enabled, 1=enable */
        uint64_t reserved_12_15               : 4;
        uint64_t nctl                         : 4;  /**< Compensation control bits */
        uint64_t reserved_5_7                 : 3;
        uint64_t pctl                         : 5;  /**< Compensation control bits */
#else
        uint64_t pctl                         : 5;
        uint64_t reserved_5_7                 : 3;
        uint64_t nctl                         : 4;
        uint64_t reserved_12_15               : 4;
        uint64_t enable                       : 1;
        uint64_t reserved_17_63               : 47;
#endif
        } s;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn50xx;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn52xx;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn52xxp1;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn56xx;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn56xxp1;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn58xx;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn58xxp1;
};
typedef union cvmx_lmcx_rodt_comp_ctl cvmx_lmcx_rodt_comp_ctl_t;
7001
7002 /**
7003  * cvmx_lmc#_rodt_ctl
7004  *
7005  * LMC_RODT_CTL = Obsolete LMC Read OnDieTermination control
7006  * See the description in LMC_WODT_CTL1. On Reads, Octeon only supports turning on ODT's in
7007  * the lower 2 DIMM's with the masks as below.
7008  *
7009  * Notes:
7010  * When a given RANK in position N is selected, the RODT _HI and _LO masks for that position are used.
7011  * Mask[3:0] is used for RODT control of the RANKs in positions 3, 2, 1, and 0, respectively.
7012  * In 64b mode, DIMMs are assumed to be ordered in the following order:
7013  *  position 3: [unused        , DIMM1_RANK1_LO]
7014  *  position 2: [unused        , DIMM1_RANK0_LO]
7015  *  position 1: [unused        , DIMM0_RANK1_LO]
7016  *  position 0: [unused        , DIMM0_RANK0_LO]
7017  * In 128b mode, DIMMs are assumed to be ordered in the following order:
7018  *  position 3: [DIMM3_RANK1_HI, DIMM1_RANK1_LO]
7019  *  position 2: [DIMM3_RANK0_HI, DIMM1_RANK0_LO]
7020  *  position 1: [DIMM2_RANK1_HI, DIMM0_RANK1_LO]
7021  *  position 0: [DIMM2_RANK0_HI, DIMM0_RANK0_LO]
7022  */
7023 union cvmx_lmcx_rodt_ctl {
7024         uint64_t u64;
7025         struct cvmx_lmcx_rodt_ctl_s {
7026 #ifdef __BIG_ENDIAN_BITFIELD
7027         uint64_t reserved_32_63               : 32;
7028         uint64_t rodt_hi3                     : 4;  /**< Read ODT mask for position 3, data[127:64] */
7029         uint64_t rodt_hi2                     : 4;  /**< Read ODT mask for position 2, data[127:64] */
7030         uint64_t rodt_hi1                     : 4;  /**< Read ODT mask for position 1, data[127:64] */
7031         uint64_t rodt_hi0                     : 4;  /**< Read ODT mask for position 0, data[127:64] */
7032         uint64_t rodt_lo3                     : 4;  /**< Read ODT mask for position 3, data[ 63: 0] */
7033         uint64_t rodt_lo2                     : 4;  /**< Read ODT mask for position 2, data[ 63: 0] */
7034         uint64_t rodt_lo1                     : 4;  /**< Read ODT mask for position 1, data[ 63: 0] */
7035         uint64_t rodt_lo0                     : 4;  /**< Read ODT mask for position 0, data[ 63: 0] */
7036 #else
7037         uint64_t rodt_lo0                     : 4;
7038         uint64_t rodt_lo1                     : 4;
7039         uint64_t rodt_lo2                     : 4;
7040         uint64_t rodt_lo3                     : 4;
7041         uint64_t rodt_hi0                     : 4;
7042         uint64_t rodt_hi1                     : 4;
7043         uint64_t rodt_hi2                     : 4;
7044         uint64_t rodt_hi3                     : 4;
7045         uint64_t reserved_32_63               : 32;
7046 #endif
7047         } s;
7048         struct cvmx_lmcx_rodt_ctl_s           cn30xx;
7049         struct cvmx_lmcx_rodt_ctl_s           cn31xx;
7050         struct cvmx_lmcx_rodt_ctl_s           cn38xx;
7051         struct cvmx_lmcx_rodt_ctl_s           cn38xxp2;
7052         struct cvmx_lmcx_rodt_ctl_s           cn50xx;
7053         struct cvmx_lmcx_rodt_ctl_s           cn52xx;
7054         struct cvmx_lmcx_rodt_ctl_s           cn52xxp1;
7055         struct cvmx_lmcx_rodt_ctl_s           cn56xx;
7056         struct cvmx_lmcx_rodt_ctl_s           cn56xxp1;
7057         struct cvmx_lmcx_rodt_ctl_s           cn58xx;
7058         struct cvmx_lmcx_rodt_ctl_s           cn58xxp1;
7059 };
7060 typedef union cvmx_lmcx_rodt_ctl cvmx_lmcx_rodt_ctl_t;
7061
7062 /**
7063  * cvmx_lmc#_rodt_mask
7064  *
7065  * LMC_RODT_MASK = LMC Read OnDieTermination mask
7066  * System designers may desire to terminate DQ/DQS lines for higher frequency DDR operations
7067  * especially on a multi-rank system. DDR3 DQ/DQS I/O's have built in
7068  * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
7069  * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts
7070  * in that DIMM. System designers may prefer different combinations of ODT ON's for reads
7071  * into different ranks. Octeon supports full programmability by way of the mask register below.
7072  * Each Rank position has its own 8-bit programmable field.
7073  * When the controller does a read to that rank, it sets the 4 ODT pins to the MASK pins below.
7074  * For eg., When doing a read from Rank0, a system designer may desire to terminate the lines
7075  * with the resistor on DIMM0/Rank1. The mask RODT_D0_R0 would then be [00000010].
7076  * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
7077  * required, write 0 in this register. Note that, as per the DDR3 specifications, the ODT pin
7078  * for the rank that is being read should always be 0.
7079  *
7080  * Notes:
7081  * When a given RANK is selected, the RODT mask for that RANK is used.  The resulting RODT mask is
7082  * driven to the DIMMs in the following manner:
7083  *             RANK_ENA=1                    RANK_ENA=0
7084  * Mask[3] -> DIMM1_ODT_1                    MBZ
7085  * Mask[2] -> DIMM1_ODT_0                    DIMM1_ODT_0
7086  * Mask[1] -> DIMM0_ODT_1                    MBZ
7087  * Mask[0] -> DIMM0_ODT_0                    DIMM0_ODT_0
7088  *
7089  * LMC always reads entire cache blocks and always reads them via two consecutive
7090  * read CAS operations to the same rank+bank+row spaced exactly 4 CK's apart.
7091  * When a RODT mask bit is set, LMC asserts the OCTEON ODT output
7092  * pin(s) starting (CL - CWL) CK's after the first read CAS operation. Then, OCTEON
7093  * normally continues to assert the ODT output pin(s) for 9+LMC*_CONTROL[RODT_BPRCH] more CK's
7094  * - for a total of 10+LMC*_CONTROL[RODT_BPRCH] CK's for the entire cache block read -
7095  * through the second read CAS operation of the cache block,
7096  * satisfying the 6 CK DDR3 ODTH8 requirements.
7097  * But it is possible for OCTEON to issue two cache block reads separated by as few as
7098  * RtR = 8 or 9 (10 if LMC*_CONTROL[RODT_BPRCH]=1) CK's. In that case, OCTEON asserts the ODT output pin(s)
7099  * for the RODT mask of the first cache block read for RtR CK's, then asserts
7100  * the ODT output pin(s) for the RODT mask of the second cache block read for 10+LMC*_CONTROL[RODT_BPRCH] CK's
7101  * (or less if a third cache block read follows within 8 or 9 (or 10) CK's of this second cache block read).
7102  * Note that it may be necessary to force LMC to space back-to-back cache block reads
7103  * to different ranks apart by at least 10+LMC*_CONTROL[RODT_BPRCH] CK's to prevent DDR3 ODTH8 violations.
7104  */
7105 union cvmx_lmcx_rodt_mask {
7106         uint64_t u64;
7107         struct cvmx_lmcx_rodt_mask_s {
7108 #ifdef __BIG_ENDIAN_BITFIELD
7109         uint64_t rodt_d3_r1                   : 8;  /**< Read ODT mask DIMM3, RANK1/DIMM3 in SingleRanked
7110                                                          *UNUSED IN 6xxx, and MBZ* */
7111         uint64_t rodt_d3_r0                   : 8;  /**< Read ODT mask DIMM3, RANK0
7112                                                          *UNUSED IN 6xxx, and MBZ* */
7113         uint64_t rodt_d2_r1                   : 8;  /**< Read ODT mask DIMM2, RANK1/DIMM2 in SingleRanked
7114                                                          *UNUSED IN 6xxx, and MBZ* */
7115         uint64_t rodt_d2_r0                   : 8;  /**< Read ODT mask DIMM2, RANK0
7116                                                          *UNUSED IN 6xxx, and MBZ* */
7117         uint64_t rodt_d1_r1                   : 8;  /**< Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked
7118                                                          if (RANK_ENA) then
7119                                                              RODT_D1_R1[3] must be 0
7120                                                          else
7121                                                              RODT_D1_R1[3:0] is not used and MBZ
7122                                                          *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
7123         uint64_t rodt_d1_r0                   : 8;  /**< Read ODT mask DIMM1, RANK0
7124                                                          if (RANK_ENA) then
7125                                                              RODT_D1_R0[2] must be 0
7126                                                          else
7127                                                              RODT_D1_R0[3:2,1] must be 0
7128                                                          *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
7129         uint64_t rodt_d0_r1                   : 8;  /**< Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked
7130                                                          if (RANK_ENA) then
7131                                                              RODT_D0_R1[1] must be 0
7132                                                          else
7133                                                              RODT_D0_R1[3:0] is not used and MBZ
7134                                                          *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
7135         uint64_t rodt_d0_r0                   : 8;  /**< Read ODT mask DIMM0, RANK0
7136                                                          if (RANK_ENA) then
7137                                                              RODT_D0_R0[0] must be 0
7138                                                          else
7139                                                              RODT_D0_R0[1:0,3] must be 0
7140                                                          *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
7141 #else
7142         uint64_t rodt_d0_r0                   : 8;
7143         uint64_t rodt_d0_r1                   : 8;
7144         uint64_t rodt_d1_r0                   : 8;
7145         uint64_t rodt_d1_r1                   : 8;
7146         uint64_t rodt_d2_r0                   : 8;
7147         uint64_t rodt_d2_r1                   : 8;
7148         uint64_t rodt_d3_r0                   : 8;
7149         uint64_t rodt_d3_r1                   : 8;
7150 #endif
7151         } s;
7152         struct cvmx_lmcx_rodt_mask_s          cn61xx;
7153         struct cvmx_lmcx_rodt_mask_s          cn63xx;
7154         struct cvmx_lmcx_rodt_mask_s          cn63xxp1;
7155         struct cvmx_lmcx_rodt_mask_s          cn66xx;
7156         struct cvmx_lmcx_rodt_mask_s          cn68xx;
7157         struct cvmx_lmcx_rodt_mask_s          cn68xxp1;
7158         struct cvmx_lmcx_rodt_mask_s          cnf71xx;
7159 };
7160 typedef union cvmx_lmcx_rodt_mask cvmx_lmcx_rodt_mask_t;
7161
7162 /**
7163  * cvmx_lmc#_scramble_cfg0
7164  *
7165  * LMC_SCRAMBLE_CFG0 = LMC Scramble Config0
7166  *
7167  */
7168 union cvmx_lmcx_scramble_cfg0 {
7169         uint64_t u64;
7170         struct cvmx_lmcx_scramble_cfg0_s {
7171 #ifdef __BIG_ENDIAN_BITFIELD
7172         uint64_t key                          : 64; /**< 64-bit scramble key for data (companion to the address key in LMC*_SCRAMBLE_CFG1) */
7173 #else
7174         uint64_t key                          : 64;
7175 #endif
7176         } s;
7177         struct cvmx_lmcx_scramble_cfg0_s      cn61xx;
7178         struct cvmx_lmcx_scramble_cfg0_s      cn66xx;
7179         struct cvmx_lmcx_scramble_cfg0_s      cnf71xx;
7180 };
7181 typedef union cvmx_lmcx_scramble_cfg0 cvmx_lmcx_scramble_cfg0_t;
7182
7183 /**
7184  * cvmx_lmc#_scramble_cfg1
7185  *
7186  * LMC_SCRAMBLE_CFG1 = LMC Scramble Config1
7187  *
7188  *
7189  * Notes:
7190  * Address scrambling usually maps addresses into the same rank. Exceptions are when LMC_NXM[CS_MASK] requires
7191  * aliasing that uses the lowest, legal chip select(s).
7192  */
7193 union cvmx_lmcx_scramble_cfg1 {
7194         uint64_t u64;
7195         struct cvmx_lmcx_scramble_cfg1_s {
7196 #ifdef __BIG_ENDIAN_BITFIELD
7197         uint64_t key                          : 64; /**< 64-bit scramble key for addresses (companion to the data key in LMC*_SCRAMBLE_CFG0) */
7198 #else
7199         uint64_t key                          : 64;
7200 #endif
7201         } s;
7202         struct cvmx_lmcx_scramble_cfg1_s      cn61xx;
7203         struct cvmx_lmcx_scramble_cfg1_s      cn66xx;
7204         struct cvmx_lmcx_scramble_cfg1_s      cnf71xx;
7205 };
7206 typedef union cvmx_lmcx_scramble_cfg1 cvmx_lmcx_scramble_cfg1_t;
7207
7208 /**
7209  * cvmx_lmc#_scrambled_fadr
7210  *
7211  * LMC_SCRAMBLED_FADR = LMC Scrambled Failing Address Register (SEC/DED/NXM)
7212  *
7213  * This register only captures the first transaction with ecc/nxm errors. A DED/NXM error can
7214  * over-write this register with its failing addresses if the first error was a SEC. If you write
7215  * LMC*_CONFIG->SEC_ERR/DED_ERR/NXM_ERR then it will clear the error bits and capture the
7216  * next failing address.
7217  *
7218  * If FDIMM is 2 that means the error is in the higher bits DIMM.
7219  *
7220  * Notes:
7221  * LMC*_FADR captures the failing pre-scrambled address location (split into dimm, bunk, bank, etc). If
7222  * scrambling is off, then LMC*_FADR will also capture the failing physical location in the DRAM parts.
7223  *
7224  * LMC*_SCRAMBLED_FADR captures the actual failing address location in the physical DRAM parts, i.e.,
7225  * a. if scrambling is on, LMC*_SCRAMBLE_FADR contains the failing physical location in the DRAM parts (split
7226  *    into dimm, bunk, bank, etc)
7227  * b. if scrambling is off, the pre-scramble and post-scramble addresses are the same, and so the contents of
7228  *    LMC*_SCRAMBLED_FADR match the contents of LMC*_FADR
7229  */
7230 union cvmx_lmcx_scrambled_fadr {
7231         uint64_t u64;
7232         struct cvmx_lmcx_scrambled_fadr_s {
7233 #ifdef __BIG_ENDIAN_BITFIELD
7234         uint64_t reserved_36_63               : 28;
7235         uint64_t fdimm                        : 2;  /**< Failing DIMM# */
7236         uint64_t fbunk                        : 1;  /**< Failing Rank */
7237         uint64_t fbank                        : 3;  /**< Failing Bank[2:0] */
7238         uint64_t frow                         : 16; /**< Failing Row Address[15:0] */
7239         uint64_t fcol                         : 14; /**< Failing Column Address[13:0]
7240                                                          Technically, represents the address of the 128b data
7241                                                          that had an ecc error, i.e., fcol[0] is always 0. Can
7242                                                          be used in conjunction with LMC*_CONFIG[DED_ERR] to
7243                                                          isolate the 64b chunk of data in error */
7244 #else
7245         uint64_t fcol                         : 14;
7246         uint64_t frow                         : 16;
7247         uint64_t fbank                        : 3;
7248         uint64_t fbunk                        : 1;
7249         uint64_t fdimm                        : 2;
7250         uint64_t reserved_36_63               : 28;
7251 #endif
7252         } s;
7253         struct cvmx_lmcx_scrambled_fadr_s     cn61xx;
7254         struct cvmx_lmcx_scrambled_fadr_s     cn66xx;
7255         struct cvmx_lmcx_scrambled_fadr_s     cnf71xx;
7256 };
7257 typedef union cvmx_lmcx_scrambled_fadr cvmx_lmcx_scrambled_fadr_t;
7258
7259 /**
7260  * cvmx_lmc#_slot_ctl0
7261  *
7262  * LMC_SLOT_CTL0 = LMC Slot Control0
7263  * This register is an assortment of various control fields needed by the memory controller
7264  *
7265  * Notes:
7266  * If SW has not previously written to this register (since the last DRESET),
7267  * HW updates the fields in this register to the minimum allowed value
7268  * when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn, LMC*_CONTROL and
7269  * LMC*_MODEREG_PARAMS0 CSR's change. Ideally, only read this register
7270  * after LMC has been initialized and LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn
7271  * have valid data.
7272  *
7273  * The interpretation of the fields in this CSR depends on LMC*_CONFIG[DDR2T]:
7274  *  - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
7275  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
7276  *    from different cache blocks.
7277  *  - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
7278  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
7279  *    from different cache blocks. FieldValue = 0 is always illegal in this
7280  *    case.
7281  *
7282  * The hardware-calculated minimums are:
7283  *
7284  * min R2R_INIT = 1 - LMC*_CONFIG[DDR2T]
7285  * min R2W_INIT = 5 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew) - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
7286  * min W2R_INIT = 2 - LMC*_CONFIG[DDR2T] + LMC*_TIMING_PARAMS1[TWTR] + WL
7287  * min W2W_INIT = 1 - LMC*_CONFIG[DDR2T]
7288  *
7289  * where
7290  *
7291  * RL        = CL  + AL (LMC*_MODEREG_PARAMS0[CL] selects CL, LMC*_MODEREG_PARAMS0[AL] selects AL)
7292  * WL        = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
7293  * MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1                          (max is across all ranks i (0..3) and bytes j (0..8))
7294  * MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX]     (min is across all ranks i (0..3) and bytes j (0..8))
7295  *
7296  * R2W_INIT has 1 CK cycle built in for OCTEON-internal ODT settling/channel turnaround time.
7297  */
7298 union cvmx_lmcx_slot_ctl0 {
7299         uint64_t u64;
7300         struct cvmx_lmcx_slot_ctl0_s {
7301 #ifdef __BIG_ENDIAN_BITFIELD
7302         uint64_t reserved_24_63               : 40;
7303         uint64_t w2w_init                     : 6;  /**< Write-to-write spacing control
7304                                                          for back to back write followed by write cache block
7305                                                          accesses to the same rank and DIMM (CK cycles; see Notes) */
7306         uint64_t w2r_init                     : 6;  /**< Write-to-read spacing control
7307                                                          for back to back write followed by read cache block
7308                                                          accesses to the same rank and DIMM (CK cycles; see Notes) */
7309         uint64_t r2w_init                     : 6;  /**< Read-to-write spacing control
7310                                                          for back to back read followed by write cache block
7311                                                          accesses to the same rank and DIMM (CK cycles; see Notes) */
7312         uint64_t r2r_init                     : 6;  /**< Read-to-read spacing control
7313                                                          for back to back read followed by read cache block
7314                                                          accesses to the same rank and DIMM (CK cycles; see Notes) */
7315 #else
7316         uint64_t r2r_init                     : 6;
7317         uint64_t r2w_init                     : 6;
7318         uint64_t w2r_init                     : 6;
7319         uint64_t w2w_init                     : 6;
7320         uint64_t reserved_24_63               : 40;
7321 #endif
7322         } s;
7323         struct cvmx_lmcx_slot_ctl0_s          cn61xx;
7324         struct cvmx_lmcx_slot_ctl0_s          cn63xx;
7325         struct cvmx_lmcx_slot_ctl0_s          cn63xxp1;
7326         struct cvmx_lmcx_slot_ctl0_s          cn66xx;
7327         struct cvmx_lmcx_slot_ctl0_s          cn68xx;
7328         struct cvmx_lmcx_slot_ctl0_s          cn68xxp1;
7329         struct cvmx_lmcx_slot_ctl0_s          cnf71xx;
7330 };
7331 typedef union cvmx_lmcx_slot_ctl0 cvmx_lmcx_slot_ctl0_t;
7332
7333 /**
7334  * cvmx_lmc#_slot_ctl1
7335  *
7336  * LMC_SLOT_CTL1 = LMC Slot Control1
7337  * This register is an assortment of various control fields needed by the memory controller
7338  *
7339  * Notes:
7340  * If SW has not previously written to this register (since the last DRESET),
7341  * HW updates the fields in this register to the minimum allowed value
7342  * when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn, LMC*_CONTROL and
7343  * LMC*_MODEREG_PARAMS0 CSR's change. Ideally, only read this register
7344  * after LMC has been initialized and LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn
7345  * have valid data.
7346  *
7347  * The interpretation of the fields in this CSR depends on LMC*_CONFIG[DDR2T]:
7348  *  - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
7349  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
7350  *    from different cache blocks.
7351  *  - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
7352  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
7353  *    from different cache blocks. FieldValue = 0 is always illegal in this
7354  *    case.
7355  *
7356  * The hardware-calculated minimums are:
7357  *
7358  * min R2R_XRANK_INIT = 2 - LMC*_CONFIG[DDR2T] + MaxRdSkew - MinRdSkew + LMC*_CONTROL[RODT_BPRCH]
7359  * min R2W_XRANK_INIT = 5 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew) - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
7360  * min W2R_XRANK_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxWrSkew + LMC*_CONTROL[FPRCH2]
7361  * min W2W_XRANK_INIT = 4 - LMC*_CONFIG[DDR2T] + MaxWrSkew - MinWrSkew
7362  *
7363  * where
7364  *
7365  * RL        = CL  + AL (LMC*_MODEREG_PARAMS0[CL] selects CL, LMC*_MODEREG_PARAMS0[AL] selects AL)
7366  * WL        = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
7367  * MinRdSkew = min(LMC*_RLEVEL_RANKi[BYTEj]/4)                              (min is across all ranks i (0..3) and bytes j (0..8))
7368  * MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1                          (max is across all ranks i (0..3) and bytes j (0..8))
7369  * MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX]     (min is across all ranks i (0..3) and bytes j (0..8))
7370  * MaxWrSkew = max(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] + 1 (max is across all ranks i (0..3) and bytes j (0..8))
7371  *
7372  * R2W_XRANK_INIT has 1 extra CK cycle built in for OCTEON-internal ODT settling/channel turnaround time.
7373  *
7374  * W2R_XRANK_INIT has 1 extra CK cycle built in for channel turnaround time.
7375  */
7376 union cvmx_lmcx_slot_ctl1 {
7377         uint64_t u64;
7378         struct cvmx_lmcx_slot_ctl1_s {
7379 #ifdef __BIG_ENDIAN_BITFIELD
7380         uint64_t reserved_24_63               : 40;
7381         uint64_t w2w_xrank_init               : 6;  /**< Write-to-write spacing control
7382                                                          for back to back write followed by write cache block
7383                                                          accesses across ranks of the same DIMM (CK cycles; see Notes) */
7384         uint64_t w2r_xrank_init               : 6;  /**< Write-to-read spacing control
7385                                                          for back to back write followed by read cache block
7386                                                          accesses across ranks of the same DIMM (CK cycles; see Notes) */
7387         uint64_t r2w_xrank_init               : 6;  /**< Read-to-write spacing control
7388                                                          for back to back read followed by write cache block
7389                                                          accesses across ranks of the same DIMM (CK cycles; see Notes) */
7390         uint64_t r2r_xrank_init               : 6;  /**< Read-to-read spacing control
7391                                                          for back to back read followed by read cache block
7392                                                          accesses across ranks of the same DIMM (CK cycles; see Notes) */
7393 #else
7394         uint64_t r2r_xrank_init               : 6;
7395         uint64_t r2w_xrank_init               : 6;
7396         uint64_t w2r_xrank_init               : 6;
7397         uint64_t w2w_xrank_init               : 6;
7398         uint64_t reserved_24_63               : 40;
7399 #endif
7400         } s;
7401         struct cvmx_lmcx_slot_ctl1_s          cn61xx;
7402         struct cvmx_lmcx_slot_ctl1_s          cn63xx;
7403         struct cvmx_lmcx_slot_ctl1_s          cn63xxp1;
7404         struct cvmx_lmcx_slot_ctl1_s          cn66xx;
7405         struct cvmx_lmcx_slot_ctl1_s          cn68xx;
7406         struct cvmx_lmcx_slot_ctl1_s          cn68xxp1;
7407         struct cvmx_lmcx_slot_ctl1_s          cnf71xx;
7408 };
7409 typedef union cvmx_lmcx_slot_ctl1 cvmx_lmcx_slot_ctl1_t;
7410
7411 /**
7412  * cvmx_lmc#_slot_ctl2
7413  *
7414  * LMC_SLOT_CTL2 = LMC Slot Control2
7415  * This register is an assortment of various control fields needed by the memory controller
7416  *
7417  * Notes:
7418  * If SW has not previously written to this register (since the last DRESET),
7419  * HW updates the fields in this register to the minimum allowed value
7420  * when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn, LMC*_CONTROL and
7421  * LMC*_MODEREG_PARAMS0 CSR's change. Ideally, only read this register
7422  * after LMC has been initialized and LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn
7423  * have valid data.
7424  *
7425  * The interpretation of the fields in this CSR depends on LMC*_CONFIG[DDR2T]:
7426  *  - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
7427  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
7428  *    from different cache blocks.
7429  *  - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
7430  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
7431  *    from different cache blocks. FieldValue = 0 is always illegal in this
7432  *    case.
7433  *
7434  * The hardware-calculated minimums are:
7435  *
7436  * min R2R_XDIMM_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxRdSkew - MinRdSkew + LMC*_CONTROL[RODT_BPRCH]
7437  * min R2W_XDIMM_INIT = 6 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew) - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
7438  * min W2R_XDIMM_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxWrSkew + LMC*_CONTROL[FPRCH2]
7439  * min W2W_XDIMM_INIT = 5 - LMC*_CONFIG[DDR2T] + MaxWrSkew - MinWrSkew
7440  *
7441  * where
7442  *
7443  * RL        = CL  + AL (LMC*_MODEREG_PARAMS0[CL] selects CL, LMC*_MODEREG_PARAMS0[AL] selects AL)
7444  * WL        = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
7445  * MinRdSkew = min(LMC*_RLEVEL_RANKi[BYTEj]/4)                              (min is across all ranks i (0..3) and bytes j (0..8))
7446  * MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1                          (max is across all ranks i (0..3) and bytes j (0..8))
7447  * MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX]     (min is across all ranks i (0..3) and bytes j (0..8))
7448  * MaxWrSkew = max(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] + 1 (max is across all ranks i (0..3) and bytes j (0..8))
7449  *
7450  * R2W_XDIMM_INIT has 2 extra CK cycles built in for OCTEON-internal ODT settling/channel turnaround time.
7451  *
7452  * R2R_XDIMM_INIT, W2R_XDIMM_INIT, W2W_XDIMM_INIT have 1 extra CK cycle built in for channel turnaround time.
7453  */
7454 union cvmx_lmcx_slot_ctl2 {
7455         uint64_t u64;
7456         struct cvmx_lmcx_slot_ctl2_s {
7457 #ifdef __BIG_ENDIAN_BITFIELD
7458         uint64_t reserved_24_63               : 40;
7459         uint64_t w2w_xdimm_init               : 6;  /**< Write-to-write spacing control
7460                                                          for back to back write followed by write cache block
7461                                                          accesses across DIMMs */
7462         uint64_t w2r_xdimm_init               : 6;  /**< Write-to-read spacing control
7463                                                          for back to back write followed by read cache block
7464                                                          accesses across DIMMs */
7465         uint64_t r2w_xdimm_init               : 6;  /**< Read-to-write spacing control
7466                                                          for back to back read followed by write cache block
7467                                                          accesses across DIMMs */
7468         uint64_t r2r_xdimm_init               : 6;  /**< Read-to-read spacing control
7469                                                          for back to back read followed by read cache block
7470                                                          accesses across DIMMs */
7471 #else
7472         uint64_t r2r_xdimm_init               : 6;
7473         uint64_t r2w_xdimm_init               : 6;
7474         uint64_t w2r_xdimm_init               : 6;
7475         uint64_t w2w_xdimm_init               : 6;
7476         uint64_t reserved_24_63               : 40;
7477 #endif
7478         } s;
7479         struct cvmx_lmcx_slot_ctl2_s          cn61xx;
7480         struct cvmx_lmcx_slot_ctl2_s          cn63xx;
7481         struct cvmx_lmcx_slot_ctl2_s          cn63xxp1;
7482         struct cvmx_lmcx_slot_ctl2_s          cn66xx;
7483         struct cvmx_lmcx_slot_ctl2_s          cn68xx;
7484         struct cvmx_lmcx_slot_ctl2_s          cn68xxp1;
7485         struct cvmx_lmcx_slot_ctl2_s          cnf71xx;
7486 };
7487 typedef union cvmx_lmcx_slot_ctl2 cvmx_lmcx_slot_ctl2_t;
7488
7489 /**
7490  * cvmx_lmc#_timing_params0
7491  */
7492 union cvmx_lmcx_timing_params0 {
7493         uint64_t u64;
7494         struct cvmx_lmcx_timing_params0_s {
7495 #ifdef __BIG_ENDIAN_BITFIELD
7496         uint64_t reserved_47_63               : 17;
7497         uint64_t trp_ext                      : 1;  /**< Indicates tRP constraints.
7498                                                          Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
7499                                                          + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
7500                                                          where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
7501                                                          is the DDR clock frequency (not data rate).
7502                                                          TYP tRP=10-15ns
7503                                                          TYP tRTP=max(4nCK, 7.5ns) */
7504         uint64_t tcksre                       : 4;  /**< Indicates tCKSRE constraints.
7505                                                          Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
7506                                                          where tCKSRE is from the DDR3 spec, and tCYC(ns)
7507                                                          is the DDR clock frequency (not data rate).
7508                                                          TYP=max(5nCK, 10ns) */
7509         uint64_t trp                          : 4;  /**< Indicates tRP constraints.
7510                                                          Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
7511                                                          + (RNDUP[tRTP(ns)/tCYC(ns)])-4)-1,
7512                                                          where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
7513                                                          is the DDR clock frequency (not data rate).
7514                                                          TYP tRP=10-15ns
7515                                                          TYP tRTP=max(4nCK, 7.5ns) */
7516         uint64_t tzqinit                      : 4;  /**< Indicates tZQINIT constraints.
7517                                                          Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
7518                                                          where tZQINIT is from the DDR3 spec, and tCYC(ns)
7519                                                          is the DDR clock frequency (not data rate).
7520                                                          TYP=2 (equivalent to 512) */
7521         uint64_t tdllk                        : 4;  /**< Indicates tDLLK constraints.
7522                                                          Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
7523                                                          where tDLLK is from the DDR3 spec, and tCYC(ns)
7524                                                          is the DDR clock frequency (not data rate).
7525                                                          TYP=2 (equivalent to 512)
7526                                                          This parameter is used in self-refresh exit
7527                                                          and assumed to be greater than tRFC */
7528         uint64_t tmod                         : 4;  /**< Indicates tMOD constraints.
7529                                                          Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
7530                                                          where tMOD is from the DDR3 spec, and tCYC(ns)
7531                                                          is the DDR clock frequency (not data rate).
7532                                                          TYP=max(12nCK, 15ns) */
7533         uint64_t tmrd                         : 4;  /**< Indicates tMRD constraints.
7534                                                          Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
7535                                                          where tMRD is from the DDR3 spec, and tCYC(ns)
7536                                                          is the DDR clock frequency (not data rate).
7537                                                          TYP=4nCK */
7538         uint64_t txpr                         : 4;  /**< Indicates tXPR constraints.
7539                                                          Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
7540                                                          where tXPR is from the DDR3 spec, and tCYC(ns)
7541                                                          is the DDR clock frequency (not data rate).
7542                                                          TYP=max(5nCK, tRFC+10ns) */
7543         uint64_t tcke                         : 4;  /**< Indicates tCKE constraints.
7544                                                          Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
7545                                                          where tCKE is from the DDR3 spec, and tCYC(ns)
7546                                                          is the DDR clock frequency (not data rate).
7547                                                          TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
7548         uint64_t tzqcs                        : 4;  /**< Indicates tZQCS constraints.
7549                                                          Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
7550                                                          where tZQCS is from the DDR3 spec, and tCYC(ns)
7551                                                          is the DDR clock frequency (not data rate).
7552                                                          TYP=4 (equivalent to 64) */
7553         uint64_t tckeon                       : 10; /**< Reserved. Should be written to zero. */
7554 #else
7555         uint64_t tckeon                       : 10;
7556         uint64_t tzqcs                        : 4;
7557         uint64_t tcke                         : 4;
7558         uint64_t txpr                         : 4;
7559         uint64_t tmrd                         : 4;
7560         uint64_t tmod                         : 4;
7561         uint64_t tdllk                        : 4;
7562         uint64_t tzqinit                      : 4;
7563         uint64_t trp                          : 4;
7564         uint64_t tcksre                       : 4;
7565         uint64_t trp_ext                      : 1;
7566         uint64_t reserved_47_63               : 17;
7567 #endif
7568         } s;
        struct cvmx_lmcx_timing_params0_cn61xx {
        /* Layout shared by CN61XX/CN63XX/CN66XX/CN68XX(p1)/CNF71XX: identical
           to the generic layout except bits [9:0] are reserved here (the
           generic layout carries the legacy TCKEON field in those bits). */
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_47_63               : 17;
        uint64_t trp_ext                      : 1;  /**< Indicates tRP constraints.
                                                         Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                                                         + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
                                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP tRP=10-15ns
                                                         TYP tRTP=max(4nCK, 7.5ns) */
        uint64_t tcksre                       : 4;  /**< Indicates tCKSRE constraints.
                                                         Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
                                                         where tCKSRE is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(5nCK, 10ns) */
        uint64_t trp                          : 4;  /**< Indicates tRP constraints.
                                                         Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                                                         + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
                                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP tRP=10-15ns
                                                         TYP tRTP=max(4nCK, 7.5ns) */
        uint64_t tzqinit                      : 4;  /**< Indicates tZQINIT constraints.
                                                         Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
                                                         where tZQINIT is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=2 (equivalent to 512) */
        uint64_t tdllk                        : 4;  /**< Indicates tDLLK constraints.
                                                         Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
                                                         where tDLLK is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=2 (equivalent to 512)
                                                         This parameter is used in self-refresh exit
                                                         and assumed to be greater than tRFC */
        uint64_t tmod                         : 4;  /**< Indicates tMOD constraints.
                                                         Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
                                                         where tMOD is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(12nCK, 15ns) */
        uint64_t tmrd                         : 4;  /**< Indicates tMRD constraints.
                                                         Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
                                                         where tMRD is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=4nCK */
        uint64_t txpr                         : 4;  /**< Indicates tXPR constraints.
                                                         Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
                                                         where tXPR is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(5nCK, tRFC+10ns) */
        uint64_t tcke                         : 4;  /**< Indicates tCKE constraints.
                                                         Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
                                                         where tCKE is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
        uint64_t tzqcs                        : 4;  /**< Indicates tZQCS constraints.
                                                         Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
                                                         where tZQCS is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=4 (equivalent to 64) */
        uint64_t reserved_0_9                 : 10;
#else
        uint64_t reserved_0_9                 : 10;
        uint64_t tzqcs                        : 4;
        uint64_t tcke                         : 4;
        uint64_t txpr                         : 4;
        uint64_t tmrd                         : 4;
        uint64_t tmod                         : 4;
        uint64_t tdllk                        : 4;
        uint64_t tzqinit                      : 4;
        uint64_t trp                          : 4;
        uint64_t tcksre                       : 4;
        uint64_t trp_ext                      : 1;
        uint64_t reserved_47_63               : 17;
#endif
        } cn61xx;
7644         struct cvmx_lmcx_timing_params0_cn61xx cn63xx;
        struct cvmx_lmcx_timing_params0_cn63xxp1 {
        /* CN63XX pass-1 layout: no TRP_EXT bit (bits [63:46] are reserved)
           and bits [9:0] still carry the legacy TCKEON field. */
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_46_63               : 18;
        uint64_t tcksre                       : 4;  /**< Indicates tCKSRE constraints.
                                                         Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
                                                         where tCKSRE is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(5nCK, 10ns) */
        uint64_t trp                          : 4;  /**< Indicates tRP constraints.
                                                         Set TRP (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                                                         + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
                                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP tRP=10-15ns
                                                         TYP tRTP=max(4nCK, 7.5ns) */
        uint64_t tzqinit                      : 4;  /**< Indicates tZQINIT constraints.
                                                         Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
                                                         where tZQINIT is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=2 (equivalent to 512) */
        uint64_t tdllk                        : 4;  /**< Indicates tDLLK constraints.
                                                         Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
                                                         where tDLLK is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=2 (equivalent to 512)
                                                         This parameter is used in self-refresh exit
                                                         and assumed to be greater than tRFC */
        uint64_t tmod                         : 4;  /**< Indicates tMOD constraints.
                                                         Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
                                                         where tMOD is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(12nCK, 15ns) */
        uint64_t tmrd                         : 4;  /**< Indicates tMRD constraints.
                                                         Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
                                                         where tMRD is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=4nCK */
        uint64_t txpr                         : 4;  /**< Indicates tXPR constraints.
                                                         Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
                                                         where tXPR is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(5nCK, tRFC+10ns) */
        uint64_t tcke                         : 4;  /**< Indicates tCKE constraints.
                                                         Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
                                                         where tCKE is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
        uint64_t tzqcs                        : 4;  /**< Indicates tZQCS constraints.
                                                         Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
                                                         where tZQCS is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=4 (equivalent to 64) */
        uint64_t tckeon                       : 10; /**< Reserved. Should be written to zero. */
#else
        uint64_t tckeon                       : 10;
        uint64_t tzqcs                        : 4;
        uint64_t tcke                         : 4;
        uint64_t txpr                         : 4;
        uint64_t tmrd                         : 4;
        uint64_t tmod                         : 4;
        uint64_t tdllk                        : 4;
        uint64_t tzqinit                      : 4;
        uint64_t trp                          : 4;
        uint64_t tcksre                       : 4;
        uint64_t reserved_46_63               : 18;
#endif
        } cn63xxp1;
7712         struct cvmx_lmcx_timing_params0_cn61xx cn66xx;
7713         struct cvmx_lmcx_timing_params0_cn61xx cn68xx;
7714         struct cvmx_lmcx_timing_params0_cn61xx cn68xxp1;
7715         struct cvmx_lmcx_timing_params0_cn61xx cnf71xx;
7716 };
7717 typedef union cvmx_lmcx_timing_params0 cvmx_lmcx_timing_params0_t;
7718
7719 /**
7720  * cvmx_lmc#_timing_params1
7721  */
7722 union cvmx_lmcx_timing_params1 {
7723         uint64_t u64;
        struct cvmx_lmcx_timing_params1_s {
        /* Generic LMC*_TIMING_PARAMS1 layout: DDR3 activate/precharge,
           refresh, write-leveling and power-down timing fields. */
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t reserved_47_63               : 17;
        uint64_t tras_ext                     : 1;  /**< Indicates tRAS constraints.
                                                         Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
                                                         where tRAS is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=35ns-9*tREFI
                                                             - 000000: RESERVED
                                                             - 000001: 2 tCYC
                                                             - 000010: 3 tCYC
                                                             - ...
                                                             - 111111: 64 tCYC */
        uint64_t txpdll                       : 5;  /**< Indicates tXPDLL constraints.
                                                         Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
                                                         where tXPDLL is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(10nCK, 24ns) */
        uint64_t tfaw                         : 5;  /**< Indicates tFAW constraints.
                                                         Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
                                                         where tFAW is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=30-40ns */
        uint64_t twldqsen                     : 4;  /**< Indicates tWLDQSEN constraints.
                                                         Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
                                                         where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(25nCK) */
        uint64_t twlmrd                       : 4;  /**< Indicates tWLMRD constraints.
                                                         Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
                                                         where tWLMRD is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(40nCK) */
        uint64_t txp                          : 3;  /**< Indicates tXP constraints.
                                                         Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
                                                         where tXP is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(3nCK, 7.5ns) */
        uint64_t trrd                         : 3;  /**< Indicates tRRD constraints.
                                                         Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
                                                         where tRRD is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(4nCK, 10ns)
                                                            - 000: RESERVED
                                                            - 001: 3 tCYC
                                                            - ...
                                                            - 110: 8 tCYC
                                                            - 111: 9 tCYC */
        uint64_t trfc                         : 5;  /**< Indicates tRFC constraints.
                                                         Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
                                                         where tRFC is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=90-350ns
                                                              - 00000: RESERVED
                                                              - 00001: 8 tCYC
                                                              - 00010: 16 tCYC
                                                              - 00011: 24 tCYC
                                                              - 00100: 32 tCYC
                                                              - ...
                                                              - 11110: 240 tCYC
                                                              - 11111: 248 tCYC */
        uint64_t twtr                         : 4;  /**< Indicates tWTR constraints.
                                                         Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
                                                         where tWTR is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=max(4nCK, 7.5ns)
                                                             - 0000: RESERVED
                                                             - 0001: 2
                                                             - ...
                                                             - 0111: 8
                                                             - 1000-1111: RESERVED */
        uint64_t trcd                         : 4;  /**< Indicates tRCD constraints.
                                                         Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
                                                         where tRCD is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=10-15ns
                                                             - 0000: RESERVED
                                                             - 0001: 2 (2 is the smallest value allowed)
                                                             - 0010: 2
                                                             - ...
                                                             - 1110: 14
                                                             - 1111: RESERVED
                                                         In 2T mode, make this register TRCD-1, not going
                                                         below 2. */
        uint64_t tras                         : 5;  /**< Indicates tRAS constraints.
                                                         Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
                                                         where tRAS is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=35ns-9*tREFI
                                                             - 000000: RESERVED
                                                             - 000001: 2 tCYC
                                                             - 000010: 3 tCYC
                                                             - ...
                                                             - 111111: 64 tCYC */
        uint64_t tmprr                        : 4;  /**< Indicates tMPRR constraints.
                                                         Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
                                                         where tMPRR is from the DDR3 spec, and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=1nCK */
#else
        uint64_t tmprr                        : 4;
        uint64_t tras                         : 5;
        uint64_t trcd                         : 4;
        uint64_t twtr                         : 4;
        uint64_t trfc                         : 5;
        uint64_t trrd                         : 3;
        uint64_t txp                          : 3;
        uint64_t twlmrd                       : 4;
        uint64_t twldqsen                     : 4;
        uint64_t tfaw                         : 5;
        uint64_t txpdll                       : 5;
        uint64_t tras_ext                     : 1;
        uint64_t reserved_47_63               : 17;
#endif
        } s;
7839         struct cvmx_lmcx_timing_params1_s     cn61xx;
7840         struct cvmx_lmcx_timing_params1_s     cn63xx;
7841         struct cvmx_lmcx_timing_params1_cn63xxp1 {
7842 #ifdef __BIG_ENDIAN_BITFIELD
7843         uint64_t reserved_46_63               : 18;
7844         uint64_t txpdll                       : 5;  /**< Indicates tXPDLL constraints.
7845                                                          Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
7846                                                          where tXPDLL is from the DDR3 spec, and tCYC(ns)
7847                                                          is the DDR clock frequency (not data rate).
7848                                                          TYP=max(10nCK, 24ns) */
7849         uint64_t tfaw                         : 5;  /**< Indicates tFAW constraints.
7850                                                          Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
7851                                                          where tFAW is from the DDR3 spec, and tCYC(ns)
7852                                                          is the DDR clock frequency (not data rate).
7853                                                          TYP=30-40ns */
7854         uint64_t twldqsen                     : 4;  /**< Indicates tWLDQSEN constraints.
7855                                                          Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
7856                                                          where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
7857                                                          is the DDR clock frequency (not data rate).
7858                                                          TYP=max(25nCK) */
7859         uint64_t twlmrd                       : 4;  /**< Indicates tWLMRD constraints.
7860                                                          Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
7861                                                          where tWLMRD is from the DDR3 spec, and tCYC(ns)
7862                                                          is the DDR clock frequency (not data rate).
7863                                                          TYP=max(40nCK) */
7864         uint64_t txp                          : 3;  /**< Indicates tXP constraints.
7865                                                          Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
7866                                                          where tXP is from the DDR3 spec, and tCYC(ns)
7867                                                          is the DDR clock frequency (not data rate).
7868                                                          TYP=max(3nCK, 7.5ns) */
7869         uint64_t trrd                         : 3;  /**< Indicates tRRD constraints.
7870                                                          Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
7871                                                          where tRRD is from the DDR3 spec, and tCYC(ns)
7872                                                          is the DDR clock frequency (not data rate).
7873                                                          TYP=max(4nCK, 10ns)
7874                                                             - 000: RESERVED
7875                                                             - 001: 3 tCYC
7876                                                             - ...
7877                                                             - 110: 8 tCYC
7878                                                             - 111: 9 tCYC */
7879         uint64_t trfc                         : 5;  /**< Indicates tRFC constraints.
7880                                                          Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
7881                                                          where tRFC is from the DDR3 spec, and tCYC(ns)
7882                                                          is the DDR clock frequency (not data rate).
7883                                                          TYP=90-350ns
7884                                                               - 00000: RESERVED
7885                                                               - 00001: 8 tCYC
7886                                                               - 00010: 16 tCYC
7887                                                               - 00011: 24 tCYC
7888                                                               - 00100: 32 tCYC
7889                                                               - ...
7890                                                               - 11110: 240 tCYC
7891                                                               - 11111: 248 tCYC */
7892         uint64_t twtr                         : 4;  /**< Indicates tWTR constraints.
7893                                                          Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
7894                                                          where tWTR is from the DDR3 spec, and tCYC(ns)
7895                                                          is the DDR clock frequency (not data rate).
7896                                                          TYP=max(4nCK, 7.5ns)
7897                                                              - 0000: RESERVED
7898                                                              - 0001: 2
7899                                                              - ...
7900                                                              - 0111: 8
7901                                                              - 1000-1111: RESERVED */
7902         uint64_t trcd                         : 4;  /**< Indicates tRCD constraints.
7903                                                          Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
7904                                                          where tRCD is from the DDR3 spec, and tCYC(ns)
7905                                                          is the DDR clock frequency (not data rate).
7906                                                          TYP=10-15ns
7907                                                              - 0000: RESERVED
7908                                                              - 0001: 2 (2 is the smallest value allowed)
7909                                                              - 0002: 2
7910                                                              - ...
7911                                                              - 1001: 9
7912                                                              - 1010-1111: RESERVED
7913                                                          In 2T mode, make this register TRCD-1, not going
7914                                                          below 2. */
7915         uint64_t tras                         : 5;  /**< Indicates tRAS constraints.
7916                                                          Set TRAS (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
7917                                                          where tRAS is from the DDR3 spec, and tCYC(ns)
7918                                                          is the DDR clock frequency (not data rate).
7919                                                          TYP=35ns-9*tREFI
7920                                                              - 00000: RESERVED
7921                                                              - 00001: 2 tCYC
7922                                                              - 00010: 3 tCYC
7923                                                              - ...
7924                                                              - 11111: 32 tCYC */
7925         uint64_t tmprr                        : 4;  /**< Indicates tMPRR constraints.
7926                                                          Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
7927                                                          where tMPRR is from the DDR3 spec, and tCYC(ns)
7928                                                          is the DDR clock frequency (not data rate).
7929                                                          TYP=1nCK */
7930 #else
7931         uint64_t tmprr                        : 4;
7932         uint64_t tras                         : 5;
7933         uint64_t trcd                         : 4;
7934         uint64_t twtr                         : 4;
7935         uint64_t trfc                         : 5;
7936         uint64_t trrd                         : 3;
7937         uint64_t txp                          : 3;
7938         uint64_t twlmrd                       : 4;
7939         uint64_t twldqsen                     : 4;
7940         uint64_t tfaw                         : 5;
7941         uint64_t txpdll                       : 5;
7942         uint64_t reserved_46_63               : 18;
7943 #endif
7944         } cn63xxp1;
7945         struct cvmx_lmcx_timing_params1_s     cn66xx;
7946         struct cvmx_lmcx_timing_params1_s     cn68xx;
7947         struct cvmx_lmcx_timing_params1_s     cn68xxp1;
7948         struct cvmx_lmcx_timing_params1_s     cnf71xx;
7949 };
7950 typedef union cvmx_lmcx_timing_params1 cvmx_lmcx_timing_params1_t;
7951
7952 /**
7953  * cvmx_lmc#_tro_ctl
7954  *
7955  * LMC_TRO_CTL = LMC Temperature Ring Osc Control
7956  * This register is an assortment of various control fields needed to control the temperature ring oscillator
7957  *
7958  * Notes:
7959  * To bring up the temperature ring oscillator, write TRESET to 0, and follow by initializing RCLK_CNT to desired
7960  * value
7961  */
7962 union cvmx_lmcx_tro_ctl {
7963         uint64_t u64;
7964         struct cvmx_lmcx_tro_ctl_s {
7965 #ifdef __BIG_ENDIAN_BITFIELD
7966         uint64_t reserved_33_63               : 31;
7967         uint64_t rclk_cnt                     : 32; /**< rclk counter; per the Notes above, SW
                                                             initializes this to the desired value
                                                             after first clearing TRESET */
7968         uint64_t treset                       : 1;  /**< Reset ring oscillator; write 0 to bring
                                                             the temperature ring oscillator up
                                                             (see Notes above) */
7969 #else
7970         uint64_t treset                       : 1;
7971         uint64_t rclk_cnt                     : 32;
7972         uint64_t reserved_33_63               : 31;
7973 #endif
7974         } s;
7975         struct cvmx_lmcx_tro_ctl_s            cn61xx;
7976         struct cvmx_lmcx_tro_ctl_s            cn63xx;
7977         struct cvmx_lmcx_tro_ctl_s            cn63xxp1;
7978         struct cvmx_lmcx_tro_ctl_s            cn66xx;
7979         struct cvmx_lmcx_tro_ctl_s            cn68xx;
7980         struct cvmx_lmcx_tro_ctl_s            cn68xxp1;
7981         struct cvmx_lmcx_tro_ctl_s            cnf71xx;
7982 };
7983 typedef union cvmx_lmcx_tro_ctl cvmx_lmcx_tro_ctl_t;
7984
7985 /**
7986  * cvmx_lmc#_tro_stat
7987  *
7988  * LMC_TRO_STAT = LMC Temperature Ring Osc Status
7989  * This register reports status (the ring counter) for the temperature ring oscillator
7990  */
7991 union cvmx_lmcx_tro_stat {
7992         uint64_t u64;
7993         struct cvmx_lmcx_tro_stat_s {
7994 #ifdef __BIG_ENDIAN_BITFIELD
7995         uint64_t reserved_32_63               : 32;
7996         uint64_t ring_cnt                     : 32; /**< ring counter; presumably the count
                                                             accumulated by the temperature ring
                                                             oscillator configured via LMC_TRO_CTL —
                                                             TODO confirm against the HRM */
7997 #else
7998         uint64_t ring_cnt                     : 32;
7999         uint64_t reserved_32_63               : 32;
8000 #endif
8001         } s;
8002         struct cvmx_lmcx_tro_stat_s           cn61xx;
8003         struct cvmx_lmcx_tro_stat_s           cn63xx;
8004         struct cvmx_lmcx_tro_stat_s           cn63xxp1;
8005         struct cvmx_lmcx_tro_stat_s           cn66xx;
8006         struct cvmx_lmcx_tro_stat_s           cn68xx;
8007         struct cvmx_lmcx_tro_stat_s           cn68xxp1;
8008         struct cvmx_lmcx_tro_stat_s           cnf71xx;
8009 };
8010 typedef union cvmx_lmcx_tro_stat cvmx_lmcx_tro_stat_t;
8011
8012 /**
8013  * cvmx_lmc#_wlevel_ctl
8014  */
8015 union cvmx_lmcx_wlevel_ctl {
8016         uint64_t u64;
8017         struct cvmx_lmcx_wlevel_ctl_s {
8018 #ifdef __BIG_ENDIAN_BITFIELD
8019         uint64_t reserved_22_63               : 42;
8020         uint64_t rtt_nom                      : 3;  /**< RTT_NOM
8021                                                          LMC writes a decoded value to MR1[Rtt_Nom] of the rank during
8022                                                          write leveling. Per JEDEC DDR3 specifications,
8023                                                          only values MR1[Rtt_Nom] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6)
8024                                                          are allowed during write leveling with output buffer enabled.
8025                                                          000 : LMC writes 001 (RZQ/4)   to MR1[Rtt_Nom]
8026                                                          001 : LMC writes 010 (RZQ/2)   to MR1[Rtt_Nom]
8027                                                          010 : LMC writes 011 (RZQ/6)   to MR1[Rtt_Nom]
8028                                                          011 : LMC writes 100 (RZQ/12)  to MR1[Rtt_Nom]
8029                                                          100 : LMC writes 101 (RZQ/8)   to MR1[Rtt_Nom]
8030                                                          101 : LMC writes 110 (Rsvd)    to MR1[Rtt_Nom]
8031                                                          110 : LMC writes 111 (Rsvd)    to  MR1[Rtt_Nom]
8032                                                          111 : LMC writes 000 (Disabled) to MR1[Rtt_Nom] */
8033         uint64_t bitmask                      : 8;  /**< Mask to select bit lanes on which write-leveling
8034                                                          feedback is returned when OR_DIS is set to 1 */
8035         uint64_t or_dis                       : 1;  /**< Disable or'ing of bits in a byte lane when computing
8036                                                          the write-leveling bitmask */
8037         uint64_t sset                         : 1;  /**< Run write-leveling on the current setting only. */
8038         uint64_t lanemask                     : 9;  /**< One-hot mask to select byte lane to be leveled by
8039                                                          the write-leveling sequence
8040                                                          Used with x16 parts where the upper and lower byte
8041                                                          lanes need to be leveled independently */
8042 #else
8043         uint64_t lanemask                     : 9;
8044         uint64_t sset                         : 1;
8045         uint64_t or_dis                       : 1;
8046         uint64_t bitmask                      : 8;
8047         uint64_t rtt_nom                      : 3;
8048         uint64_t reserved_22_63               : 42;
8049 #endif
8050         } s;
8051         struct cvmx_lmcx_wlevel_ctl_s         cn61xx;
8052         struct cvmx_lmcx_wlevel_ctl_s         cn63xx;
8053         struct cvmx_lmcx_wlevel_ctl_cn63xxp1 {
8054 #ifdef __BIG_ENDIAN_BITFIELD
8055         uint64_t reserved_10_63               : 54;
8056         uint64_t sset                         : 1;  /**< Run write-leveling on the current setting only. */
8057         uint64_t lanemask                     : 9;  /**< One-hot mask to select byte lane to be leveled by
8058                                                          the write-leveling sequence
8059                                                          Used with x16 parts where the upper and lower byte
8060                                                          lanes need to be leveled independently */
8061 #else
8062         uint64_t lanemask                     : 9;
8063         uint64_t sset                         : 1;
8064         uint64_t reserved_10_63               : 54;
8065 #endif
8066         } cn63xxp1;
8067         struct cvmx_lmcx_wlevel_ctl_s         cn66xx;
8068         struct cvmx_lmcx_wlevel_ctl_s         cn68xx;
8069         struct cvmx_lmcx_wlevel_ctl_s         cn68xxp1;
8070         struct cvmx_lmcx_wlevel_ctl_s         cnf71xx;
8071 };
8072 typedef union cvmx_lmcx_wlevel_ctl cvmx_lmcx_wlevel_ctl_t;
8073
8074 /**
8075  * cvmx_lmc#_wlevel_dbg
8076  *
8077  * Notes:
8078  * A given write of LMC*_WLEVEL_DBG returns the write-leveling pass/fail results for all possible
8079  * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW write-leveled.
8080  * LMC*_WLEVEL_DBG[BYTE] selects the particular byte.
8081  * To get these pass/fail results for another different rank, you must run the hardware write-leveling
8082  * again. For example, it is possible to get the BITMASK results for every byte of every rank
8083  * if you run write-leveling separately for each rank, probing LMC*_WLEVEL_DBG between each
8084  * write-leveling.
8085  */
8086 union cvmx_lmcx_wlevel_dbg {
8087         uint64_t u64;
8088         struct cvmx_lmcx_wlevel_dbg_s {
8089 #ifdef __BIG_ENDIAN_BITFIELD
8090         uint64_t reserved_12_63               : 52;
8091         uint64_t bitmask                      : 8;  /**< Bitmask generated during deskew settings sweep
8092                                                          if LMCX_WLEVEL_CTL[SSET]=0
8093                                                            BITMASK[n]=0 means deskew setting n failed
8094                                                            BITMASK[n]=1 means deskew setting n passed
8095                                                            for 0 <= n <= 7
8096                                                            BITMASK contains the first 8 results of the total 16
8097                                                            collected by LMC during the write-leveling sequence
8098                                                          else if LMCX_WLEVEL_CTL[SSET]=1
8099                                                            BITMASK[0]=0 means curr deskew setting failed
8100                                                            BITMASK[0]=1 means curr deskew setting passed */
8101         uint64_t byte                         : 4;  /**< Selects the byte lane whose BITMASK is returned;
                                                             0 <= BYTE <= 8 (8 selects the ECC lane —
                                                             see LMC*_WLEVEL_RANK*[BYTE8]) */
8102 #else
8103         uint64_t byte                         : 4;
8104         uint64_t bitmask                      : 8;
8105         uint64_t reserved_12_63               : 52;
8106 #endif
8107         } s;
8108         struct cvmx_lmcx_wlevel_dbg_s         cn61xx;
8109         struct cvmx_lmcx_wlevel_dbg_s         cn63xx;
8110         struct cvmx_lmcx_wlevel_dbg_s         cn63xxp1;
8111         struct cvmx_lmcx_wlevel_dbg_s         cn66xx;
8112         struct cvmx_lmcx_wlevel_dbg_s         cn68xx;
8113         struct cvmx_lmcx_wlevel_dbg_s         cn68xxp1;
8114         struct cvmx_lmcx_wlevel_dbg_s         cnf71xx;
8115 };
8116 typedef union cvmx_lmcx_wlevel_dbg cvmx_lmcx_wlevel_dbg_t;
8117
8118 /**
8119  * cvmx_lmc#_wlevel_rank#
8120  *
8121  * Notes:
8122  * This is four CSRs per LMC, one per each rank.
8123  *
8124  * Deskew setting is measured in units of 1/8 CK, so the above BYTE* values can range over 4 CKs.
8125  *
8126  * Assuming LMC*_WLEVEL_CTL[SSET]=0, the BYTE*<2:0> values are not used during write-leveling, and
8127  * they are over-written by the hardware as part of the write-leveling sequence. (HW sets STATUS==3
8128  * after HW write-leveling completes for the rank). SW needs to set BYTE*<4:3> bits.
8129  *
8130  * Each CSR may also be written by SW, but not while a write-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
8131  *
8132  * SW initiates a HW write-leveling sequence by programming LMC*_WLEVEL_CTL and writing RANKMASK and INIT_START=1 with SEQUENCE=6 in LMC*_CONFIG.
8133  * LMC will then step through and accumulate write leveling results for 8 unique delay settings (twice), starting at a delay of
8134  * LMC*_WLEVEL_RANKn[BYTE*<4:3>]*8 CK increasing by 1/8 CK each setting. HW will then set LMC*_WLEVEL_RANKi[BYTE*<2:0>] to indicate the
8135  * first write leveling result of '1' that followed a result of '0' during the sequence by searching for a '1100' pattern in the generated
8136  * bitmask, except that LMC will always write LMC*_WLEVEL_RANKi[BYTE*<0>]=0. If HW is unable to find a match for a '1100' pattern, then HW will
8137  * set LMC*_WLEVEL_RANKi[BYTE*<2:0>] to 4.
8138  * See LMC*_WLEVEL_CTL.
8139  *
8140  * LMC*_WLEVEL_RANKi values for ranks i without attached DRAM should be set such that
8141  * they do not increase the range of possible BYTE values for any byte
8142  * lane. The easiest way to do this is to set
8143  *     LMC*_WLEVEL_RANKi = LMC*_WLEVEL_RANKj,
8144  * where j is some rank with attached DRAM whose LMC*_WLEVEL_RANKj is already fully initialized.
8145  */
8146 union cvmx_lmcx_wlevel_rankx {
8147         uint64_t u64;
8148         struct cvmx_lmcx_wlevel_rankx_s {
8149 #ifdef __BIG_ENDIAN_BITFIELD
8150         uint64_t reserved_47_63               : 17;
8151         uint64_t status                       : 2;  /**< Indicates status of the write-leveling and where
8152                                                          the BYTE* programmings in <44:0> came from:
8153                                                          0 = BYTE* values are their reset value
8154                                                          1 = BYTE* values were set via a CSR write to this register
8155                                                          2 = write-leveling sequence currently in progress (BYTE* values are unpredictable)
8156                                                          3 = BYTE* values came from a complete write-leveling sequence, irrespective of
8157                                                              which lanes are masked via LMC*_WLEVEL_CTL[LANEMASK] */
8158         uint64_t byte8                        : 5;  /**< Deskew setting (ECC byte lane)
8159                                                          Bit 0 of BYTE8 must be zero during normal operation.
8160                                                          When ECC DRAM is not present (i.e. when DRAM is not
8161                                                          attached to chip signals DDR_CBS_0_* and DDR_CB[7:0]),
8162                                                          SW should write BYTE8 with a value that does
8163                                                          not increase the range of possible BYTE* values. The
8164                                                          easiest way to do this is to set
8165                                                          LMC*_WLEVEL_RANK*[BYTE8] = LMC*_WLEVEL_RANK*[BYTE0]
8166                                                          when there is no ECC DRAM, using the final BYTE0 value. */
8167         uint64_t byte7                        : 5;  /**< Deskew setting
8168                                                          Bit 0 of BYTE7 must be zero during normal operation */
8169         uint64_t byte6                        : 5;  /**< Deskew setting
8170                                                          Bit 0 of BYTE6 must be zero during normal operation */
8171         uint64_t byte5                        : 5;  /**< Deskew setting
8172                                                          Bit 0 of BYTE5 must be zero during normal operation */
8173         uint64_t byte4                        : 5;  /**< Deskew setting
8174                                                          Bit 0 of BYTE4 must be zero during normal operation */
8175         uint64_t byte3                        : 5;  /**< Deskew setting
8176                                                          Bit 0 of BYTE3 must be zero during normal operation */
8177         uint64_t byte2                        : 5;  /**< Deskew setting
8178                                                          Bit 0 of BYTE2 must be zero during normal operation */
8179         uint64_t byte1                        : 5;  /**< Deskew setting
8180                                                          Bit 0 of BYTE1 must be zero during normal operation */
8181         uint64_t byte0                        : 5;  /**< Deskew setting
8182                                                          Bit 0 of BYTE0 must be zero during normal operation */
8183 #else
8184         uint64_t byte0                        : 5;
8185         uint64_t byte1                        : 5;
8186         uint64_t byte2                        : 5;
8187         uint64_t byte3                        : 5;
8188         uint64_t byte4                        : 5;
8189         uint64_t byte5                        : 5;
8190         uint64_t byte6                        : 5;
8191         uint64_t byte7                        : 5;
8192         uint64_t byte8                        : 5;
8193         uint64_t status                       : 2;
8194         uint64_t reserved_47_63               : 17;
8195 #endif
8196         } s;
8197         struct cvmx_lmcx_wlevel_rankx_s       cn61xx;
8198         struct cvmx_lmcx_wlevel_rankx_s       cn63xx;
8199         struct cvmx_lmcx_wlevel_rankx_s       cn63xxp1;
8200         struct cvmx_lmcx_wlevel_rankx_s       cn66xx;
8201         struct cvmx_lmcx_wlevel_rankx_s       cn68xx;
8202         struct cvmx_lmcx_wlevel_rankx_s       cn68xxp1;
8203         struct cvmx_lmcx_wlevel_rankx_s       cnf71xx;
8204 };
8205 typedef union cvmx_lmcx_wlevel_rankx cvmx_lmcx_wlevel_rankx_t;
8206
8207 /**
8208  * cvmx_lmc#_wodt_ctl0
8209  *
8210  * LMC_WODT_CTL0 = LMC Write OnDieTermination control
8211  * See the description in LMC_WODT_CTL1.
8212  *
8213  * Notes:
8214  * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write ODT mask.  See LMC_WODT_CTL1.
8215  *
8216  */
8217 union cvmx_lmcx_wodt_ctl0 {
8218         uint64_t u64;
8219         struct cvmx_lmcx_wodt_ctl0_s {
8220 #ifdef __BIG_ENDIAN_BITFIELD
8221         uint64_t reserved_0_63                : 64; /* generic layout is all-reserved: the field
                                                           layout is chip-specific — see the cn30xx
                                                           and cn38xx variants below */
8222 #else
8223         uint64_t reserved_0_63                : 64;
8224 #endif
8225         } s;
8226         struct cvmx_lmcx_wodt_ctl0_cn30xx {
8227 #ifdef __BIG_ENDIAN_BITFIELD
8228         uint64_t reserved_32_63               : 32;
8229         uint64_t wodt_d1_r1                   : 8;  /**< Write ODT mask DIMM1, RANK1 */
8230         uint64_t wodt_d1_r0                   : 8;  /**< Write ODT mask DIMM1, RANK0 */
8231         uint64_t wodt_d0_r1                   : 8;  /**< Write ODT mask DIMM0, RANK1 */
8232         uint64_t wodt_d0_r0                   : 8;  /**< Write ODT mask DIMM0, RANK0 */
8233 #else
8234         uint64_t wodt_d0_r0                   : 8;
8235         uint64_t wodt_d0_r1                   : 8;
8236         uint64_t wodt_d1_r0                   : 8;
8237         uint64_t wodt_d1_r1                   : 8;
8238         uint64_t reserved_32_63               : 32;
8239 #endif
8240         } cn30xx;
8241         struct cvmx_lmcx_wodt_ctl0_cn30xx     cn31xx;
8242         struct cvmx_lmcx_wodt_ctl0_cn38xx {
8243 #ifdef __BIG_ENDIAN_BITFIELD
8244         uint64_t reserved_32_63               : 32;
8245         uint64_t wodt_hi3                     : 4;  /**< Write ODT mask for position 3, data[127:64] */
8246         uint64_t wodt_hi2                     : 4;  /**< Write ODT mask for position 2, data[127:64] */
8247         uint64_t wodt_hi1                     : 4;  /**< Write ODT mask for position 1, data[127:64] */
8248         uint64_t wodt_hi0                     : 4;  /**< Write ODT mask for position 0, data[127:64] */
8249         uint64_t wodt_lo3                     : 4;  /**< Write ODT mask for position 3, data[ 63: 0] */
8250         uint64_t wodt_lo2                     : 4;  /**< Write ODT mask for position 2, data[ 63: 0] */
8251         uint64_t wodt_lo1                     : 4;  /**< Write ODT mask for position 1, data[ 63: 0] */
8252         uint64_t wodt_lo0                     : 4;  /**< Write ODT mask for position 0, data[ 63: 0] */
8253 #else
8254         uint64_t wodt_lo0                     : 4;
8255         uint64_t wodt_lo1                     : 4;
8256         uint64_t wodt_lo2                     : 4;
8257         uint64_t wodt_lo3                     : 4;
8258         uint64_t wodt_hi0                     : 4;
8259         uint64_t wodt_hi1                     : 4;
8260         uint64_t wodt_hi2                     : 4;
8261         uint64_t wodt_hi3                     : 4;
8262         uint64_t reserved_32_63               : 32;
8263 #endif
8264         } cn38xx;
8265         struct cvmx_lmcx_wodt_ctl0_cn38xx     cn38xxp2;
8266         struct cvmx_lmcx_wodt_ctl0_cn38xx     cn50xx;
8267         struct cvmx_lmcx_wodt_ctl0_cn30xx     cn52xx;
8268         struct cvmx_lmcx_wodt_ctl0_cn30xx     cn52xxp1;
8269         struct cvmx_lmcx_wodt_ctl0_cn30xx     cn56xx;
8270         struct cvmx_lmcx_wodt_ctl0_cn30xx     cn56xxp1;
8271         struct cvmx_lmcx_wodt_ctl0_cn38xx     cn58xx;
8272         struct cvmx_lmcx_wodt_ctl0_cn38xx     cn58xxp1;
8273 };
8274 typedef union cvmx_lmcx_wodt_ctl0 cvmx_lmcx_wodt_ctl0_t;
8275
8276 /**
8277  * cvmx_lmc#_wodt_ctl1
8278  *
8279  * LMC_WODT_CTL1 = LMC Write OnDieTermination control
8280  * System designers may desire to terminate DQ/DQS/DM lines for higher frequency DDR operations
8281  * (667MHz and faster), especially on a multi-rank system. DDR2 DQ/DM/DQS I/O's have built in
8282  * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
8283  * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts
8284  * in that DIMM. System designers may prefer different combinations of ODT ON's for read and write
8285  * into different ranks. Octeon supports full programmability by way of the mask register below.
8286  * Each Rank position has its own 8-bit programmable field.
8287  * When the controller does a write to that rank, it sets the 8 ODT pins to the MASK pins below.
8288  * For eg., When doing a write into Rank0, a system designer may desire to terminate the lines
8289  * with the resistor on Dimm0/Rank1. The mask WODT_D0_R0 would then be [00000010].
8290  * If ODT feature is not desired, the DDR parts can be programmed to not look at these pins by
8291  * writing 0 in QS_DIC. Octeon drives the appropriate mask values on the ODT pins by default.
8292  * If this feature is not required, write 0 in this register.
8293  *
8294  * Notes:
8295  * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write ODT mask.
8296  * When a given RANK is selected, the WODT mask for that RANK is used.  The resulting WODT mask is
8297  * driven to the DIMMs in the following manner:
8298  *            BUNK_ENA=1     BUNK_ENA=0
8299  * Mask[7] -> DIMM3, RANK1    DIMM3
8300  * Mask[6] -> DIMM3, RANK0
8301  * Mask[5] -> DIMM2, RANK1    DIMM2
8302  * Mask[4] -> DIMM2, RANK0
8303  * Mask[3] -> DIMM1, RANK1    DIMM1
8304  * Mask[2] -> DIMM1, RANK0
8305  * Mask[1] -> DIMM0, RANK1    DIMM0
8306  * Mask[0] -> DIMM0, RANK0
8307  */
8308 union cvmx_lmcx_wodt_ctl1 {
8309         uint64_t u64;
8310         struct cvmx_lmcx_wodt_ctl1_s {
8311 #ifdef __BIG_ENDIAN_BITFIELD
8312         uint64_t reserved_32_63               : 32;
8313         uint64_t wodt_d3_r1                   : 8;  /**< Write ODT mask DIMM3, RANK1/DIMM3 in SingleRanked
                                                             (each field here is the 8-bit ODT pin mask driven
                                                             on writes to that rank — see the per-bit mapping
                                                             in the Notes above) */
8314         uint64_t wodt_d3_r0                   : 8;  /**< Write ODT mask DIMM3, RANK0 */
8315         uint64_t wodt_d2_r1                   : 8;  /**< Write ODT mask DIMM2, RANK1/DIMM2 in SingleRanked */
8316         uint64_t wodt_d2_r0                   : 8;  /**< Write ODT mask DIMM2, RANK0 */
8317 #else
8318         uint64_t wodt_d2_r0                   : 8;
8319         uint64_t wodt_d2_r1                   : 8;
8320         uint64_t wodt_d3_r0                   : 8;
8321         uint64_t wodt_d3_r1                   : 8;
8322         uint64_t reserved_32_63               : 32;
8323 #endif
8324         } s;
8325         struct cvmx_lmcx_wodt_ctl1_s          cn30xx;
8326         struct cvmx_lmcx_wodt_ctl1_s          cn31xx;
8327         struct cvmx_lmcx_wodt_ctl1_s          cn52xx;
8328         struct cvmx_lmcx_wodt_ctl1_s          cn52xxp1;
8329         struct cvmx_lmcx_wodt_ctl1_s          cn56xx;
8330         struct cvmx_lmcx_wodt_ctl1_s          cn56xxp1;
8331 };
8332 typedef union cvmx_lmcx_wodt_ctl1 cvmx_lmcx_wodt_ctl1_t;
8333
8334 /**
8335  * cvmx_lmc#_wodt_mask
8336  *
8337  * LMC_WODT_MASK = LMC Write OnDieTermination mask
8338  * System designers may desire to terminate DQ/DQS lines for higher frequency DDR operations
8339  * especially on a multi-rank system. DDR3 DQ/DQS I/O's have built in
8340  * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
8341  * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts
8342  * in that DIMM. System designers may prefer different combinations of ODT ON's for writes
8343  * into different ranks. Octeon supports full programmability by way of the mask register below.
8344  * Each Rank position has its own 8-bit programmable field.
8345  * When the controller does a write to that rank, it sets the 4 ODT pins to the MASK pins below.
8346  * For eg., When doing a write into Rank0, a system designer may desire to terminate the lines
8347  * with the resistor on DIMM0/Rank1. The mask WODT_D0_R0 would then be [00000010].
8348  * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
8349  * required, write 0 in this register.
8350  *
8351  * Notes:
8352  * When a given RANK is selected, the WODT mask for that RANK is used.  The resulting WODT mask is
8353  * driven to the DIMMs in the following manner:
8354  *             RANK_ENA=1                    RANK_ENA=0
8355  * Mask[3] -> DIMM1_ODT_1                     MBZ
8356  * Mask[2] -> DIMM1_ODT_0                     DIMM1_ODT_0
8357  * Mask[1] -> DIMM0_ODT_1                     MBZ
8358  * Mask[0] -> DIMM0_ODT_0                     DIMM0_ODT_0
8359  *
8360  * LMC always writes entire cache blocks and always writes them via two consecutive
8361  * write CAS operations to the same rank+bank+row spaced exactly 4 CK's apart.
8362  * When a WODT mask bit is set, LMC asserts the OCTEON ODT output
8363  * pin(s) starting the same CK as the first write CAS operation. Then, OCTEON
8364  * normally continues to assert the ODT output pin(s) for 9+LMC*_CONTROL[WODT_BPRCH] more CK's
8365  * - for a total of 10+LMC*_CONTROL[WODT_BPRCH] CK's for the entire cache block write -
8366  * through the second write CAS operation of the cache block,
8367  * satisfying the 6 CK DDR3 ODTH8 requirements.
8368  * But it is possible for OCTEON to issue two cache block writes separated by as few as
8369  * WtW = 8 or 9 (10 if LMC*_CONTROL[WODT_BPRCH]=1) CK's. In that case, OCTEON asserts the ODT output pin(s)
8370  * for the WODT mask of the first cache block write for WtW CK's, then asserts
8371  * the ODT output pin(s) for the WODT mask of the second cache block write for 10+LMC*_CONTROL[WODT_BPRCH] CK's
8372  * (or less if a third cache block write follows within 8 or 9 (or 10) CK's of this second cache block write).
8373  * Note that it may be necessary to force LMC to space back-to-back cache block writes
8374  * to different ranks apart by at least 10+LMC*_CONTROL[WODT_BPRCH] CK's to prevent DDR3 ODTH8 violations.
8375  */
/*
 * LMC(0..3)_WODT_MASK: per-rank write-ODT mask register overlay.
 * Eight 8-bit mask fields (one per DIMM/rank combination) packed into a
 * single 64-bit word; field order flips with __BIG_ENDIAN_BITFIELD so the
 * named fields land on the same hardware bits on either endianness.
 * NOTE(review): scraped line-number artifacts removed from this block;
 * field layout and comments reconstructed verbatim from the visible source.
 */
union cvmx_lmcx_wodt_mask {
        uint64_t u64;                       /* whole-register access */
        struct cvmx_lmcx_wodt_mask_s {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t wodt_d3_r1                   : 8;  /**< Write ODT mask DIMM3, RANK1/DIMM3 in SingleRanked
                                                         *UNUSED IN 6xxx, and MBZ* */
        uint64_t wodt_d3_r0                   : 8;  /**< Write ODT mask DIMM3, RANK0
                                                         *UNUSED IN 6xxx, and MBZ* */
        uint64_t wodt_d2_r1                   : 8;  /**< Write ODT mask DIMM2, RANK1/DIMM2 in SingleRanked
                                                         *UNUSED IN 6xxx, and MBZ* */
        uint64_t wodt_d2_r0                   : 8;  /**< Write ODT mask DIMM2, RANK0
                                                         *UNUSED IN 6xxx, and MBZ* */
        uint64_t wodt_d1_r1                   : 8;  /**< Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked
                                                         if (!RANK_ENA) then WODT_D1_R1[3:0] MBZ
                                                         *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
        uint64_t wodt_d1_r0                   : 8;  /**< Write ODT mask DIMM1, RANK0
                                                         if (!RANK_ENA) then WODT_D1_R0[3,1] MBZ
                                                         *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
        uint64_t wodt_d0_r1                   : 8;  /**< Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked
                                                         if (!RANK_ENA) then WODT_D0_R1[3:0] MBZ
                                                         *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
        uint64_t wodt_d0_r0                   : 8;  /**< Write ODT mask DIMM0, RANK0
                                                         if (!RANK_ENA) then WODT_D0_R0[3,1] MBZ
                                                         *Upper 4 bits UNUSED IN 6xxx, and MBZ* */
#else
        uint64_t wodt_d0_r0                   : 8;
        uint64_t wodt_d0_r1                   : 8;
        uint64_t wodt_d1_r0                   : 8;
        uint64_t wodt_d1_r1                   : 8;
        uint64_t wodt_d2_r0                   : 8;
        uint64_t wodt_d2_r1                   : 8;
        uint64_t wodt_d3_r0                   : 8;
        uint64_t wodt_d3_r1                   : 8;
#endif
        } s;
        /* Per-chip views; all models share the common layout above. */
        struct cvmx_lmcx_wodt_mask_s          cn61xx;
        struct cvmx_lmcx_wodt_mask_s          cn63xx;
        struct cvmx_lmcx_wodt_mask_s          cn63xxp1;
        struct cvmx_lmcx_wodt_mask_s          cn66xx;
        struct cvmx_lmcx_wodt_mask_s          cn68xx;
        struct cvmx_lmcx_wodt_mask_s          cn68xxp1;
        struct cvmx_lmcx_wodt_mask_s          cnf71xx;
};
typedef union cvmx_lmcx_wodt_mask cvmx_lmcx_wodt_mask_t;
8420
8421 #endif