]> CyberLeo.Net >> Repos - FreeBSD/releng/9.2.git/blob - sys/contrib/octeon-sdk/cvmx-lmcx-defs.h
- Copy stable/9 to releng/9.2 as part of the 9.2-RELEASE cycle.
[FreeBSD/releng/9.2.git] / sys / contrib / octeon-sdk / cvmx-lmcx-defs.h
1 /***********************license start***************
2  * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
3  * reserved.
4  *
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are
8  * met:
9  *
10  *   * Redistributions of source code must retain the above copyright
11  *     notice, this list of conditions and the following disclaimer.
12  *
13  *   * Redistributions in binary form must reproduce the above
14  *     copyright notice, this list of conditions and the following
15  *     disclaimer in the documentation and/or other materials provided
16  *     with the distribution.
17
18  *   * Neither the name of Cavium Networks nor the names of
19  *     its contributors may be used to endorse or promote products
20  *     derived from this software without specific prior written
21  *     permission.
22
23  * This Software, including technical data, may be subject to U.S. export  control
24  * laws, including the U.S. Export Administration Act and its  associated
25  * regulations, and may be subject to export or import  regulations in other
26  * countries.
27
28  * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29  * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30  * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31  * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32  * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33  * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34  * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35  * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36  * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37  * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38  ***********************license end**************************************/
39
40
41 /**
42  * cvmx-lmcx-defs.h
43  *
44  * Configuration and status register (CSR) type definitions for
45  * Octeon lmcx.
46  *
47  * This file is auto generated. Do not edit.
48  *
49  * <hr>$Revision$<hr>
50  *
51  */
52 #ifndef __CVMX_LMCX_TYPEDEFS_H__
53 #define __CVMX_LMCX_TYPEDEFS_H__
54
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_BIST_CTL CSR. Valid on CN50XX/CN52XX
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_BIST_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_LMCX_BIST_CTL(%lu) is invalid on this chip\n", block_id);
	/* Second LMC (block_id 1) lives 0x60000000 above the first. */
	return CVMX_ADD_IO_SEG(0x00011800880000F0ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_BIST_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F0ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_BIST_RESULT CSR. Valid on CN50XX/CN52XX
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_BIST_RESULT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_LMCX_BIST_RESULT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880000F8ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_BIST_RESULT(block_id) (CVMX_ADD_IO_SEG(0x00011800880000F8ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_CHAR_CTL CSR (CN63XX only, which has a single
 * LMC, so block_id does not affect the address). */
static inline uint64_t CVMX_LMCX_CHAR_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_CHAR_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000220ull);
}
#else
#define CVMX_LMCX_CHAR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000220ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_CHAR_MASK0 CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_CHAR_MASK0(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_CHAR_MASK0(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000228ull);
}
#else
#define CVMX_LMCX_CHAR_MASK0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000228ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_CHAR_MASK1 CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_CHAR_MASK1(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_CHAR_MASK1(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000230ull);
}
#else
#define CVMX_LMCX_CHAR_MASK1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000230ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_CHAR_MASK2 CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_CHAR_MASK2(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_CHAR_MASK2(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000238ull);
}
#else
#define CVMX_LMCX_CHAR_MASK2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000238ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_CHAR_MASK3 CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_CHAR_MASK3(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_CHAR_MASK3(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000240ull);
}
#else
#define CVMX_LMCX_CHAR_MASK3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000240ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_CHAR_MASK4 CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_CHAR_MASK4(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_CHAR_MASK4(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000318ull);
}
#else
#define CVMX_LMCX_CHAR_MASK4(block_id) (CVMX_ADD_IO_SEG(0x0001180088000318ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_COMP_CTL CSR. Valid on CN3xxx/CN5xxx
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_COMP_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_COMP_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000028ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000028ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_COMP_CTL2 CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_COMP_CTL2(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_COMP_CTL2(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001B8ull);
}
#else
#define CVMX_LMCX_COMP_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_CONFIG CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_CONFIG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_CONFIG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000188ull);
}
#else
#define CVMX_LMCX_CONFIG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000188ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_CONTROL CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_CONTROL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_CONTROL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000190ull);
}
#else
#define CVMX_LMCX_CONTROL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000190ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_CTL CSR. Valid on CN3xxx/CN5xxx
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000010ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000010ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_CTL1 CSR. Valid on CN30XX/CN50XX/CN52XX/
 * CN58XX (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_CTL1(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_CTL1(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000090ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000090ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_DCLK_CNT CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_DCLK_CNT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_DCLK_CNT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001E0ull);
}
#else
#define CVMX_LMCX_DCLK_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_DCLK_CNT_HI CSR. Valid on CN3xxx/CN5xxx
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_DCLK_CNT_HI(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_DCLK_CNT_HI(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000070ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DCLK_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000070ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_DCLK_CNT_LO CSR. Valid on CN3xxx/CN5xxx
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_DCLK_CNT_LO(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_DCLK_CNT_LO(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000068ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DCLK_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000068ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_DCLK_CTL CSR. Valid only on CN56XX
 * (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_DCLK_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_LMCX_DCLK_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880000B8ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DCLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B8ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_DDR2_CTL CSR. Valid on CN3xxx/CN5xxx
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_DDR2_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_DDR2_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000018ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DDR2_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000018ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_DDR_PLL_CTL CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_DDR_PLL_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_DDR_PLL_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000258ull);
}
#else
#define CVMX_LMCX_DDR_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000258ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_DELAY_CFG CSR. Valid on CN30XX/CN38XX/
 * CN50XX/CN52XX/CN58XX (LMC0 only) and CN56XX (LMC0-1); warns otherwise. */
static inline uint64_t CVMX_LMCX_DELAY_CFG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_DELAY_CFG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000088ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DELAY_CFG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000088ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_DIMM(offset)_PARAMS CSR (CN63XX only; offset
 * selects DIMM 0 or 1 at 8-byte stride). */
static inline uint64_t CVMX_LMCX_DIMMX_PARAMS(unsigned long offset, unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && (((offset <= 1)) && ((block_id == 0))))))
		cvmx_warn("CVMX_LMCX_DIMMX_PARAMS(%lu,%lu) is invalid on this chip\n", offset, block_id);
	/* NOTE: generator emits (block_id & 0) * 0 here, i.e. block_id
	 * contributes nothing to the address (single LMC on CN63XX). */
	return CVMX_ADD_IO_SEG(0x0001180088000270ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8;
}
#else
#define CVMX_LMCX_DIMMX_PARAMS(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000270ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_DIMM_CTL CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_DIMM_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_DIMM_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000310ull);
}
#else
#define CVMX_LMCX_DIMM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000310ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_DLL_CTL CSR. Valid on CN52XX (LMC0 only)
 * and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_DLL_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1)))))
		cvmx_warn("CVMX_LMCX_DLL_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880000C0ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000C0ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_DLL_CTL2 CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_DLL_CTL2(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_DLL_CTL2(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001C8ull);
}
#else
#define CVMX_LMCX_DLL_CTL2(block_id) (CVMX_ADD_IO_SEG(0x00011800880001C8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_DLL_CTL3 CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_DLL_CTL3(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_DLL_CTL3(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000218ull);
}
#else
#define CVMX_LMCX_DLL_CTL3(block_id) (CVMX_ADD_IO_SEG(0x0001180088000218ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_DUAL_MEMCFG CSR. Valid on CN50XX/CN52XX/
 * CN58XX/CN63XX (LMC0 only) and CN56XX (LMC0-1); warns otherwise. */
static inline uint64_t CVMX_LMCX_DUAL_MEMCFG(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_DUAL_MEMCFG(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_DUAL_MEMCFG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000098ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_ECC_SYND CSR. Valid on all listed chips
 * (LMC0 only, except CN56XX which supports LMC0-1); warns otherwise. */
static inline uint64_t CVMX_LMCX_ECC_SYND(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_ECC_SYND(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_ECC_SYND(block_id) (CVMX_ADD_IO_SEG(0x0001180088000038ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_FADR CSR. Valid on all listed chips
 * (LMC0 only, except CN56XX which supports LMC0-1); warns otherwise. */
static inline uint64_t CVMX_LMCX_FADR(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_FADR(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_FADR(block_id) (CVMX_ADD_IO_SEG(0x0001180088000020ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_IFB_CNT CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_IFB_CNT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_IFB_CNT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001D0ull);
}
#else
#define CVMX_LMCX_IFB_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_IFB_CNT_HI CSR. Valid on CN3xxx/CN5xxx
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_IFB_CNT_HI(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_IFB_CNT_HI(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000050ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_IFB_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000050ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_IFB_CNT_LO CSR. Valid on CN3xxx/CN5xxx
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_IFB_CNT_LO(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_IFB_CNT_LO(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000048ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_IFB_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000048ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_INT CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_INT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_INT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001F0ull);
}
#else
#define CVMX_LMCX_INT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_INT_EN CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_INT_EN(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_INT_EN(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001E8ull);
}
#else
#define CVMX_LMCX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800880001E8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_MEM_CFG0 CSR. Valid on CN3xxx/CN5xxx
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_MEM_CFG0(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_MEM_CFG0(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000000ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_MEM_CFG0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000000ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_MEM_CFG1 CSR. Valid on CN3xxx/CN5xxx
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_MEM_CFG1(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_MEM_CFG1(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000008ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_MEM_CFG1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000008ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_MODEREG_PARAMS0 CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_MODEREG_PARAMS0(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_MODEREG_PARAMS0(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001A8ull);
}
#else
#define CVMX_LMCX_MODEREG_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_MODEREG_PARAMS1 CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_MODEREG_PARAMS1(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_MODEREG_PARAMS1(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000260ull);
}
#else
#define CVMX_LMCX_MODEREG_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000260ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_NXM CSR. Valid on CN52XX/CN58XX/CN63XX
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_NXM(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_NXM(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_NXM(block_id) (CVMX_ADD_IO_SEG(0x00011800880000C8ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_OPS_CNT CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_OPS_CNT(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_OPS_CNT(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x00011800880001D8ull);
}
#else
#define CVMX_LMCX_OPS_CNT(block_id) (CVMX_ADD_IO_SEG(0x00011800880001D8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_OPS_CNT_HI CSR. Valid on CN3xxx/CN5xxx
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_OPS_CNT_HI(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_OPS_CNT_HI(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000060ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_OPS_CNT_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180088000060ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_OPS_CNT_LO CSR. Valid on CN3xxx/CN5xxx
 * (LMC0 only) and CN56XX (LMC0-1); warns at run time otherwise. */
static inline uint64_t CVMX_LMCX_OPS_CNT_LO(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((block_id == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((block_id <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_OPS_CNT_LO(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000058ull) + ((block_id) & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_OPS_CNT_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180088000058ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC0_PHY_CTL CSR (CN63XX only; block_id ignored). */
static inline uint64_t CVMX_LMCX_PHY_CTL(unsigned long block_id)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((block_id == 0)))))
		cvmx_warn("CVMX_LMCX_PHY_CTL(%lu) is invalid on this chip\n", block_id);
	return CVMX_ADD_IO_SEG(0x0001180088000210ull);
}
#else
#define CVMX_LMCX_PHY_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000210ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_PLL_BWCTL CSR; valid only on CN3xXX with block 0. */
static inline uint64_t CVMX_LMCX_PLL_BWCTL(unsigned long block_id)
{
        int valid =
            (OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id == 0);
        if (!valid)
                cvmx_warn("CVMX_LMCX_PLL_BWCTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000040ull);
}
#else
#define CVMX_LMCX_PLL_BWCTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000040ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_PLL_CTL CSR; warns on invalid model/block. */
static inline uint64_t CVMX_LMCX_PLL_CTL(unsigned long block_id)
{
        int valid =
            (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
            (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);
        if (!valid)
                cvmx_warn("CVMX_LMCX_PLL_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880000A8ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_PLL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A8ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_PLL_STATUS CSR; warns on invalid model/block. */
static inline uint64_t CVMX_LMCX_PLL_STATUS(unsigned long block_id)
{
        int valid =
            (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
            (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);
        if (!valid)
                cvmx_warn("CVMX_LMCX_PLL_STATUS(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880000B0ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_PLL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800880000B0ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_READ_LEVEL_CTL CSR (CN52XX block 0, CN56XX blocks 0-1). */
static inline uint64_t CVMX_LMCX_READ_LEVEL_CTL(unsigned long block_id)
{
        int valid =
            (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1);
        if (!valid)
                cvmx_warn("CVMX_LMCX_READ_LEVEL_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000140ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_READ_LEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000140ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_READ_LEVEL_DBG CSR (CN52XX block 0, CN56XX blocks 0-1). */
static inline uint64_t CVMX_LMCX_READ_LEVEL_DBG(unsigned long block_id)
{
        int valid =
            (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1);
        if (!valid)
                cvmx_warn("CVMX_LMCX_READ_LEVEL_DBG(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000148ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_READ_LEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000148ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_READ_LEVEL_RANK(offset) CSR; offset is the
 * rank (0-3), block_id the LMC instance. Warns on invalid arguments. */
static inline uint64_t CVMX_LMCX_READ_LEVEL_RANKX(unsigned long offset, unsigned long block_id)
{
        int valid =
            (OCTEON_IS_MODEL(OCTEON_CN52XX) && offset <= 3 && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN56XX) && offset <= 3 && block_id <= 1);
        if (!valid)
                cvmx_warn("CVMX_LMCX_READ_LEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000100ull) + (((offset) & 3) + ((block_id) & 1) * 0xC000000ull) * 8;
}
#else
#define CVMX_LMCX_READ_LEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000100ull) + (((offset) & 3) + ((block_id) & 1) * 0xC000000ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_RESET_CTL CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_RESET_CTL(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_RESET_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000180ull);
}
#else
#define CVMX_LMCX_RESET_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000180ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_RLEVEL_CTL CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_RLEVEL_CTL(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_RLEVEL_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880002A0ull);
}
#else
#define CVMX_LMCX_RLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_RLEVEL_DBG CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_RLEVEL_DBG(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_RLEVEL_DBG(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880002A8ull);
}
#else
#define CVMX_LMCX_RLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x00011800880002A8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_RLEVEL_RANK(offset) CSR (CN63XX only).
 * NOTE(review): the generated "(block_id) & 0" term is always zero —
 * CN63XX has a single LMC, so block_id does not affect the address. */
static inline uint64_t CVMX_LMCX_RLEVEL_RANKX(unsigned long offset, unsigned long block_id)
{
        if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) && offset <= 3 && block_id == 0))
                cvmx_warn("CVMX_LMCX_RLEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000280ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 8;
}
#else
#define CVMX_LMCX_RLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180088000280ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_RODT_COMP_CTL CSR; warns on invalid model/block. */
static inline uint64_t CVMX_LMCX_RODT_COMP_CTL(unsigned long block_id)
{
        int valid =
            (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
            (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);
        if (!valid)
                cvmx_warn("CVMX_LMCX_RODT_COMP_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880000A0ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_RODT_COMP_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800880000A0ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_RODT_CTL CSR; warns when block_id does not
 * exist on the running chip model (only CN56XX has two LMC blocks). */
static inline uint64_t CVMX_LMCX_RODT_CTL(unsigned long block_id)
{
        int valid =
            (OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
            (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);
        if (!valid)
                cvmx_warn("CVMX_LMCX_RODT_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000078ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_RODT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000078ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_RODT_MASK CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_RODT_MASK(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_RODT_MASK(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000268ull);
}
#else
#define CVMX_LMCX_RODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x0001180088000268ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_SLOT_CTL0 CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_SLOT_CTL0(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_SLOT_CTL0(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880001F8ull);
}
#else
#define CVMX_LMCX_SLOT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x00011800880001F8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_SLOT_CTL1 CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_SLOT_CTL1(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_SLOT_CTL1(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000200ull);
}
#else
#define CVMX_LMCX_SLOT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000200ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_SLOT_CTL2 CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_SLOT_CTL2(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_SLOT_CTL2(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000208ull);
}
#else
#define CVMX_LMCX_SLOT_CTL2(block_id) (CVMX_ADD_IO_SEG(0x0001180088000208ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_TIMING_PARAMS0 CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_TIMING_PARAMS0(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_TIMING_PARAMS0(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000198ull);
}
#else
#define CVMX_LMCX_TIMING_PARAMS0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000198ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_TIMING_PARAMS1 CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_TIMING_PARAMS1(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_TIMING_PARAMS1(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880001A0ull);
}
#else
#define CVMX_LMCX_TIMING_PARAMS1(block_id) (CVMX_ADD_IO_SEG(0x00011800880001A0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_TRO_CTL CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_TRO_CTL(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_TRO_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000248ull);
}
#else
#define CVMX_LMCX_TRO_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000248ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_TRO_STAT CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_TRO_STAT(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_TRO_STAT(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000250ull);
}
#else
#define CVMX_LMCX_TRO_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180088000250ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_WLEVEL_CTL CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_WLEVEL_CTL(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_WLEVEL_CTL(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000300ull);
}
#else
#define CVMX_LMCX_WLEVEL_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180088000300ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_WLEVEL_DBG CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_WLEVEL_DBG(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_WLEVEL_DBG(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000308ull);
}
#else
#define CVMX_LMCX_WLEVEL_DBG(block_id) (CVMX_ADD_IO_SEG(0x0001180088000308ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_WLEVEL_RANK(offset) CSR (CN63XX only).
 * NOTE(review): the generated "(block_id) & 0" term is always zero —
 * CN63XX has a single LMC, so block_id does not affect the address. */
static inline uint64_t CVMX_LMCX_WLEVEL_RANKX(unsigned long offset, unsigned long block_id)
{
        if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) && offset <= 3 && block_id == 0))
                cvmx_warn("CVMX_LMCX_WLEVEL_RANKX(%lu,%lu) is invalid on this chip\n", offset, block_id);
        return CVMX_ADD_IO_SEG(0x00011800880002B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 8;
}
#else
#define CVMX_LMCX_WLEVEL_RANKX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800880002B0ull) + (((offset) & 3) + ((block_id) & 0) * 0x0ull) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_WODT_CTL0 CSR; warns when block_id does not
 * exist on the running chip model (only CN56XX has two LMC blocks). */
static inline uint64_t CVMX_LMCX_WODT_CTL0(unsigned long block_id)
{
        int valid =
            (OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN38XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN50XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1) ||
            (OCTEON_IS_MODEL(OCTEON_CN58XX) && block_id == 0);
        if (!valid)
                cvmx_warn("CVMX_LMCX_WODT_CTL0(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000030ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_WODT_CTL0(block_id) (CVMX_ADD_IO_SEG(0x0001180088000030ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_WODT_CTL1 CSR; warns on invalid model/block. */
static inline uint64_t CVMX_LMCX_WODT_CTL1(unsigned long block_id)
{
        int valid =
            (OCTEON_IS_MODEL(OCTEON_CN30XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN31XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN52XX) && block_id == 0) ||
            (OCTEON_IS_MODEL(OCTEON_CN56XX) && block_id <= 1);
        if (!valid)
                cvmx_warn("CVMX_LMCX_WODT_CTL1(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x0001180088000080ull) + (block_id & 1) * 0x60000000ull;
}
#else
#define CVMX_LMCX_WODT_CTL1(block_id) (CVMX_ADD_IO_SEG(0x0001180088000080ull) + ((block_id) & 1) * 0x60000000ull)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Address of the LMC(block_id)_WODT_MASK CSR (CN63XX only; block_id must be 0). */
static inline uint64_t CVMX_LMCX_WODT_MASK(unsigned long block_id)
{
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) || block_id != 0)
                cvmx_warn("CVMX_LMCX_WODT_MASK(%lu) is invalid on this chip\n", block_id);
        return CVMX_ADD_IO_SEG(0x00011800880001B0ull);
}
#else
#define CVMX_LMCX_WODT_MASK(block_id) (CVMX_ADD_IO_SEG(0x00011800880001B0ull))
#endif
944
945 /**
946  * cvmx_lmc#_bist_ctl
947  *
948  * Notes:
949  * This controls BiST only for the memories that operate on DCLK.  The normal, chip-wide BiST flow
950  * controls BiST for the memories that operate on ECLK.
951  */
union cvmx_lmcx_bist_ctl
{
        uint64_t u64;                               /* Raw 64-bit register value. */
        /* Field layout; order is reversed between endiannesses so the same
         * bit positions are addressed regardless of host byte order. */
        struct cvmx_lmcx_bist_ctl_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_1_63                : 63;
        uint64_t start                        : 1;  /**< A 0->1 transition causes BiST to run. */
#else
        uint64_t start                        : 1;
        uint64_t reserved_1_63                : 63;
#endif
        } s;
        /* Per-model views; all models listed share the common layout above. */
        struct cvmx_lmcx_bist_ctl_s           cn50xx;
        struct cvmx_lmcx_bist_ctl_s           cn52xx;
        struct cvmx_lmcx_bist_ctl_s           cn52xxp1;
        struct cvmx_lmcx_bist_ctl_s           cn56xx;
        struct cvmx_lmcx_bist_ctl_s           cn56xxp1;
};
typedef union cvmx_lmcx_bist_ctl cvmx_lmcx_bist_ctl_t;
972
973 /**
974  * cvmx_lmc#_bist_result
975  *
976  * Notes:
977  * Access to the internal BiST results
978  * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail).
979  */
union cvmx_lmcx_bist_result
{
        uint64_t u64;                               /* Raw 64-bit register value. */
        /* Field layout; order is reversed between endiannesses so the same
         * bit positions are addressed regardless of host byte order. */
        struct cvmx_lmcx_bist_result_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_11_63               : 53;
        uint64_t csrd2e                       : 1;  /**< BiST result of CSRD2E memory (0=pass, !0=fail) */
        uint64_t csre2d                       : 1;  /**< BiST result of CSRE2D memory (0=pass, !0=fail) */
        uint64_t mwf                          : 1;  /**< BiST result of MWF memories (0=pass, !0=fail) */
        uint64_t mwd                          : 3;  /**< BiST result of MWD memories (0=pass, !0=fail) */
        uint64_t mwc                          : 1;  /**< BiST result of MWC memories (0=pass, !0=fail) */
        uint64_t mrf                          : 1;  /**< BiST result of MRF memories (0=pass, !0=fail) */
        uint64_t mrd                          : 3;  /**< BiST result of MRD memories (0=pass, !0=fail) */
#else
        uint64_t mrd                          : 3;
        uint64_t mrf                          : 1;
        uint64_t mwc                          : 1;
        uint64_t mwd                          : 3;
        uint64_t mwf                          : 1;
        uint64_t csre2d                       : 1;
        uint64_t csrd2e                       : 1;
        uint64_t reserved_11_63               : 53;
#endif
        } s;
        /* cn50xx layout lacks the csre2d/csrd2e result bits present in s. */
        struct cvmx_lmcx_bist_result_cn50xx
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_9_63                : 55;
        uint64_t mwf                          : 1;  /**< BiST result of MWF memories (0=pass, !0=fail) */
        uint64_t mwd                          : 3;  /**< BiST result of MWD memories (0=pass, !0=fail) */
        uint64_t mwc                          : 1;  /**< BiST result of MWC memories (0=pass, !0=fail) */
        uint64_t mrf                          : 1;  /**< BiST result of MRF memories (0=pass, !0=fail) */
        uint64_t mrd                          : 3;  /**< BiST result of MRD memories (0=pass, !0=fail) */
#else
        uint64_t mrd                          : 3;
        uint64_t mrf                          : 1;
        uint64_t mwc                          : 1;
        uint64_t mwd                          : 3;
        uint64_t mwf                          : 1;
        uint64_t reserved_9_63                : 55;
#endif
        } cn50xx;
        struct cvmx_lmcx_bist_result_s        cn52xx;
        struct cvmx_lmcx_bist_result_s        cn52xxp1;
        struct cvmx_lmcx_bist_result_s        cn56xx;
        struct cvmx_lmcx_bist_result_s        cn56xxp1;
};
typedef union cvmx_lmcx_bist_result cvmx_lmcx_bist_result_t;
1029
1030 /**
1031  * cvmx_lmc#_char_ctl
1032  *
1033  * LMC_CHAR_CTL = LMC Characterization Control
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
1035  */
union cvmx_lmcx_char_ctl
{
        uint64_t u64;                               /* Raw 64-bit register value. */
        /* Field layout; order is reversed between endiannesses so the same
         * bit positions are addressed regardless of host byte order. */
        struct cvmx_lmcx_char_ctl_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_42_63               : 22;
        uint64_t en                           : 1;  /**< Enable characterization */
        uint64_t sel                          : 1;  /**< Pattern select
                                                         0 = PRBS
                                                         1 = Programmable pattern */
        uint64_t prog                         : 8;  /**< Programmable pattern */
        uint64_t prbs                         : 32; /**< PRBS Polynomial */
#else
        uint64_t prbs                         : 32;
        uint64_t prog                         : 8;
        uint64_t sel                          : 1;
        uint64_t en                           : 1;
        uint64_t reserved_42_63               : 22;
#endif
        } s;
        /* Register exists only on CN63XX-family parts. */
        struct cvmx_lmcx_char_ctl_s           cn63xx;
        struct cvmx_lmcx_char_ctl_s           cn63xxp1;
};
typedef union cvmx_lmcx_char_ctl cvmx_lmcx_char_ctl_t;
1061
1062 /**
1063  * cvmx_lmc#_char_mask0
1064  *
1065  * LMC_CHAR_MASK0 = LMC Characterization Mask0
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
1067  */
union cvmx_lmcx_char_mask0
{
        uint64_t u64;                               /* Raw 64-bit register value. */
        struct cvmx_lmcx_char_mask0_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t mask                         : 64; /**< Mask for DQ0[63:0] */
#else
        uint64_t mask                         : 64;
#endif
        } s;
        /* Register exists only on CN63XX-family parts. */
        struct cvmx_lmcx_char_mask0_s         cn63xx;
        struct cvmx_lmcx_char_mask0_s         cn63xxp1;
};
typedef union cvmx_lmcx_char_mask0 cvmx_lmcx_char_mask0_t;
1083
1084 /**
1085  * cvmx_lmc#_char_mask1
1086  *
1087  * LMC_CHAR_MASK1 = LMC Characterization Mask1
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
1089  */
union cvmx_lmcx_char_mask1
{
        uint64_t u64;                               /* Raw 64-bit register value. */
        /* Field layout; order is reversed between endiannesses so the same
         * bit positions are addressed regardless of host byte order. */
        struct cvmx_lmcx_char_mask1_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_8_63                : 56;
        uint64_t mask                         : 8;  /**< Mask for DQ0[71:64] */
#else
        uint64_t mask                         : 8;
        uint64_t reserved_8_63                : 56;
#endif
        } s;
        /* Register exists only on CN63XX-family parts. */
        struct cvmx_lmcx_char_mask1_s         cn63xx;
        struct cvmx_lmcx_char_mask1_s         cn63xxp1;
};
typedef union cvmx_lmcx_char_mask1 cvmx_lmcx_char_mask1_t;
1107
1108 /**
1109  * cvmx_lmc#_char_mask2
1110  *
1111  * LMC_CHAR_MASK2 = LMC Characterization Mask2
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
1113  */
union cvmx_lmcx_char_mask2
{
        uint64_t u64;                               /* Raw 64-bit register value. */
        struct cvmx_lmcx_char_mask2_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t mask                         : 64; /**< Mask for DQ1[63:0] */
#else
        uint64_t mask                         : 64;
#endif
        } s;
        /* Register exists only on CN63XX-family parts. */
        struct cvmx_lmcx_char_mask2_s         cn63xx;
        struct cvmx_lmcx_char_mask2_s         cn63xxp1;
};
typedef union cvmx_lmcx_char_mask2 cvmx_lmcx_char_mask2_t;
1129
1130 /**
1131  * cvmx_lmc#_char_mask3
1132  *
1133  * LMC_CHAR_MASK3 = LMC Characterization Mask3
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
1135  */
union cvmx_lmcx_char_mask3
{
        uint64_t u64;                               /* Raw 64-bit register value. */
        /* Field layout; order is reversed between endiannesses so the same
         * bit positions are addressed regardless of host byte order. */
        struct cvmx_lmcx_char_mask3_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_8_63                : 56;
        uint64_t mask                         : 8;  /**< Mask for DQ1[71:64] */
#else
        uint64_t mask                         : 8;
        uint64_t reserved_8_63                : 56;
#endif
        } s;
        /* Register exists only on CN63XX-family parts. */
        struct cvmx_lmcx_char_mask3_s         cn63xx;
        struct cvmx_lmcx_char_mask3_s         cn63xxp1;
};
typedef union cvmx_lmcx_char_mask3 cvmx_lmcx_char_mask3_t;
1153
1154 /**
1155  * cvmx_lmc#_char_mask4
1156  *
1157  * LMC_CHAR_MASK4 = LMC Characterization Mask4
 * This register is an assortment of various control fields needed to characterize the DDR3 interface
1159  */
union cvmx_lmcx_char_mask4
{
        uint64_t u64;                               /* Raw 64-bit register value. */
        /* Per-pin mask bits for the DDR command/address bus; order is reversed
         * between endiannesses so bit positions match in hardware. */
        struct cvmx_lmcx_char_mask4_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_33_63               : 31;
        uint64_t reset_n_mask                 : 1;  /**< Mask for RESET_L */
        uint64_t a_mask                       : 16; /**< Mask for A[15:0] */
        uint64_t ba_mask                      : 3;  /**< Mask for BA[2:0] */
        uint64_t we_n_mask                    : 1;  /**< Mask for WE_N */
        uint64_t cas_n_mask                   : 1;  /**< Mask for CAS_N */
        uint64_t ras_n_mask                   : 1;  /**< Mask for RAS_N */
        uint64_t odt1_mask                    : 2;  /**< Mask for ODT1 */
        uint64_t odt0_mask                    : 2;  /**< Mask for ODT0 */
        uint64_t cs1_n_mask                   : 2;  /**< Mask for CS1_N */
        uint64_t cs0_n_mask                   : 2;  /**< Mask for CS0_N */
        uint64_t cke_mask                     : 2;  /**< Mask for CKE* */
#else
        uint64_t cke_mask                     : 2;
        uint64_t cs0_n_mask                   : 2;
        uint64_t cs1_n_mask                   : 2;
        uint64_t odt0_mask                    : 2;
        uint64_t odt1_mask                    : 2;
        uint64_t ras_n_mask                   : 1;
        uint64_t cas_n_mask                   : 1;
        uint64_t we_n_mask                    : 1;
        uint64_t ba_mask                      : 3;
        uint64_t a_mask                       : 16;
        uint64_t reset_n_mask                 : 1;
        uint64_t reserved_33_63               : 31;
#endif
        } s;
        /* Register exists only on CN63XX-family parts. */
        struct cvmx_lmcx_char_mask4_s         cn63xx;
        struct cvmx_lmcx_char_mask4_s         cn63xxp1;
};
typedef union cvmx_lmcx_char_mask4 cvmx_lmcx_char_mask4_t;
1197
1198 /**
1199  * cvmx_lmc#_comp_ctl
1200  *
1201  * LMC_COMP_CTL = LMC Compensation control
1202  *
1203  */
1204 union cvmx_lmcx_comp_ctl
1205 {
1206         uint64_t u64;
1207         struct cvmx_lmcx_comp_ctl_s
1208         {
1209 #if __BYTE_ORDER == __BIG_ENDIAN
1210         uint64_t reserved_32_63               : 32;
1211         uint64_t nctl_csr                     : 4;  /**< Compensation control bits */
1212         uint64_t nctl_clk                     : 4;  /**< Compensation control bits */
1213         uint64_t nctl_cmd                     : 4;  /**< Compensation control bits */
1214         uint64_t nctl_dat                     : 4;  /**< Compensation control bits */
1215         uint64_t pctl_csr                     : 4;  /**< Compensation control bits */
1216         uint64_t pctl_clk                     : 4;  /**< Compensation control bits */
1217         uint64_t reserved_0_7                 : 8;
1218 #else
1219         uint64_t reserved_0_7                 : 8;
1220         uint64_t pctl_clk                     : 4;
1221         uint64_t pctl_csr                     : 4;
1222         uint64_t nctl_dat                     : 4;
1223         uint64_t nctl_cmd                     : 4;
1224         uint64_t nctl_clk                     : 4;
1225         uint64_t nctl_csr                     : 4;
1226         uint64_t reserved_32_63               : 32;
1227 #endif
1228         } s;
1229         struct cvmx_lmcx_comp_ctl_cn30xx
1230         {
1231 #if __BYTE_ORDER == __BIG_ENDIAN
1232         uint64_t reserved_32_63               : 32;
1233         uint64_t nctl_csr                     : 4;  /**< Compensation control bits */
1234         uint64_t nctl_clk                     : 4;  /**< Compensation control bits */
1235         uint64_t nctl_cmd                     : 4;  /**< Compensation control bits */
1236         uint64_t nctl_dat                     : 4;  /**< Compensation control bits */
1237         uint64_t pctl_csr                     : 4;  /**< Compensation control bits */
1238         uint64_t pctl_clk                     : 4;  /**< Compensation control bits */
1239         uint64_t pctl_cmd                     : 4;  /**< Compensation control bits */
1240         uint64_t pctl_dat                     : 4;  /**< Compensation control bits */
1241 #else
1242         uint64_t pctl_dat                     : 4;
1243         uint64_t pctl_cmd                     : 4;
1244         uint64_t pctl_clk                     : 4;
1245         uint64_t pctl_csr                     : 4;
1246         uint64_t nctl_dat                     : 4;
1247         uint64_t nctl_cmd                     : 4;
1248         uint64_t nctl_clk                     : 4;
1249         uint64_t nctl_csr                     : 4;
1250         uint64_t reserved_32_63               : 32;
1251 #endif
1252         } cn30xx;
1253         struct cvmx_lmcx_comp_ctl_cn30xx      cn31xx;
1254         struct cvmx_lmcx_comp_ctl_cn30xx      cn38xx;
1255         struct cvmx_lmcx_comp_ctl_cn30xx      cn38xxp2;
1256         struct cvmx_lmcx_comp_ctl_cn50xx
1257         {
1258 #if __BYTE_ORDER == __BIG_ENDIAN
1259         uint64_t reserved_32_63               : 32;
1260         uint64_t nctl_csr                     : 4;  /**< Compensation control bits */
1261         uint64_t reserved_20_27               : 8;
1262         uint64_t nctl_dat                     : 4;  /**< Compensation control bits */
1263         uint64_t pctl_csr                     : 4;  /**< Compensation control bits */
1264         uint64_t reserved_5_11                : 7;
1265         uint64_t pctl_dat                     : 5;  /**< Compensation control bits */
1266 #else
1267         uint64_t pctl_dat                     : 5;
1268         uint64_t reserved_5_11                : 7;
1269         uint64_t pctl_csr                     : 4;
1270         uint64_t nctl_dat                     : 4;
1271         uint64_t reserved_20_27               : 8;
1272         uint64_t nctl_csr                     : 4;
1273         uint64_t reserved_32_63               : 32;
1274 #endif
1275         } cn50xx;
1276         struct cvmx_lmcx_comp_ctl_cn50xx      cn52xx;
1277         struct cvmx_lmcx_comp_ctl_cn50xx      cn52xxp1;
1278         struct cvmx_lmcx_comp_ctl_cn50xx      cn56xx;
1279         struct cvmx_lmcx_comp_ctl_cn50xx      cn56xxp1;
1280         struct cvmx_lmcx_comp_ctl_cn50xx      cn58xx;
        /* CN58XX pass-1 layout: PCTL_DAT is 4 bits here (5 bits in the
           cn50xx layout used by CN50XX/CN52XX/CN56XX/CN58XX above). */
1281         struct cvmx_lmcx_comp_ctl_cn58xxp1
1282         {
1283 #if __BYTE_ORDER == __BIG_ENDIAN
1284         uint64_t reserved_32_63               : 32;
1285         uint64_t nctl_csr                     : 4;  /**< Compensation control bits */
1286         uint64_t reserved_20_27               : 8;
1287         uint64_t nctl_dat                     : 4;  /**< Compensation control bits */
1288         uint64_t pctl_csr                     : 4;  /**< Compensation control bits */
1289         uint64_t reserved_4_11                : 8;
1290         uint64_t pctl_dat                     : 4;  /**< Compensation control bits */
1291 #else
        /* Little-endian bit order: same fields as above, mirrored */
1292         uint64_t pctl_dat                     : 4;
1293         uint64_t reserved_4_11                : 8;
1294         uint64_t pctl_csr                     : 4;
1295         uint64_t nctl_dat                     : 4;
1296         uint64_t reserved_20_27               : 8;
1297         uint64_t nctl_csr                     : 4;
1298         uint64_t reserved_32_63               : 32;
1299 #endif
1300         } cn58xxp1;
1301 };
1302 typedef union cvmx_lmcx_comp_ctl cvmx_lmcx_comp_ctl_t; /* convenience typedef for the LMC*_COMP_CTL CSR union */
1303
1304 /**
1305  * cvmx_lmc#_comp_ctl2
1306  *
1307  * LMC_COMP_CTL2 = LMC Compensation control
1308  *
1309  */
1310 union cvmx_lmcx_comp_ctl2
1311 {
1312         uint64_t u64;
1313         struct cvmx_lmcx_comp_ctl2_s
1314         {
1315 #if __BYTE_ORDER == __BIG_ENDIAN
1316         uint64_t reserved_34_63               : 30;
1317         uint64_t ddr__ptune                   : 4;  /**< DDR PCTL from compensation circuit
1318                                                          The encoded value provides debug information for the
1319                                                          compensation impedance on P-pullup */
1320         uint64_t ddr__ntune                   : 4;  /**< DDR NCTL from compensation circuit
1321                                                          The encoded value provides debug information for the
1322                                                          compensation impedance on N-pulldown */
1323         uint64_t m180                         : 1;  /**< Cap impedance at 180 Ohm (instead of 240 Ohm) */
1324         uint64_t byp                          : 1;  /**< Bypass mode
1325                                                          When set, PTUNE,NTUNE are the compensation setting.
1326                                                          When clear, DDR_PTUNE,DDR_NTUNE are the compensation setting. */
1327         uint64_t ptune                        : 4;  /**< PCTL impedance control in bypass mode */
1328         uint64_t ntune                        : 4;  /**< NCTL impedance control in bypass mode */
1329         uint64_t rodt_ctl                     : 4;  /**< NCTL RODT impedance control bits
1330                                                          This field controls ODT values during a memory read
1331                                                          on the Octeon side
1332                                                          0000 = No ODT
1333                                                          0001 = 20 ohm
1334                                                          0010 = 30 ohm
1335                                                          0011 = 40 ohm
1336                                                          0100 = 60 ohm
1337                                                          0101 = 120 ohm
1338                                                          0110-1111 = Reserved */
1339         uint64_t cmd_ctl                      : 4;  /**< Drive strength control for CMD/A/RESET_L/CKE* drivers
1340                                                          0001 = 24 ohm
1341                                                          0010 = 26.67 ohm
1342                                                          0011 = 30 ohm
1343                                                          0100 = 34.3 ohm
1344                                                          0101 = 40 ohm
1345                                                          0110 = 48 ohm
1346                                                          0111 = 60 ohm
1347                                                          0000,1000-1111 = Reserved */
1348         uint64_t ck_ctl                       : 4;  /**< Drive strength control for CK/CS*_L/ODT drivers
1349                                                          0001 = 24 ohm
1350                                                          0010 = 26.67 ohm
1351                                                          0011 = 30 ohm
1352                                                          0100 = 34.3 ohm
1353                                                          0101 = 40 ohm
1354                                                          0110 = 48 ohm
1355                                                          0111 = 60 ohm
1356                                                          0000,1000-1111 = Reserved */
1357         uint64_t dqx_ctl                      : 4;  /**< Drive strength control for DQ/DQS drivers
1358                                                          0001 = 24 ohm
1359                                                          0010 = 26.67 ohm
1360                                                          0011 = 30 ohm
1361                                                          0100 = 34.3 ohm
1362                                                          0101 = 40 ohm
1363                                                          0110 = 48 ohm
1364                                                          0111 = 60 ohm
1365                                                          0000,1000-1111 = Reserved */
1366 #else
        /* Little-endian bit order: same fields as above, mirrored */
1367         uint64_t dqx_ctl                      : 4;
1368         uint64_t ck_ctl                       : 4;
1369         uint64_t cmd_ctl                      : 4;
1370         uint64_t rodt_ctl                     : 4;
1371         uint64_t ntune                        : 4;
1372         uint64_t ptune                        : 4;
1373         uint64_t byp                          : 1;
1374         uint64_t m180                         : 1;
1375         uint64_t ddr__ntune                   : 4;
1376         uint64_t ddr__ptune                   : 4;
1377         uint64_t reserved_34_63               : 30;
1378 #endif
1379         } s;
1380         struct cvmx_lmcx_comp_ctl2_s          cn63xx;
1381         struct cvmx_lmcx_comp_ctl2_s          cn63xxp1;
1382 };
1383 typedef union cvmx_lmcx_comp_ctl2 cvmx_lmcx_comp_ctl2_t; /* convenience typedef for the LMC*_COMP_CTL2 CSR union */
1384
1385 /**
1386  * cvmx_lmc#_config
1387  *
1388  * LMC_CONFIG = LMC Configuration Register
1389  *
1390  * This register controls certain parameters of the Memory Configuration
1391  *
1392  * Notes:
1393  * a. Priority order for hardware writes to LMC*_CONFIG/LMC*_FADR/LMC*_ECC_SYND: DED error >= NXM error > SEC error
1394  * b. The self refresh entry sequence(s) power the DLL up/down (depending on LMC*_MODEREG_PARAMS0[DLL])
1395  * when LMC*_CONFIG[SREF_WITH_DLL] is set
1396  * c. Prior to the self-refresh exit sequence, LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 should be re-programmed (if needed) to the
1397  * appropriate values
1398  *
1399  * LMC Bringup Sequence:
1400  * 1. SW must ensure there are no pending DRAM transactions and that the DDR PLL and the DLL have been initialized.
1401  * 2. Write LMC*_COMP_CTL2, LMC*_CONTROL, LMC*_WODT_MASK, LMC*_DUAL_MEMCFG, LMC*_TIMING_PARAMS0, LMC*_TIMING_PARAMS1,
1402  *    LMC*_MODEREG_PARAMS0, LMC*_MODEREG_PARAMS1, LMC*_RESET_CTL (with DDR3RST=0), LMC*_CONFIG (with INIT_START=0)
1403  *    with appropriate values, if necessary.
1404  * 3. Wait 200us, then write LMC*_RESET_CTL[DDR3RST] = 1.
1405  * 4. Initialize all ranks at once by writing LMC*_CONFIG[RANKMASK][n] = 1, LMC*_CONFIG[INIT_STATUS][n] = 1, and LMC*_CONFIG[INIT_START] = 1
1406  *    where n is a valid rank index for the specific board configuration.
1407  * 5. for each rank n to be write-leveled [
1408  *       if auto write-leveling is desired [
1409  *           write LMC*_CONFIG[RANKMASK][n] = 1, LMC*_WLEVEL_CTL appropriately and LMC*_CONFIG[INIT_START] = 1
1410  *           wait until LMC*_WLEVEL_RANKn[STATUS] = 3
1411  *       ] else [
1412  *           write LMC*_WLEVEL_RANKn with appropriate values
1413  *       ]
1414  *    ]
1415  * 6. for each rank n to be read-leveled [
1416  *       if auto read-leveling is desired [
1417  *           write LMC*_CONFIG[RANKMASK][n] = 1, LMC*_RLEVEL_CTL appropriately and LMC*_CONFIG[INIT_START] = 1
1418  *           wait until LMC*_RLEVEL_RANKn[STATUS] = 3
1419  *       ] else [
1420  *           write LMC*_RLEVEL_RANKn with appropriate values
1421  *       ]
1422  *    ]
1423  */
1424 union cvmx_lmcx_config
1425 {
1426         uint64_t u64;
1427         struct cvmx_lmcx_config_s
1428         {
1429 #if __BYTE_ORDER == __BIG_ENDIAN
1430         uint64_t reserved_59_63               : 5;
1431         uint64_t early_unload_d1_r1           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 3
1432                                                          reads
1433                                                          The recommended EARLY_UNLOAD_D1_R1 value can be calculated
1434                                                          after the final LMC*_RLEVEL_RANK3[BYTE*] values are
1435                                                          selected (as part of read-leveling initialization).
1436                                                          Then, determine the largest read-leveling setting
1437                                                          for rank 3 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK3[BYTEi])
1438                                                          across all i), then set EARLY_UNLOAD_D1_R1
1439                                                          when the low two bits of this largest setting is not
1440                                                          3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset<1:0>!=3)). */
1441         uint64_t early_unload_d1_r0           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 2
1442                                                          reads
1443                                                          The recommended EARLY_UNLOAD_D1_R0 value can be calculated
1444                                                          after the final LMC*_RLEVEL_RANK2[BYTE*] values are
1445                                                          selected (as part of read-leveling initialization).
1446                                                          Then, determine the largest read-leveling setting
1447                                                          for rank 2 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK2[BYTEi])
1448                                                          across all i), then set EARLY_UNLOAD_D1_R0
1449                                                          when the low two bits of this largest setting is not
1450                                                          3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset<1:0>!=3)). */
1451         uint64_t early_unload_d0_r1           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 1
1452                                                          reads
1453                                                          The recommended EARLY_UNLOAD_D0_R1 value can be calculated
1454                                                          after the final LMC*_RLEVEL_RANK1[BYTE*] values are
1455                                                          selected (as part of read-leveling initialization).
1456                                                          Then, determine the largest read-leveling setting
1457                                                          for rank 1 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK1[BYTEi])
1458                                                          across all i), then set EARLY_UNLOAD_D0_R1
1459                                                          when the low two bits of this largest setting is not
1460                                                          3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */
1461         uint64_t early_unload_d0_r0           : 1;  /**< When set, unload the PHY silo one cycle early for Rank 0
1462                                                          reads.
1463                                                          The recommended EARLY_UNLOAD_D0_R0 value can be calculated
1464                                                          after the final LMC*_RLEVEL_RANK0[BYTE*] values are
1465                                                          selected (as part of read-leveling initialization).
1466                                                          Then, determine the largest read-leveling setting
1467                                                          for rank 0 (i.e. calculate maxset=MAX(LMC*_RLEVEL_RANK0[BYTEi])
1468                                                          across all i), then set EARLY_UNLOAD_D0_R0
1469                                                          when the low two bits of this largest setting is not
1470                                                          3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */
1471         uint64_t init_status                  : 4;  /**< Indicates status of initialization
1472                                                          INIT_STATUS[n] = 1 implies rank n has been initialized
1473                                                          SW must set necessary INIT_STATUS bits with the
1474                                                          same LMC*_CONFIG write that initiates
1475                                                          power-up/init and self-refresh exit sequences
1476                                                          (if the required INIT_STATUS bits are not already
1477                                                          set before LMC initiates the sequence).
1478                                                          INIT_STATUS determines the chip-selects that assert
1479                                                          during refresh, ZQCS, and precharge power-down and
1480                                                          self-refresh entry/exit SEQUENCE's. */
1481         uint64_t mirrmask                     : 4;  /**< Mask determining which ranks are address-mirrored.
1482                                                          MIRRMASK<n> = 1 means Rank n addresses are mirrored
1483                                                          for 0 <= n <= 3
1484                                                          A mirrored read/write has these differences:
1485                                                           - DDR_BA<1> is swapped with DDR_BA<0>
1486                                                           - DDR_A<8> is swapped with DDR_A<7>
1487                                                           - DDR_A<6> is swapped with DDR_A<5>
1488                                                           - DDR_A<4> is swapped with DDR_A<3>
1489                                                          When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
1490         uint64_t rankmask                     : 4;  /**< Mask to select rank to be leveled/initialized.
1491                                                          To write-level/read-level/initialize rank i, set RANKMASK<i>
1492                                                                          RANK_ENA=1               RANK_ENA=0
1493                                                            RANKMASK<0> = DIMM0_CS0                DIMM0_CS0
1494                                                            RANKMASK<1> = DIMM0_CS1                  MBZ
1495                                                            RANKMASK<2> = DIMM1_CS0                DIMM1_CS0
1496                                                            RANKMASK<3> = DIMM1_CS1                  MBZ
1497                                                          For read/write leveling, each rank has to be leveled separately,
1498                                                          so RANKMASK should only have one bit set.
1499                                                          RANKMASK is not used during self-refresh entry/exit and
1500                                                          precharge power-down entry/exit instruction sequences.
1501                                                          When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
1502         uint64_t rank_ena                     : 1;  /**< RANK ena (for use with dual-rank DIMMs)
1503                                                          For dual-rank DIMMs, the rank_ena bit will enable
1504                                                          the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
1505                                                          (pbank_lsb-1) address bit.
1506                                                          Write 0 for SINGLE ranked DIMM's. */
1507         uint64_t sref_with_dll                : 1;  /**< Self-refresh entry/exit write MR1 and MR2
1508                                                          When set, self-refresh entry and exit instruction sequences
1509                                                          write MR1 and MR2 (in all ranks). (The writes occur before
1510                                                          self-refresh entry, and after self-refresh exit.)
1511                                                          When clear, self-refresh entry and exit instruction sequences
1512                                                          do not write any registers in the DDR3 parts. */
1513         uint64_t early_dqx                    : 1;  /**< Send DQx signals one CK cycle earlier for the case when
1514                                                          the shortest DQx lines have a larger delay than the CK line */
1515         uint64_t sequence                     : 3;  /**< Selects the sequence that LMC runs after a 0->1
1516                                                          transition on LMC*_CONFIG[INIT_START].
1517                                                          SEQUENCE=0=power-up/init:
1518                                                            - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
1519                                                            - INIT_STATUS must equal RANKMASK
1520                                                            - DDR_CKE* signals activated (if they weren't already active)
1521                                                            - RDIMM register control words 0-15 will be written to RANKMASK-selected
1522                                                                RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
1523                                                                LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
1524                                                                LMC*_DIMM_CTL descriptions below for more details.)
1525                                                            - MR0, MR1, MR2, and MR3 will be written to selected ranks
1526                                                          SEQUENCE=1=read-leveling:
1527                                                            - RANKMASK selects the rank to be read-leveled
1528                                                            - MR3 written to selected rank
1529                                                          SEQUENCE=2=self-refresh entry:
1530                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
1531                                                            - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
1532                                                            - DDR_CKE* signals de-activated
1533                                                          SEQUENCE=3=self-refresh exit:
1534                                                            - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
1535                                                            - DDR_CKE* signals activated
1536                                                            - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
1537                                                          SEQUENCE=4=precharge power-down entry:
1538                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
1539                                                            - DDR_CKE* signals de-activated
1540                                                          SEQUENCE=5=precharge power-down exit:
1541                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
1542                                                            - DDR_CKE* signals activated
1543                                                          SEQUENCE=6=write-leveling:
1544                                                            - RANKMASK selects the rank to be write-leveled
1545                                                            - INIT_STATUS must indicate all ranks with attached DRAM
1546                                                            - MR1 and MR2 written to INIT_STATUS-selected ranks
1547                                                          SEQUENCE=7=illegal
1548                                                          Precharge power-down entry and exit SEQUENCE's may also
1549                                                          be automatically generated by the HW when IDLEPOWER!=0.
1550                                                          Self-refresh entry SEQUENCE's may also be automatically
1551                                                          generated by hardware upon a chip warm or soft reset
1552                                                          sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
1553                                                          LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
1554                                                          to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
1555                                                          Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
1556                                                          If there are two consecutive power-up/init's without
1557                                                          a DRESET assertion between them, LMC asserts DDR_CKE* as part of
1558                                                          the first power-up/init, and continues to assert DDR_CKE*
1559                                                          through the remainder of the first and the second power-up/init.
1560                                                          If DDR_CKE* deactivation and reactivation is needed for
1561                                                          a second power-up/init, a DRESET assertion is required
1562                                                          between the first and the second. */
1563         uint64_t ref_zqcs_int                 : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
1564                                                          increments. A Refresh sequence is triggered when bits
1565                                                          [24:18] are equal to 0, and a ZQCS sequence is triggered
1566                                                          when [36:18] are equal to 0.
1567                                                          Program [24:18] to RND-DN(tREFI/clkPeriod/512)
1568                                                          Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
1569                                                          that this value should always be greater than 32, to account for
1570                                                          resistor calibration delays.
1571                                                          000_00000000_00000000: RESERVED
1572                                                          Max Refresh interval = 127 * 512           = 65024 CKs
1573                                                          Max ZQCS interval    = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for a 800 MHz CK
1574                                                          LMC*_CONFIG[INIT_STATUS] determines which ranks receive
1575                                                          the REF / ZQCS. LMC does not send any refreshes / ZQCS's
1576                                                          when LMC*_CONFIG[INIT_STATUS]=0. */
1577         uint64_t reset                        : 1;  /**< Reset oneshot pulse for refresh counter,
1578                                                          and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
1579                                                          CSR's. SW should write this to a one, then re-write
1580                                                          it to a zero to cause the reset. */
1581         uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
1582                                                          0=disabled, 1=enabled */
1583         uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
1584                                                          having waited for 2^FORCEWRITE CK cycles.  0=disabled. */
1585         uint64_t idlepower                    : 3;  /**< Enter precharge power-down mode after the memory
1586                                                          controller has been idle for 2^(2+IDLEPOWER) CK cycles.
1587                                                          0=disabled.
1588                                                          This field should only be programmed after initialization.
1589                                                          LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
1590                                                          is disabled during the precharge power-down. */
1591         uint64_t pbank_lsb                    : 4;  /**< DIMM address bit select
1592                                                          Reverting to the explanation for ROW_LSB,
1593                                                          PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
1594                                                          Decoding for pbank_lsb
1595                                                               - 0000:DIMM = mem_adr[28]    / rank = mem_adr[27] (if RANK_ENA)
1596                                                               - 0001:DIMM = mem_adr[29]    / rank = mem_adr[28]      "
1597                                                               - 0010:DIMM = mem_adr[30]    / rank = mem_adr[29]      "
1598                                                               - 0011:DIMM = mem_adr[31]    / rank = mem_adr[30]      "
1599                                                               - 0100:DIMM = mem_adr[32]    / rank = mem_adr[31]      "
1600                                                               - 0101:DIMM = mem_adr[33]    / rank = mem_adr[32]      "
1601                                                               - 0110:DIMM = mem_adr[34]    / rank = mem_adr[33]      "
1602                                                               - 0111:DIMM = 0              / rank = mem_adr[34]      "
1603                                                               - 1000-1111: RESERVED
1604                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
1605                                                          DDR3 parts, the column address width = 10, so with
1606                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
1607                                                          With rank_ena = 0, pbank_lsb = 2
1608                                                          With rank_ena = 1, pbank_lsb = 3 */
1609         uint64_t row_lsb                      : 3;  /**< Row Address bit select
1610                                                          Encoding used to determine which memory address
1611                                                          bit position represents the low order DDR ROW address.
1612                                                          The processor's memory address[34:7] needs to be
1613                                                          translated to DRAM addresses (bnk,row,col,rank and DIMM)
1614                                                          and that is a function of the following:
1615                                                          1. Datapath Width (64)
1616                                                          2. \# Banks (8)
1617                                                          3. \# Column Bits of the memory part - spec'd indirectly
1618                                                          by this register.
1619                                                          4. \# Row Bits of the memory part - spec'd indirectly
1620                                                          5. \# Ranks in a DIMM - spec'd by RANK_ENA
1621                                                          6. \# DIMM's in the system by the register below (PBANK_LSB).
1622                                                          Decoding for row_lsb
1623                                                               - 000: row_lsb = mem_adr[14]
1624                                                               - 001: row_lsb = mem_adr[15]
1625                                                               - 010: row_lsb = mem_adr[16]
1626                                                               - 011: row_lsb = mem_adr[17]
1627                                                               - 100: row_lsb = mem_adr[18]
1628                                                               - 101: row_lsb = mem_adr[19]
1629                                                               - 110: row_lsb = mem_adr[20]
1630                                                               - 111: RESERVED
1631                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
1632                                                          DDR3 parts, the column address width = 10, so with
1633                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
1634         uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 8b ECC
1635                                                          check/correct logic. Should be 1 when used with DIMMs
1636                                                          with ECC. 0, otherwise.
1637                                                          When this mode is turned on, DQ[71:64]
1638                                                          on writes, will contain the ECC code generated for
1639                                                          the 64 bits of data which will be
1640                                                          written in the memory and then later on reads, used
1641                                                          to check for Single bit error (which will be auto-
1642                                                          corrected) and Double Bit error (which will be
1643                                                          reported). When not turned on, DQ[71:64]
1644                                                          are driven to 0.  Please refer to SEC_ERR, DED_ERR,
1645                                                          LMC*_FADR, and LMC*_ECC_SYND registers
1646                                                          for diagnostics information when there is an error. */
1647         uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory sequence that is
1648                                                          selected by LMC*_CONFIG[SEQUENCE].  This register is a
1649                                                          oneshot and clears itself each time it is set. */
1650 #else
        /* Little-endian bit order: same fields as above, mirrored */
1651         uint64_t init_start                   : 1;
1652         uint64_t ecc_ena                      : 1;
1653         uint64_t row_lsb                      : 3;
1654         uint64_t pbank_lsb                    : 4;
1655         uint64_t idlepower                    : 3;
1656         uint64_t forcewrite                   : 4;
1657         uint64_t ecc_adr                      : 1;
1658         uint64_t reset                        : 1;
1659         uint64_t ref_zqcs_int                 : 19;
1660         uint64_t sequence                     : 3;
1661         uint64_t early_dqx                    : 1;
1662         uint64_t sref_with_dll                : 1;
1663         uint64_t rank_ena                     : 1;
1664         uint64_t rankmask                     : 4;
1665         uint64_t mirrmask                     : 4;
1666         uint64_t init_status                  : 4;
1667         uint64_t early_unload_d0_r0           : 1;
1668         uint64_t early_unload_d0_r1           : 1;
1669         uint64_t early_unload_d1_r0           : 1;
1670         uint64_t early_unload_d1_r1           : 1;
1671         uint64_t reserved_59_63               : 5;
1672 #endif
1673         } s;
1674         struct cvmx_lmcx_config_s             cn63xx;
1675         struct cvmx_lmcx_config_cn63xxp1
1676         {
        /* CN63XX pass-1 layout of LMC*_CONFIG: identical to cvmx_lmcx_config_s
           through bit 54, but bits 55-63 are reserved here (this pass has no
           EARLY_UNLOAD_D*_R* fields). */
1677 #if __BYTE_ORDER == __BIG_ENDIAN
1678         uint64_t reserved_55_63               : 9;  /**< Reserved */
1679         uint64_t init_status                  : 4;  /**< Indicates status of initialization
1680                                                          INIT_STATUS[n] = 1 implies rank n has been initialized
1681                                                          SW must set necessary INIT_STATUS bits with the
1682                                                          same LMC*_CONFIG write that initiates
1683                                                          power-up/init and self-refresh exit sequences
1684                                                          (if the required INIT_STATUS bits are not already
1685                                                          set before LMC initiates the sequence).
1686                                                          INIT_STATUS determines the chip-selects that assert
1687                                                          during refresh, ZQCS, and precharge power-down and
1688                                                          self-refresh entry/exit SEQUENCE's. */
1689         uint64_t mirrmask                     : 4;  /**< Mask determining which ranks are address-mirrored.
1690                                                          MIRRMASK<n> = 1 means Rank n addresses are mirrored
1691                                                          for 0 <= n <= 3
1692                                                          A mirrored read/write has these differences:
1693                                                           - DDR_BA<1> is swapped with DDR_BA<0>
1694                                                           - DDR_A<8> is swapped with DDR_A<7>
1695                                                           - DDR_A<6> is swapped with DDR_A<5>
1696                                                           - DDR_A<4> is swapped with DDR_A<3>
1697                                                          When RANK_ENA=0, MIRRMASK<1> and MIRRMASK<3> MBZ */
1698         uint64_t rankmask                     : 4;  /**< Mask to select rank to be leveled/initialized.
1699                                                          To write-level/read-level/initialize rank i, set RANKMASK<i>
1700                                                                          RANK_ENA=1               RANK_ENA=0
1701                                                            RANKMASK<0> = DIMM0_CS0                DIMM0_CS0
1702                                                            RANKMASK<1> = DIMM0_CS1                  MBZ
1703                                                            RANKMASK<2> = DIMM1_CS0                DIMM1_CS0
1704                                                            RANKMASK<3> = DIMM1_CS1                  MBZ
1705                                                          For read/write leveling, each rank has to be leveled separately,
1706                                                          so RANKMASK should only have one bit set.
1707                                                          RANKMASK is not used during self-refresh entry/exit and
1708                                                          precharge power-down entry/exit instruction sequences.
1709                                                          When RANK_ENA=0, RANKMASK<1> and RANKMASK<3> MBZ */
1710         uint64_t rank_ena                     : 1;  /**< RANK ena (for use with dual-rank DIMMs)
1711                                                          For dual-rank DIMMs, the rank_ena bit will enable
1712                                                          the drive of the CS*_L[1:0] and ODT_<1:0> pins differently based on the
1713                                                          (pbank_lsb-1) address bit.
1714                                                          Write 0 for SINGLE ranked DIMM's. */
1715         uint64_t sref_with_dll                : 1;  /**< Self-refresh entry/exit write MR1 and MR2
1716                                                          When set, self-refresh entry and exit instruction sequences
1717                                                          write MR1 and MR2 (in all ranks). (The writes occur before
1718                                                          self-refresh entry, and after self-refresh exit.)
1719                                                          When clear, self-refresh entry and exit instruction sequences
1720                                                          do not write any registers in the DDR3 parts. */
1721         uint64_t early_dqx                    : 1;  /**< Send DQx signals one CK cycle earlier for the case when
1722                                                          the shortest DQx lines have a larger delay than the CK line */
1723         uint64_t sequence                     : 3;  /**< Selects the sequence that LMC runs after a 0->1
1724                                                          transition on LMC*_CONFIG[INIT_START].
1725                                                          SEQUENCE=0=power-up/init:
1726                                                            - RANKMASK selects participating ranks (should be all ranks with attached DRAM)
1727                                                            - INIT_STATUS must equal RANKMASK
1728                                                            - DDR_CKE* signals activated (if they weren't already active)
1729                                                            - RDIMM register control words 0-15 will be written to RANKMASK-selected
1730                                                                RDIMM's when LMC(0)_CONTROL[RDIMM_ENA]=1 and corresponding
1731                                                                LMC*_DIMM_CTL[DIMM*_WMASK] bits are set. (Refer to LMC*_DIMM*_PARAMS and
1732                                                                LMC*_DIMM_CTL descriptions below for more details.)
1733                                                            - MR0, MR1, MR2, and MR3 will be written to selected ranks
1734                                                          SEQUENCE=1=read-leveling:
1735                                                            - RANKMASK selects the rank to be read-leveled
1736                                                            - MR3 written to selected rank
1737                                                          SEQUENCE=2=self-refresh entry:
1738                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
1739                                                            - MR1 and MR2 will be written to selected ranks if SREF_WITH_DLL=1
1740                                                            - DDR_CKE* signals de-activated
1741                                                          SEQUENCE=3=self-refresh exit:
1742                                                            - INIT_STATUS must be set to indicate participating ranks (should be all ranks with attached DRAM)
1743                                                            - DDR_CKE* signals activated
1744                                                            - MR0, MR1, MR2, and MR3 will be written to participating ranks if SREF_WITH_DLL=1
1745                                                          SEQUENCE=4=precharge power-down entry:
1746                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
1747                                                            - DDR_CKE* signals de-activated
1748                                                          SEQUENCE=5=precharge power-down exit:
1749                                                            - INIT_STATUS selects participating ranks (should be all ranks with attached DRAM)
1750                                                            - DDR_CKE* signals activated
1751                                                          SEQUENCE=6=write-leveling:
1752                                                            - RANKMASK selects the rank to be write-leveled
1753                                                            - INIT_STATUS must indicate all ranks with attached DRAM
1754                                                            - MR1 and MR2 written to INIT_STATUS-selected ranks
1755                                                          SEQUENCE=7=illegal
1756                                                          Precharge power-down entry and exit SEQUENCE's may also
1757                                                          be automatically generated by the HW when IDLEPOWER!=0.
1758                                                          Self-refresh entry SEQUENCE's may also be automatically
1759                                                          generated by hardware upon a chip warm or soft reset
1760                                                          sequence when LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
1761                                                          LMC writes the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 CSR field values
1762                                                          to the Mode registers in the DRAM parts (i.e. MR0, MR1, MR2, and MR3) as part of some of these sequences.
1763                                                          Refer to the LMC*_MODEREG_PARAMS0 and LMC*_MODEREG_PARAMS1 descriptions for more details.
1764                                                          If there are two consecutive power-up/init's without
1765                                                          a DRESET assertion between them, LMC asserts DDR_CKE* as part of
1766                                                          the first power-up/init, and continues to assert DDR_CKE*
1767                                                          through the remainder of the first and the second power-up/init.
1768                                                          If DDR_CKE* deactivation and reactivation is needed for
1769                                                          a second power-up/init, a DRESET assertion is required
1770                                                          between the first and the second. */
1771         uint64_t ref_zqcs_int                 : 19; /**< Refresh & ZQCS interval represented in \#of 512 CK cycle
1772                                                          increments. A Refresh sequence is triggered when bits
1773                                                          [24:18] are equal to 0, and a ZQCS sequence is triggered
1774                                                          when [36:18] are equal to 0.
1775                                                          Program [24:18] to RND-DN(tREFI/clkPeriod/512)
1776                                                          Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
1777                                                          that this value should always be greater than 32, to account for
1778                                                          resistor calibration delays.
1779                                                          000_00000000_00000000: RESERVED
1780                                                          Max Refresh interval = 127 * 512           = 65024 CKs
1781                                                          Max ZQCS interval    = (8*256*256-1) * 512 = 268434944 CKs ~ 335ms for a 800 MHz CK
1782                                                          LMC*_CONFIG[INIT_STATUS] determines which ranks receive
1783                                                          the REF / ZQCS. LMC does not send any refreshes / ZQCS's
1784                                                          when LMC*_CONFIG[INIT_STATUS]=0. */
1785         uint64_t reset                        : 1;  /**< Reset oneshot pulse for refresh counter,
1786                                                          and LMC*_OPS_CNT, LMC*_IFB_CNT, and LMC*_DCLK_CNT
1787                                                          CSR's. SW should write this to a one, then re-write
1788                                                          it to a zero to cause the reset. */
1789         uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
1790                                                          0=disabled, 1=enabled */
1791         uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
1792                                                          having waited for 2^FORCEWRITE CK cycles.  0=disabled. */
1793         uint64_t idlepower                    : 3;  /**< Enter precharge power-down mode after the memory
1794                                                          controller has been idle for 2^(2+IDLEPOWER) CK cycles.
1795                                                          0=disabled.
1796                                                          This field should only be programmed after initialization.
1797                                                          LMC*_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
1798                                                          is disabled during the precharge power-down. */
1799         uint64_t pbank_lsb                    : 4;  /**< DIMM address bit select
1800                                                          Reverting to the explanation for ROW_LSB,
1801                                                          PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits
1802                                                          Decoding for pbank_lsb
1803                                                               - 0000:DIMM = mem_adr[28]    / rank = mem_adr[27] (if RANK_ENA)
1804                                                               - 0001:DIMM = mem_adr[29]    / rank = mem_adr[28]      "
1805                                                               - 0010:DIMM = mem_adr[30]    / rank = mem_adr[29]      "
1806                                                               - 0011:DIMM = mem_adr[31]    / rank = mem_adr[30]      "
1807                                                               - 0100:DIMM = mem_adr[32]    / rank = mem_adr[31]      "
1808                                                               - 0101:DIMM = mem_adr[33]    / rank = mem_adr[32]      "
1809                                                               - 0110:DIMM = mem_adr[34]    / rank = mem_adr[33]      "
1810                                                               - 0111:DIMM = 0              / rank = mem_adr[34]      "
1811                                                               - 1000-1111: RESERVED
1812                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
1813                                                          DDR3 parts, the column address width = 10, so with
1814                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16]
1815                                                          With rank_ena = 0, pbank_lsb = 2
1816                                                          With rank_ena = 1, pbank_lsb = 3 */
1817         uint64_t row_lsb                      : 3;  /**< Row Address bit select
1818                                                          Encoding used to determine which memory address
1819                                                          bit position represents the low order DDR ROW address.
1820                                                          The processor's memory address[34:7] needs to be
1821                                                          translated to DRAM addresses (bnk,row,col,rank and DIMM)
1822                                                          and that is a function of the following:
1823                                                          1. Datapath Width (64)
1824                                                          2. \# Banks (8)
1825                                                          3. \# Column Bits of the memory part - spec'd indirectly
1826                                                          by this register.
1827                                                          4. \# Row Bits of the memory part - spec'd indirectly
1828                                                          5. \# Ranks in a DIMM - spec'd by RANK_ENA
1829                                                          6. \# DIMM's in the system by the register below (PBANK_LSB).
1830                                                          Decoding for row_lsb
1831                                                               - 000: row_lsb = mem_adr[14]
1832                                                               - 001: row_lsb = mem_adr[15]
1833                                                               - 010: row_lsb = mem_adr[16]
1834                                                               - 011: row_lsb = mem_adr[17]
1835                                                               - 100: row_lsb = mem_adr[18]
1836                                                               - 101: row_lsb = mem_adr[19]
1837                                                               - 110: row_lsb = mem_adr[20]
1838                                                               - 111: RESERVED
1839                                                          For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
1840                                                          DDR3 parts, the column address width = 10, so with
1841                                                          10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] */
1842         uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 8b ECC
1843                                                          check/correct logic. Should be 1 when used with DIMMs
1844                                                          with ECC. 0, otherwise.
1845                                                          When this mode is turned on, DQ[71:64]
1846                                                          on writes, will contain the ECC code generated for
1847                                                          the 64 bits of data which will be
1848                                                          written in the memory and then later on reads, used
1849                                                          to check for Single bit error (which will be auto-
1850                                                          corrected) and Double Bit error (which will be
1851                                                          reported). When not turned on, DQ[71:64]
1852                                                          are driven to 0.  Please refer to SEC_ERR, DED_ERR,
1853                                                          LMC*_FADR, and LMC*_ECC_SYND registers
1854                                                          for diagnostic information when there is an error. */
1855         uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory sequence that is
1856                                                          selected by LMC*_CONFIG[SEQUENCE].  This register is a
1857                                                          oneshot and clears itself each time it is set. */
1858 #else
        /* Little-endian view: the same fields declared in reverse (LSB-first) order. */
1859         uint64_t init_start                   : 1;
1860         uint64_t ecc_ena                      : 1;
1861         uint64_t row_lsb                      : 3;
1862         uint64_t pbank_lsb                    : 4;
1863         uint64_t idlepower                    : 3;
1864         uint64_t forcewrite                   : 4;
1865         uint64_t ecc_adr                      : 1;
1866         uint64_t reset                        : 1;
1867         uint64_t ref_zqcs_int                 : 19;
1868         uint64_t sequence                     : 3;
1869         uint64_t early_dqx                    : 1;
1870         uint64_t sref_with_dll                : 1;
1871         uint64_t rank_ena                     : 1;
1872         uint64_t rankmask                     : 4;
1873         uint64_t mirrmask                     : 4;
1874         uint64_t init_status                  : 4;
1875         uint64_t reserved_55_63               : 9;
1876 #endif
1877         } cn63xxp1;
1878 };
1879 typedef union cvmx_lmcx_config cvmx_lmcx_config_t;
1880
1881 /**
1882  * cvmx_lmc#_control
1883  *
1884  * LMC_CONTROL = LMC Control
1885  * This register is an assortment of various control fields needed by the memory controller
1886  */
1887 union cvmx_lmcx_control
1888 {
        /* u64 gives whole-register access; s gives per-field (bitfield) access.
           Only bits 0-23 carry fields; bits 24-63 are reserved. */
1889         uint64_t u64;
1890         struct cvmx_lmcx_control_s
1891         {
1892 #if __BYTE_ORDER == __BIG_ENDIAN
1893         uint64_t reserved_24_63               : 40; /**< Reserved */
1894         uint64_t rodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
1895                                                          RD cmd is delayed an additional CK cycle. */
1896         uint64_t wodt_bprch                   : 1;  /**< When set, the turn-off time for the ODT pin during a
1897                                                          WR cmd is delayed an additional CK cycle. */
1898         uint64_t bprch                        : 2;  /**< Back Porch Enable: When set, the turn-on time for
1899                                                          the default DDR_DQ/DQS drivers is delayed an additional BPRCH
1900                                                          CK cycles.
1901                                                          00 = 0 CKs
1902                                                          01 = 1 CKs
1903                                                          10 = 2 CKs
1904                                                          11 = 3 CKs */
1905         uint64_t ext_zqcs_dis                 : 1;  /**< Disable (external) auto-zqcs calibration
1906                                                          When clear, LMC runs external ZQ calibration
1907                                                          every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
1908         uint64_t int_zqcs_dis                 : 1;  /**< Disable (internal) auto-zqcs calibration
1909                                                          When clear, LMC runs internal ZQ calibration
1910                                                          every LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
1911         uint64_t auto_dclkdis                 : 1;  /**< When 1, LMC will automatically shut off its internal
1912                                                          clock to conserve power when there is no traffic. Note
1913                                                          that this has no effect on the DDR3 PHY and pads clocks. */
1914         uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
1915                                                           bank[2:0]=address[9:7] ^ address[14:12]
1916                                                          else
1917                                                           bank[2:0]=address[9:7] */
1918         uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
1919                                                          forcing reads to interrupt. */
1920         uint64_t nxm_write_en                 : 1;  /**< NXM Write mode
1921                                                          When clear, LMC discards writes to addresses that don't
1922                                                          exist in the DRAM (as defined by LMC*_NXM configuration).
1923                                                          When set, LMC completes writes to addresses that don't
1924                                                          exist in the DRAM at an aliased address. */
1925         uint64_t elev_prio_dis                : 1;  /**< Disable elevate priority logic.
1926                                                          When set, writes are sent in
1927                                                          regardless of priority information from L2C. */
1928         uint64_t inorder_wr                   : 1;  /**< Send writes in order (regardless of priority) */
1929         uint64_t inorder_rd                   : 1;  /**< Send reads in order (regardless of priority) */
1930         uint64_t throttle_wr                  : 1;  /**< When set, use at most one IFB for writes */
1931         uint64_t throttle_rd                  : 1;  /**< When set, use at most one IFB for reads */
1932         uint64_t fprch2                       : 2;  /**< Front Porch Enable: When set, the turn-off
1933                                                          time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
1934                                                          00 = 0 CKs
1935                                                          01 = 1 CKs
1936                                                          10 = 2 CKs
1937                                                          11 = RESERVED */
1938         uint64_t pocas                        : 1;  /**< Enable the Posted CAS feature of DDR3.
1939                                                          This bit must be set whenever LMC*_MODEREG_PARAMS0[AL]!=0,
1940                                                          and clear otherwise. */
1941         uint64_t ddr2t                        : 1;  /**< Turn on the DDR 2T mode. 2 CK cycle window for CMD and
1942                                                          address. This mode helps relieve setup time pressure
1943                                                          on the Address and command bus which nominally have
1944                                                          a very large fanout. Please refer to Micron's tech
1945                                                          note tn_47_01 titled "DDR2-533 Memory Design Guide
1946                                                          for Two Dimm Unbuffered Systems" for physical details. */
1947         uint64_t bwcnt                        : 1;  /**< Bus utilization counter Clear.
1948                                                          Clears the LMC*_OPS_CNT, LMC*_IFB_CNT, and
1949                                                          LMC*_DCLK_CNT registers. SW should first write this
1950                                                          field to a one, then write this field to a zero to
1951                                                          clear the CSR's. */
1952         uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
1953                                                          of JEDEC Registered DIMMs which require address and
1954                                                          control bits to be registered in the controller. */
1955 #else
        /* Little-endian view: the same fields declared in reverse (LSB-first) order. */
1956         uint64_t rdimm_ena                    : 1;
1957         uint64_t bwcnt                        : 1;
1958         uint64_t ddr2t                        : 1;
1959         uint64_t pocas                        : 1;
1960         uint64_t fprch2                       : 2;
1961         uint64_t throttle_rd                  : 1;
1962         uint64_t throttle_wr                  : 1;
1963         uint64_t inorder_rd                   : 1;
1964         uint64_t inorder_wr                   : 1;
1965         uint64_t elev_prio_dis                : 1;
1966         uint64_t nxm_write_en                 : 1;
1967         uint64_t max_write_batch              : 4;
1968         uint64_t xor_bank                     : 1;
1969         uint64_t auto_dclkdis                 : 1;
1970         uint64_t int_zqcs_dis                 : 1;
1971         uint64_t ext_zqcs_dis                 : 1;
1972         uint64_t bprch                        : 2;
1973         uint64_t wodt_bprch                   : 1;
1974         uint64_t rodt_bprch                   : 1;
1975         uint64_t reserved_24_63               : 40;
1976 #endif
1977         } s;
1978         struct cvmx_lmcx_control_s            cn63xx;   /**< CN63XX uses the common layout */
1979         struct cvmx_lmcx_control_s            cn63xxp1; /**< CN63XX pass 1 uses the common layout */
1980 };
1981 typedef union cvmx_lmcx_control cvmx_lmcx_control_t;
1982
1983 /**
1984  * cvmx_lmc#_ctl
1985  *
1986  * LMC_CTL = LMC Control
1987  * This register is an assortment of various control fields needed by the memory controller
1988  */
1989 union cvmx_lmcx_ctl
1990 {
1991         uint64_t u64;
        /* Canonical field layout of the LMC#_CTL register. */
        struct cvmx_lmcx_ctl_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63               : 32;
        uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
                                                         The encoded value on this will adjust the drive strength
                                                         of the DDR DQ pulldns. */
        uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
                                                         The encoded value on this will adjust the drive strength
                                                         of the DDR DQ pullup. */
        uint64_t slow_scf                     : 1;  /**< Should be cleared to zero */
        uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
                                                           bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
                                                         else
                                                           bank[n:0]=address[n+7:7]
                                                         where n=1 for a 4 bank part and n=2 for an 8 bank part */
        uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
                                                         allowing reads to interrupt. */
        uint64_t pll_div2                     : 1;  /**< PLL Div2. */
        uint64_t pll_bypass                   : 1;  /**< PLL Bypass. */
        uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
                                                         of JEDEC Registered DIMMs which require Write
                                                         data to be registered in the controller. */
        uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
                                                         will slot an additional 1 cycle data bus bubble to
                                                         avoid DQ/DQS bus contention. This is only a CYA bit,
                                                         in case the "built-in" DIMM and RANK crossing logic
                                                         which should auto-detect and perfectly slot
                                                         read-to-reads to the same DIMM/RANK. */
        uint64_t inorder_mwf                  : 1;  /**< Reads as zero */
        uint64_t inorder_mrf                  : 1;  /**< Always clear to zero */
        uint64_t reserved_10_11               : 2;
        uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
                                                         time for the DDR_DQ/DQS drivers is 1 dclk earlier.
                                                         This bit should typically be set. */
        uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
                                                         the DDR_DQ/DQS drivers is delayed an additional DCLK
                                                         cycle. This should be set to one whenever both SILO_HC
                                                         and SILO_QC are set. */
        uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
                                                         dclks to wait (on top of TCL+1+TSKW) before pulling
                                                         data out of the pad silos.
                                                             - 00: illegal
                                                             - 01: 1 dclks
                                                             - 10: 2 dclks
                                                             - 11: illegal
                                                         This should always be set to 1. */
        uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
                                                         DELAY on DQ (used in the controller to determine the
                                                         R->W spacing to avoid DQS/DQ bus conflicts). Enter
                                                         the largest of the per byte Board delay
                                                             - 00: 0 dclk
                                                             - 01: 1 dclks
                                                             - 10: 2 dclks
                                                             - 11: 3 dclks */
        uint64_t qs_dic                       : 2;  /**< DDR2 Termination Resistor Setting
                                                         A non Zero value in this register
                                                         enables the On Die Termination (ODT) in DDR parts.
                                                         These two bits are loaded into the RTT
                                                         portion of the EMRS register bits A6 & A2. If DDR2's
                                                         termination (for the memory's DQ/DQS/DM pads) is not
                                                         desired, set it to 00. If it is, choose between
                                                         01 for 75 ohm and 10 for 150 ohm termination.
                                                             00 = ODT Disabled
                                                             01 = 75 ohm Termination
                                                             10 = 150 ohm Termination
                                                             11 = 50 ohm Termination
                                                         Octeon, on writes, by default, drives the 4/8 ODT
                                                         pins (64/128b mode) based on what the masks
                                                         (LMC_WODT_CTL) are programmed to.
                                                         LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
                                                         for READS. LMC_RODT_CTL needs to be programmed based
                                                         on the system's needs for ODT. */
        uint64_t dic                          : 2;  /**< Drive Strength Control:
                                                         DIC[0] is
                                                         loaded into the Extended Mode Register (EMRS) A1 bit
                                                         during initialization.
                                                             0 = Normal
                                                             1 = Reduced
                                                         DIC[1] is used to load into EMRS
                                                         bit 10 - DQSN Enable/Disable field. By default, we
                                                         program the DDR's to drive the DQSN also. Set it to
                                                         1 if DQSN should be Hi-Z.
                                                             0 - DQSN Enable
                                                             1 - DQSN Disable */
#else  /* little-endian: same fields as above, declared low-order bit first */
        uint64_t dic                          : 2;
        uint64_t qs_dic                       : 2;
        uint64_t tskw                         : 2;
        uint64_t sil_lat                      : 2;
        uint64_t bprch                        : 1;
        uint64_t fprch2                       : 1;
        uint64_t reserved_10_11               : 2;
        uint64_t inorder_mrf                  : 1;
        uint64_t inorder_mwf                  : 1;
        uint64_t r2r_slot                     : 1;
        uint64_t rdimm_ena                    : 1;
        uint64_t pll_bypass                   : 1;
        uint64_t pll_div2                     : 1;
        uint64_t max_write_batch              : 4;
        uint64_t xor_bank                     : 1;
        uint64_t slow_scf                     : 1;
        uint64_t ddr__pctl                    : 4;
        uint64_t ddr__nctl                    : 4;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        /* cn30xx-specific layout: adds DRESET and MODE32B in place of the PLL bits' neighbors above. */
        struct cvmx_lmcx_ctl_cn30xx
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63               : 32;
        uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
                                                         The encoded value on this will adjust the drive strength
                                                         of the DDR DQ pulldns. */
        uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
                                                         The encoded value on this will adjust the drive strength
                                                         of the DDR DQ pullup. */
        uint64_t slow_scf                     : 1;  /**< 1=SCF has pass1 latency, 0=SCF has 1 cycle lower latency
                                                         when compared to pass1 */
        uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
                                                           bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
                                                         else
                                                           bank[n:0]=address[n+7:7]
                                                         where n=1 for a 4 bank part and n=2 for an 8 bank part */
        uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
                                                         allowing reads to interrupt. */
        uint64_t pll_div2                     : 1;  /**< PLL Div2. */
        uint64_t pll_bypass                   : 1;  /**< PLL Bypass. */
        uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
                                                         of JEDEC Registered DIMMs which require Write
                                                         data to be registered in the controller. */
        uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
                                                         will slot an additional 1 cycle data bus bubble to
                                                         avoid DQ/DQS bus contention. This is only a CYA bit,
                                                         in case the "built-in" DIMM and RANK crossing logic
                                                         which should auto-detect and perfectly slot
                                                         read-to-reads to the same DIMM/RANK. */
        uint64_t inorder_mwf                  : 1;  /**< Reads as zero */
        uint64_t inorder_mrf                  : 1;  /**< Always set to zero */
        uint64_t dreset                       : 1;  /**< Dclk domain reset.  The reset signal that is used by the
                                                         Dclk domain is (DRESET || ECLK_RESET). */
        uint64_t mode32b                      : 1;  /**< 32b data Path Mode
                                                         Set to 1 if we use only 32 DQ pins
                                                         0 for 16b DQ mode. */
        uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
                                                         time for the DDR_DQ/DQS drivers is 1 dclk earlier.
                                                         This bit should typically be set. */
        uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
                                                         the DDR_DQ/DQS drivers is delayed an additional DCLK
                                                         cycle. This should be set to one whenever both SILO_HC
                                                         and SILO_QC are set. */
        uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
                                                         dclks to wait (on top of TCL+1+TSKW) before pulling
                                                         data out of the pad silos.
                                                             - 00: illegal
                                                             - 01: 1 dclks
                                                             - 10: 2 dclks
                                                             - 11: illegal
                                                         This should always be set to 1. */
        uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
                                                         DELAY on DQ (used in the controller to determine the
                                                         R->W spacing to avoid DQS/DQ bus conflicts). Enter
                                                         the largest of the per byte Board delay
                                                             - 00: 0 dclk
                                                             - 01: 1 dclks
                                                             - 10: 2 dclks
                                                             - 11: 3 dclks */
        uint64_t qs_dic                       : 2;  /**< QS Drive Strength Control (DDR1):
                                                         & DDR2 Termination Resistor Setting
                                                         When in DDR2, a non Zero value in this register
                                                         enables the On Die Termination (ODT) in DDR parts.
                                                         These two bits are loaded into the RTT
                                                         portion of the EMRS register bits A6 & A2. If DDR2's
                                                         termination (for the memory's DQ/DQS/DM pads) is not
                                                         desired, set it to 00. If it is, choose between
                                                         01 for 75 ohm and 10 for 150 ohm termination.
                                                             00 = ODT Disabled
                                                             01 = 75 ohm Termination
                                                             10 = 150 ohm Termination
                                                             11 = 50 ohm Termination
                                                         Octeon, on writes, by default, drives the 8 ODT
                                                         pins based on what the masks (LMC_WODT_CTL1 & 2)
                                                         are programmed to. LMC_DDR2_CTL->ODT_ENA
                                                         enables Octeon to drive ODT pins for READS.
                                                         LMC_RODT_CTL needs to be programmed based on
                                                         the system's needs for ODT. */
        uint64_t dic                          : 2;  /**< Drive Strength Control:
                                                         For DDR-I/II Mode, DIC[0] is
                                                         loaded into the Extended Mode Register (EMRS) A1 bit
                                                         during initialization. (see DDR-I data sheet EMRS
                                                         description)
                                                             0 = Normal
                                                             1 = Reduced
                                                         For DDR-II Mode, DIC[1] is used to load into EMRS
                                                         bit 10 - DQSN Enable/Disable field. By default, we
                                                         program the DDR's to drive the DQSN also. Set it to
                                                         1 if DQSN should be Hi-Z.
                                                             0 - DQSN Enable
                                                             1 - DQSN Disable */
#else  /* little-endian: same fields as above, declared low-order bit first */
        uint64_t dic                          : 2;
        uint64_t qs_dic                       : 2;
        uint64_t tskw                         : 2;
        uint64_t sil_lat                      : 2;
        uint64_t bprch                        : 1;
        uint64_t fprch2                       : 1;
        uint64_t mode32b                      : 1;
        uint64_t dreset                       : 1;
        uint64_t inorder_mrf                  : 1;
        uint64_t inorder_mwf                  : 1;
        uint64_t r2r_slot                     : 1;
        uint64_t rdimm_ena                    : 1;
        uint64_t pll_bypass                   : 1;
        uint64_t pll_div2                     : 1;
        uint64_t max_write_batch              : 4;
        uint64_t xor_bank                     : 1;
        uint64_t slow_scf                     : 1;
        uint64_t ddr__pctl                    : 4;
        uint64_t ddr__nctl                    : 4;
        uint64_t reserved_32_63               : 32;
#endif
        } cn30xx;
        struct cvmx_lmcx_ctl_cn30xx           cn31xx; /**< cn31xx reuses the cn30xx field layout */
        /* cn38xx-specific layout: 128b-wide data path variant (MODE128B, SET_ZERO) with bits 16-17 reserved. */
        struct cvmx_lmcx_ctl_cn38xx
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63               : 32;
        uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
                                                         The encoded value on this will adjust the drive strength
                                                         of the DDR DQ pulldns. */
        uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
                                                         The encoded value on this will adjust the drive strength
                                                         of the DDR DQ pullup. */
        uint64_t slow_scf                     : 1;  /**< 1=SCF has pass1 latency, 0=SCF has 1 cycle lower latency
                                                         when compared to pass1
                                                         NOTE - This bit has NO effect in PASS1 */
        uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
                                                           bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
                                                         else
                                                           bank[n:0]=address[n+7:7]
                                                         where n=1 for a 4 bank part and n=2 for an 8 bank part */
        uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
                                                         allowing reads to interrupt. */
        uint64_t reserved_16_17               : 2;
        uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
                                                         of JEDEC Registered DIMMs which require Write
                                                         data to be registered in the controller. */
        uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
                                                         will slot an additional 1 cycle data bus bubble to
                                                         avoid DQ/DQS bus contention. This is only a CYA bit,
                                                         in case the "built-in" DIMM and RANK crossing logic
                                                         which should auto-detect and perfectly slot
                                                         read-to-reads to the same DIMM/RANK. */
        uint64_t inorder_mwf                  : 1;  /**< When set, forces LMC_MWF (writes) into strict, in-order
                                                         mode.  When clear, writes may be serviced out of order
                                                         (optimized to keep multiple banks active).
                                                         This bit is ONLY to be set at power-on and
                                                         should not be set for normal use.
                                                         NOTE: For PASS1, set as follows:
                                                             DDR-I -> 1
                                                             DDR-II -> 0
                                                         For Pass2, this bit is RA0, write ignore (this feature
                                                         is permanently disabled) */
        uint64_t inorder_mrf                  : 1;  /**< When set, forces LMC_MRF (reads) into strict, in-order
                                                         mode.  When clear, reads may be serviced out of order
                                                         (optimized to keep multiple banks active).
                                                         This bit is ONLY to be set at power-on and
                                                         should not be set for normal use.
                                                         NOTE: For PASS1, set as follows:
                                                             DDR-I -> 1
                                                             DDR-II -> 0
                                                         For Pass2, this bit should be written ZERO for
                                                         DDR I & II */
        uint64_t set_zero                     : 1;  /**< Reserved. Always Set this Bit to Zero */
        uint64_t mode128b                     : 1;  /**< 128b data Path Mode
                                                         Set to 1 if we use all 128 DQ pins
                                                         0 for 64b DQ mode. */
        uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
                                                         time for the DDR_DQ/DQS drivers is 1 dclk earlier.
                                                         This bit should typically be set. */
        uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
                                                         the DDR_DQ/DQS drivers is delayed an additional DCLK
                                                         cycle. This should be set to one whenever both SILO_HC
                                                         and SILO_QC are set. */
        uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
                                                         dclks to wait (on top of TCL+1+TSKW) before pulling
                                                         data out of the pad silos.
                                                             - 00: illegal
                                                             - 01: 1 dclks
                                                             - 10: 2 dclks
                                                             - 11: illegal
                                                         This should always be set to 1. */
        uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
                                                         DELAY on DQ (used in the controller to determine the
                                                         R->W spacing to avoid DQS/DQ bus conflicts). Enter
                                                         the largest of the per byte Board delay
                                                             - 00: 0 dclk
                                                             - 01: 1 dclks
                                                             - 10: 2 dclks
                                                             - 11: 3 dclks */
        uint64_t qs_dic                       : 2;  /**< QS Drive Strength Control (DDR1):
                                                         & DDR2 Termination Resistor Setting
                                                         When in DDR2, a non Zero value in this register
                                                         enables the On Die Termination (ODT) in DDR parts.
                                                         These two bits are loaded into the RTT
                                                         portion of the EMRS register bits A6 & A2. If DDR2's
                                                         termination (for the memory's DQ/DQS/DM pads) is not
                                                         desired, set it to 00. If it is, choose between
                                                         01 for 75 ohm and 10 for 150 ohm termination.
                                                             00 = ODT Disabled
                                                             01 = 75 ohm Termination
                                                             10 = 150 ohm Termination
                                                             11 = 50 ohm Termination
                                                         Octeon, on writes, by default, drives the 4/8 ODT
                                                         pins (64/128b mode) based on what the masks
                                                         (LMC_WODT_CTL) are programmed to.
                                                         LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
                                                         for READS. LMC_RODT_CTL needs to be programmed based
                                                         on the system's needs for ODT. */
        uint64_t dic                          : 2;  /**< Drive Strength Control:
                                                         For DDR-I/II Mode, DIC[0] is
                                                         loaded into the Extended Mode Register (EMRS) A1 bit
                                                         during initialization. (see DDR-I data sheet EMRS
                                                         description)
                                                             0 = Normal
                                                             1 = Reduced
                                                         For DDR-II Mode, DIC[1] is used to load into EMRS
                                                         bit 10 - DQSN Enable/Disable field. By default, we
                                                         program the DDR's to drive the DQSN also. Set it to
                                                         1 if DQSN should be Hi-Z.
                                                             0 - DQSN Enable
                                                             1 - DQSN Disable */
#else  /* little-endian: same fields as above, declared low-order bit first */
        uint64_t dic                          : 2;
        uint64_t qs_dic                       : 2;
        uint64_t tskw                         : 2;
        uint64_t sil_lat                      : 2;
        uint64_t bprch                        : 1;
        uint64_t fprch2                       : 1;
        uint64_t mode128b                     : 1;
        uint64_t set_zero                     : 1;
        uint64_t inorder_mrf                  : 1;
        uint64_t inorder_mwf                  : 1;
        uint64_t r2r_slot                     : 1;
        uint64_t rdimm_ena                    : 1;
        uint64_t reserved_16_17               : 2;
        uint64_t max_write_batch              : 4;
        uint64_t xor_bank                     : 1;
        uint64_t slow_scf                     : 1;
        uint64_t ddr__pctl                    : 4;
        uint64_t ddr__nctl                    : 4;
        uint64_t reserved_32_63               : 32;
#endif
        } cn38xx;
        struct cvmx_lmcx_ctl_cn38xx           cn38xxp2; /**< cn38xx pass 2 reuses the cn38xx field layout */
        /* LMC_CTL layout for CN50XX: carries PLL_BYPASS at bit 16 (other
           variants below reserve bits 16-17) and MODE32B selects a 16b/32b
           DQ data path.  Bits 32-63 are reserved. */
2347         struct cvmx_lmcx_ctl_cn50xx
2348         {
2349 #if __BYTE_ORDER == __BIG_ENDIAN
2350         uint64_t reserved_32_63               : 32;
2351         uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
2352                                                          The encoded value on this will adjust the drive strength
2353                                                          of the DDR DQ pulldns. */
2354         uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
2355                                                          The encoded value on this will adjust the drive strength
2356                                                          of the DDR DQ pullup. */
2357         uint64_t slow_scf                     : 1;  /**< Should be cleared to zero */
2358         uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
2359                                                            bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
2360                                                          else
2361                                                            bank[n:0]=address[n+7:7]
2362                                                          where n=1 for a 4 bank part and n=2 for an 8 bank part */
2363         uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
2364                                                          allowing reads to interrupt. */
2365         uint64_t reserved_17_17               : 1;
2366         uint64_t pll_bypass                   : 1;  /**< PLL Bypass. */
2367         uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
2368                                                          of JEDEC Registered DIMMs which require Write
2369                                                          data to be registered in the controller. */
2370         uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
2371                                                          will slot an additional 1 cycle data bus bubble to
2372                                                          avoid DQ/DQS bus contention. This is only a CYA bit,
2373                                                          in case the "built-in" DIMM and RANK crossing logic
2374                                                          which should auto-detect and perfectly slot
2375                                                          read-to-reads to the same DIMM/RANK. */
2376         uint64_t inorder_mwf                  : 1;  /**< Reads as zero */
2377         uint64_t inorder_mrf                  : 1;  /**< Always clear to zero */
2378         uint64_t dreset                       : 1;  /**< Dclk domain reset.  The reset signal that is used by the
2379                                                          Dclk domain is (DRESET || ECLK_RESET). */
2380         uint64_t mode32b                      : 1;  /**< 32b data Path Mode
2381                                                          Set to 1 if we use 32 DQ pins
2382                                                          0 for 16b DQ mode. */
2383         uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
2384                                                          time for the DDR_DQ/DQS drivers is 1 dclk earlier.
2385                                                          This bit should typically be set. */
2386         uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
2387                                                          the DDR_DQ/DQS drivers is delayed an additional DCLK
2388                                                          cycle. This should be set to one whenever both SILO_HC
2389                                                          and SILO_QC are set. */
2390         uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
2391                                                          dclks to wait (on top of TCL+1+TSKW) before pulling
2392                                                          data out of the pad silos.
2393                                                              - 00: illegal
2394                                                              - 01: 1 dclks
2395                                                              - 10: 2 dclks
2396                                                              - 11: illegal
2397                                                          This should always be set to 1. */
2398         uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
2399                                                          DELAY on DQ (used in the controller to determine the
2400                                                          R->W spacing to avoid DQS/DQ bus conflicts). Enter
2401                                                          the largest of the per byte Board delay
2402                                                              - 00: 0 dclk
2403                                                              - 01: 1 dclks
2404                                                              - 10: 2 dclks
2405                                                              - 11: 3 dclks */
2406         uint64_t qs_dic                       : 2;  /**< DDR2 Termination Resistor Setting
2407                                                          When in DDR2, a non Zero value in this register
2408                                                          enables the On Die Termination (ODT) in DDR parts.
2409                                                          These two bits are loaded into the RTT
2410                                                          portion of the EMRS register bits A6 & A2. If DDR2's
2411                                                          termination (for the memory's DQ/DQS/DM pads) is not
2412                                                          desired, set it to 00. If it is, chose between
2413                                                          01 for 75 ohm and 10 for 150 ohm termination.
2414                                                              00 = ODT Disabled
2415                                                              01 = 75 ohm Termination
2416                                                              10 = 150 ohm Termination
2417                                                              11 = 50 ohm Termination
2418                                                          Octeon, on writes, by default, drives the ODT
2419                                                          pins based on what the masks
2420                                                          (LMC_WODT_CTL) are programmed to.
2421                                                          LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
2422                                                          for READS. LMC_RODT_CTL needs to be programmed based
2423                                                          on the system's needs for ODT. */
2424         uint64_t dic                          : 2;  /**< Drive Strength Control:
2425                                                          DIC[0] is
2426                                                          loaded into the Extended Mode Register (EMRS) A1 bit
2427                                                          during initialization.
2428                                                              0 = Normal
2429                                                              1 = Reduced
2430                                                          DIC[1] is used to load into EMRS
2431                                                          bit 10 - DQSN Enable/Disable field. By default, we
2432                                                          program the DDR's to drive the DQSN also. Set it to
2433                                                          1 if DQSN should be Hi-Z.
2434                                                              0 - DQSN Enable
2435                                                              1 - DQSN Disable */
2436 #else
        /* Little-endian view: identical fields, declared in reverse order. */
2437         uint64_t dic                          : 2;
2438         uint64_t qs_dic                       : 2;
2439         uint64_t tskw                         : 2;
2440         uint64_t sil_lat                      : 2;
2441         uint64_t bprch                        : 1;
2442         uint64_t fprch2                       : 1;
2443         uint64_t mode32b                      : 1;
2444         uint64_t dreset                       : 1;
2445         uint64_t inorder_mrf                  : 1;
2446         uint64_t inorder_mwf                  : 1;
2447         uint64_t r2r_slot                     : 1;
2448         uint64_t rdimm_ena                    : 1;
2449         uint64_t pll_bypass                   : 1;
2450         uint64_t reserved_17_17               : 1;
2451         uint64_t max_write_batch              : 4;
2452         uint64_t xor_bank                     : 1;
2453         uint64_t slow_scf                     : 1;
2454         uint64_t ddr__pctl                    : 4;
2455         uint64_t ddr__nctl                    : 4;
2456         uint64_t reserved_32_63               : 32;
2457 #endif
2458         } cn50xx;
        /* LMC_CTL layout for CN52XX-family parts: bits 16-17 are reserved
           (no PLL_BYPASS), MODE32B selects a 32b/64b DQ data path, and the
           DRESET/SIL_LAT/TSKW fields are documented as obsolete in favor of
           LMC_DLL_CTL[DRESET] and READ_LEVEL_RANK. */
2459         struct cvmx_lmcx_ctl_cn52xx
2460         {
2461 #if __BYTE_ORDER == __BIG_ENDIAN
2462         uint64_t reserved_32_63               : 32;
2463         uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
2464                                                          The encoded value on this will adjust the drive strength
2465                                                          of the DDR DQ pulldns. */
2466         uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
2467                                                          The encoded value on this will adjust the drive strength
2468                                                          of the DDR DQ pullup. */
2469         uint64_t slow_scf                     : 1;  /**< Always clear to zero */
2470         uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
2471                                                            bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
2472                                                          else
2473                                                            bank[n:0]=address[n+7:7]
2474                                                          where n=1 for a 4 bank part and n=2 for an 8 bank part */
2475         uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
2476                                                          allowing reads to interrupt. */
2477         uint64_t reserved_16_17               : 2;
2478         uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
2479                                                          of JEDEC Registered DIMMs which require Write
2480                                                          data to be registered in the controller. */
2481         uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
2482                                                          will slot an additional 1 cycle data bus bubble to
2483                                                          avoid DQ/DQS bus contention. This is only a CYA bit,
2484                                                          in case the "built-in" DIMM and RANK crossing logic
2485                                                          which should auto-detect and perfectly slot
2486                                                          read-to-reads to the same DIMM/RANK. */
2487         uint64_t inorder_mwf                  : 1;  /**< Reads as zero */
2488         uint64_t inorder_mrf                  : 1;  /**< Always set to zero */
2489         uint64_t dreset                       : 1;  /**< MBZ
2490                                                          THIS IS OBSOLETE.  Use LMC_DLL_CTL[DRESET] instead. */
2491         uint64_t mode32b                      : 1;  /**< 32b data Path Mode
2492                                                          Set to 1 if we use only 32 DQ pins
2493                                                          0 for 64b DQ mode. */
2494         uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
2495                                                          time for the DDR_DQ/DQS drivers is 1 dclk earlier.
2496                                                          This bit should typically be set. */
2497         uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
2498                                                          the DDR_DQ/DQS drivers is delayed an additional DCLK
2499                                                          cycle. This should be set to one whenever both SILO_HC
2500                                                          and SILO_QC are set. */
2501         uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
2502                                                          dclks to wait (on top of TCL+1+TSKW) before pulling
2503                                                          data out of the pad silos.
2504                                                              - 00: illegal
2505                                                              - 01: 1 dclks
2506                                                              - 10: 2 dclks
2507                                                              - 11: illegal
2508                                                          This should always be set to 1.
2509                                                          THIS IS OBSOLETE.  Use READ_LEVEL_RANK instead. */
2510         uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
2511                                                          DELAY on DQ (used in the controller to determine the
2512                                                          R->W spacing to avoid DQS/DQ bus conflicts). Enter
2513                                                          the largest of the per byte Board delay
2514                                                              - 00: 0 dclk
2515                                                              - 01: 1 dclks
2516                                                              - 10: 2 dclks
2517                                                              - 11: 3 dclks
2518                                                          THIS IS OBSOLETE.  Use READ_LEVEL_RANK instead. */
2519         uint64_t qs_dic                       : 2;  /**< DDR2 Termination Resistor Setting
2520                                                          When in DDR2, a non Zero value in this register
2521                                                          enables the On Die Termination (ODT) in DDR parts.
2522                                                          These two bits are loaded into the RTT
2523                                                          portion of the EMRS register bits A6 & A2. If DDR2's
2524                                                          termination (for the memory's DQ/DQS/DM pads) is not
2525                                                          desired, set it to 00. If it is, chose between
2526                                                          01 for 75 ohm and 10 for 150 ohm termination.
2527                                                              00 = ODT Disabled
2528                                                              01 = 75 ohm Termination
2529                                                              10 = 150 ohm Termination
2530                                                              11 = 50 ohm Termination
2531                                                          Octeon, on writes, by default, drives the 4/8 ODT
2532                                                          pins (64/128b mode) based on what the masks
2533                                                          (LMC_WODT_CTL0 & 1) are programmed to.
2534                                                          LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
2535                                                          for READS. LMC_RODT_CTL needs to be programmed based
2536                                                          on the system's needs for ODT. */
2537         uint64_t dic                          : 2;  /**< Drive Strength Control:
2538                                                          DIC[0] is
2539                                                          loaded into the Extended Mode Register (EMRS) A1 bit
2540                                                          during initialization.
2541                                                              0 = Normal
2542                                                              1 = Reduced
2543                                                          DIC[1] is used to load into EMRS
2544                                                          bit 10 - DQSN Enable/Disable field. By default, we
2545                                                          program the DDR's to drive the DQSN also. Set it to
2546                                                          1 if DQSN should be Hi-Z.
2547                                                              0 - DQSN Enable
2548                                                              1 - DQSN Disable */
2549 #else
        /* Little-endian view: identical fields, declared in reverse order. */
2550         uint64_t dic                          : 2;
2551         uint64_t qs_dic                       : 2;
2552         uint64_t tskw                         : 2;
2553         uint64_t sil_lat                      : 2;
2554         uint64_t bprch                        : 1;
2555         uint64_t fprch2                       : 1;
2556         uint64_t mode32b                      : 1;
2557         uint64_t dreset                       : 1;
2558         uint64_t inorder_mrf                  : 1;
2559         uint64_t inorder_mwf                  : 1;
2560         uint64_t r2r_slot                     : 1;
2561         uint64_t rdimm_ena                    : 1;
2562         uint64_t reserved_16_17               : 2;
2563         uint64_t max_write_batch              : 4;
2564         uint64_t xor_bank                     : 1;
2565         uint64_t slow_scf                     : 1;
2566         uint64_t ddr__pctl                    : 4;
2567         uint64_t ddr__nctl                    : 4;
2568         uint64_t reserved_32_63               : 32;
2569 #endif
2570         } cn52xx;
        /* CN52XX pass 1 and the CN56XX parts reuse the CN52XX field layout. */
2571         struct cvmx_lmcx_ctl_cn52xx           cn52xxp1;
2572         struct cvmx_lmcx_ctl_cn52xx           cn56xx;
2573         struct cvmx_lmcx_ctl_cn52xx           cn56xxp1;
        /* LMC_CTL layout for CN58XX: same as CN50XX except bits 16-17 are
           reserved (no PLL_BYPASS) and bit 10 is MODE128B, selecting a
           64b/128b DQ data path instead of MODE32B's 16b/32b. */
2574         struct cvmx_lmcx_ctl_cn58xx
2575         {
2576 #if __BYTE_ORDER == __BIG_ENDIAN
2577         uint64_t reserved_32_63               : 32;
2578         uint64_t ddr__nctl                    : 4;  /**< DDR nctl from compensation circuit
2579                                                          The encoded value on this will adjust the drive strength
2580                                                          of the DDR DQ pulldns. */
2581         uint64_t ddr__pctl                    : 4;  /**< DDR pctl from compensation circuit
2582                                                          The encoded value on this will adjust the drive strength
2583                                                          of the DDR DQ pullup. */
2584         uint64_t slow_scf                     : 1;  /**< Should be cleared to zero */
2585         uint64_t xor_bank                     : 1;  /**< If (XOR_BANK == 1), then
2586                                                            bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5]
2587                                                          else
2588                                                            bank[n:0]=address[n+7:7]
2589                                                          where n=1 for a 4 bank part and n=2 for an 8 bank part */
2590         uint64_t max_write_batch              : 4;  /**< Maximum number of consecutive writes to service before
2591                                                          allowing reads to interrupt. */
2592         uint64_t reserved_16_17               : 2;
2593         uint64_t rdimm_ena                    : 1;  /**< Registered DIMM Enable - When set allows the use
2594                                                          of JEDEC Registered DIMMs which require Write
2595                                                          data to be registered in the controller. */
2596         uint64_t r2r_slot                     : 1;  /**< R2R Slot Enable: When set, all read-to-read trans
2597                                                          will slot an additional 1 cycle data bus bubble to
2598                                                          avoid DQ/DQS bus contention. This is only a CYA bit,
2599                                                          in case the "built-in" DIMM and RANK crossing logic
2600                                                          which should auto-detect and perfectly slot
2601                                                          read-to-reads to the same DIMM/RANK. */
2602         uint64_t inorder_mwf                  : 1;  /**< Reads as zero */
2603         uint64_t inorder_mrf                  : 1;  /**< Always clear to zero */
2604         uint64_t dreset                       : 1;  /**< Dclk domain reset.  The reset signal that is used by the
2605                                                          Dclk domain is (DRESET || ECLK_RESET). */
2606         uint64_t mode128b                     : 1;  /**< 128b data Path Mode
2607                                                          Set to 1 if we use all 128 DQ pins
2608                                                          0 for 64b DQ mode. */
2609         uint64_t fprch2                       : 1;  /**< Front Porch Enable: When set, the turn-off
2610                                                          time for the DDR_DQ/DQS drivers is 1 dclk earlier.
2611                                                          This bit should typically be set. */
2612         uint64_t bprch                        : 1;  /**< Back Porch Enable: When set, the turn-on time for
2613                                                          the DDR_DQ/DQS drivers is delayed an additional DCLK
2614                                                          cycle. This should be set to one whenever both SILO_HC
2615                                                          and SILO_QC are set. */
2616         uint64_t sil_lat                      : 2;  /**< SILO Latency: On reads, determines how many additional
2617                                                          dclks to wait (on top of TCL+1+TSKW) before pulling
2618                                                          data out of the pad silos.
2619                                                              - 00: illegal
2620                                                              - 01: 1 dclks
2621                                                              - 10: 2 dclks
2622                                                              - 11: illegal
2623                                                          This should always be set to 1. */
2624         uint64_t tskw                         : 2;  /**< This component is a representation of total BOARD
2625                                                          DELAY on DQ (used in the controller to determine the
2626                                                          R->W spacing to avoid DQS/DQ bus conflicts). Enter
2627                                                          the largest of the per byte Board delay
2628                                                              - 00: 0 dclk
2629                                                              - 01: 1 dclks
2630                                                              - 10: 2 dclks
2631                                                              - 11: 3 dclks */
2632         uint64_t qs_dic                       : 2;  /**< DDR2 Termination Resistor Setting
2633                                                          A non Zero value in this register
2634                                                          enables the On Die Termination (ODT) in DDR parts.
2635                                                          These two bits are loaded into the RTT
2636                                                          portion of the EMRS register bits A6 & A2. If DDR2's
2637                                                          termination (for the memory's DQ/DQS/DM pads) is not
2638                                                          desired, set it to 00. If it is, chose between
2639                                                          01 for 75 ohm and 10 for 150 ohm termination.
2640                                                              00 = ODT Disabled
2641                                                              01 = 75 ohm Termination
2642                                                              10 = 150 ohm Termination
2643                                                              11 = 50 ohm Termination
2644                                                          Octeon, on writes, by default, drives the 4/8 ODT
2645                                                          pins (64/128b mode) based on what the masks
2646                                                          (LMC_WODT_CTL) are programmed to.
2647                                                          LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins
2648                                                          for READS. LMC_RODT_CTL needs to be programmed based
2649                                                          on the system's needs for ODT. */
2650         uint64_t dic                          : 2;  /**< Drive Strength Control:
2651                                                          DIC[0] is
2652                                                          loaded into the Extended Mode Register (EMRS) A1 bit
2653                                                          during initialization.
2654                                                              0 = Normal
2655                                                              1 = Reduced
2656                                                          DIC[1] is used to load into EMRS
2657                                                          bit 10 - DQSN Enable/Disable field. By default, we
2658                                                          program the DDR's to drive the DQSN also. Set it to
2659                                                          1 if DQSN should be Hi-Z.
2660                                                              0 - DQSN Enable
2661                                                              1 - DQSN Disable */
2662 #else
        /* Little-endian view: identical fields, declared in reverse order. */
2663         uint64_t dic                          : 2;
2664         uint64_t qs_dic                       : 2;
2665         uint64_t tskw                         : 2;
2666         uint64_t sil_lat                      : 2;
2667         uint64_t bprch                        : 1;
2668         uint64_t fprch2                       : 1;
2669         uint64_t mode128b                     : 1;
2670         uint64_t dreset                       : 1;
2671         uint64_t inorder_mrf                  : 1;
2672         uint64_t inorder_mwf                  : 1;
2673         uint64_t r2r_slot                     : 1;
2674         uint64_t rdimm_ena                    : 1;
2675         uint64_t reserved_16_17               : 2;
2676         uint64_t max_write_batch              : 4;
2677         uint64_t xor_bank                     : 1;
2678         uint64_t slow_scf                     : 1;
2679         uint64_t ddr__pctl                    : 4;
2680         uint64_t ddr__nctl                    : 4;
2681         uint64_t reserved_32_63               : 32;
2682 #endif
2683         } cn58xx;
        /* CN58XX pass 1 reuses the CN58XX field layout. */
2684         struct cvmx_lmcx_ctl_cn58xx           cn58xxp1;
2685 };
/* Convenience typedef for the LMC_CTL register union declared above. */
2686 typedef union cvmx_lmcx_ctl cvmx_lmcx_ctl_t;
2687
2688 /**
2689  * cvmx_lmc#_ctl1
2690  *
2691  * LMC_CTL1 = LMC Control1
2692  * This register is an assortment of various control fields needed by the memory controller
2693  */
2694 union cvmx_lmcx_ctl1
2695 {
2696         uint64_t u64;
        /* Common (superset) layout of LMC_CTL1: defines every field used by
           any chip variant; fields occupy bits 0-20, bits 21-63 reserved. */
2697         struct cvmx_lmcx_ctl1_s
2698         {
2699 #if __BYTE_ORDER == __BIG_ENDIAN
2700         uint64_t reserved_21_63               : 43;
2701         uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
2702                                                          0=disabled, 1=enabled */
2703         uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
2704                                                          having waited for 2^FORCEWRITE cycles.  0=disabled. */
2705         uint64_t idlepower                    : 3;  /**< Enter power-down mode after the memory controller has
2706                                                          been idle for 2^(2+IDLEPOWER) cycles.  0=disabled. */
2707         uint64_t sequence                     : 3;  /**< Instruction sequence that is run after a 0->1 transition
2708                                                          on LMC_MEM_CFG0[INIT_START].
2709                                                          0=DDR2 power-up/init, 1=read-leveling
2710                                                          2=self-refresh entry, 3=self-refresh exit,
2711                                                          4=power-down entry, 5=power-down exit, 6=7=illegal */
2712         uint64_t sil_mode                     : 1;  /**< Read Silo mode.  0=envelope, 1=self-timed. */
2713         uint64_t dcc_enable                   : 1;  /**< Duty Cycle Corrector Enable.
2714                                                          0=disable, 1=enable
2715                                                          If the memory part does not support DCC, then this bit
2716                                                          must be set to 0. */
2717         uint64_t reserved_2_7                 : 6;
2718         uint64_t data_layout                  : 2;  /**< Logical data layout per DQ byte lane:
2719                                                          In 32b mode, this setting has no effect and the data
2720                                                          layout DQ[35:0] is the following:
2721                                                              [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]]
2722                                                          In 16b mode, the DQ[35:0] layouts are the following:
2723                                                          0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]]
2724                                                          1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]]
2725                                                          2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]]
2726                                                          where E means ecc, D means data, and 0 means unused
2727                                                          (ignored on reads and written as 0 on writes) */
2728 #else
        /* Little-endian view: identical fields, declared in reverse order. */
2729         uint64_t data_layout                  : 2;
2730         uint64_t reserved_2_7                 : 6;
2731         uint64_t dcc_enable                   : 1;
2732         uint64_t sil_mode                     : 1;
2733         uint64_t sequence                     : 3;
2734         uint64_t idlepower                    : 3;
2735         uint64_t forcewrite                   : 4;
2736         uint64_t ecc_adr                      : 1;
2737         uint64_t reserved_21_63               : 43;
2738 #endif
2739         } s;
        /* LMC_CTL1 layout for CN30XX: only DATA_LAYOUT (bits 0-1) is
           implemented; all other bits are reserved. */
2740         struct cvmx_lmcx_ctl1_cn30xx
2741         {
2742 #if __BYTE_ORDER == __BIG_ENDIAN
2743         uint64_t reserved_2_63                : 62;
2744         uint64_t data_layout                  : 2;  /**< Logical data layout per DQ byte lane:
2745                                                          In 32b mode, this setting has no effect and the data
2746                                                          layout DQ[35:0] is the following:
2747                                                              [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]]
2748                                                          In 16b mode, the DQ[35:0] layouts are the following:
2749                                                          0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]]
2750                                                          1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]]
2751                                                          2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]]
2752                                                          where E means ecc, D means data, and 0 means unused
2753                                                          (ignored on reads and written as 0 on writes) */
2754 #else
        /* Little-endian view: identical fields, declared in reverse order. */
2755         uint64_t data_layout                  : 2;
2756         uint64_t reserved_2_63                : 62;
2757 #endif
2758         } cn30xx;
2759         struct cvmx_lmcx_ctl1_cn50xx  /**< LMC_CTL1 layout for CN50XX: SIL_MODE, DCC_ENABLE and DATA_LAYOUT fields */
2760         {
2761 #if __BYTE_ORDER == __BIG_ENDIAN
2762         uint64_t reserved_10_63               : 54;
2763         uint64_t sil_mode                     : 1;  /**< Read Silo mode.  0=envelope, 1=self-timed. */
2764         uint64_t dcc_enable                   : 1;  /**< Duty Cycle Corrector Enable.
2765                                                          0=disable, 1=enable
2766                                                          If the memory part does not support DCC, then this bit
2767                                                          must be set to 0. */
2768         uint64_t reserved_2_7                 : 6;
2769         uint64_t data_layout                  : 2;  /**< Logical data layout per DQ byte lane:
2770                                                          In 32b mode, this setting has no effect and the data
2771                                                          layout DQ[35:0] is the following:
2772                                                              [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]]
2773                                                          In 16b mode, the DQ[35:0] layouts are the following:
2774                                                          0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]]
2775                                                          1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]]
2776                                                          2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]]
2777                                                          where E means ecc, D means data, and 0 means unused
2778                                                          (ignored on reads and written as 0 on writes) */
2779 #else  /* little-endian view: identical fields in ascending bit order */
2780         uint64_t data_layout                  : 2;
2781         uint64_t reserved_2_7                 : 6;
2782         uint64_t dcc_enable                   : 1;
2783         uint64_t sil_mode                     : 1;
2784         uint64_t reserved_10_63               : 54;
2785 #endif
2786         } cn50xx;
2787         struct cvmx_lmcx_ctl1_cn52xx  /**< LMC_CTL1 layout for CN52XX (shared by CN52XXP1/CN56XX/CN56XXP1 below) */
2788         {
2789 #if __BYTE_ORDER == __BIG_ENDIAN
2790         uint64_t reserved_21_63               : 43;
2791         uint64_t ecc_adr                      : 1;  /**< Include memory reference address in the ECC calculation
2792                                                          0=disabled, 1=enabled */
2793         uint64_t forcewrite                   : 4;  /**< Force the oldest outstanding write to complete after
2794                                                          having waited for 2^FORCEWRITE cycles.  0=disabled. */
2795         uint64_t idlepower                    : 3;  /**< Enter power-down mode after the memory controller has
2796                                                          been idle for 2^(2+IDLEPOWER) cycles.  0=disabled. */
2797         uint64_t sequence                     : 3;  /**< Instruction sequence that is run after a 0->1 transition
2798                                                          on LMC_MEM_CFG0[INIT_START].
2799                                                          0=DDR2 power-up/init, 1=read-leveling
2800                                                          2=self-refresh entry, 3=self-refresh exit,
2801                                                          4=power-down entry, 5=power-down exit, 6,7=illegal */
2802         uint64_t sil_mode                     : 1;  /**< Read Silo mode.  0=envelope, 1=self-timed. */
2803         uint64_t dcc_enable                   : 1;  /**< Duty Cycle Corrector Enable.
2804                                                          0=disable, 1=enable
2805                                                          If the memory part does not support DCC, then this bit
2806                                                          must be set to 0. */
2807         uint64_t reserved_0_7                 : 8;
2808 #else  /* little-endian view: identical fields in ascending bit order */
2809         uint64_t reserved_0_7                 : 8;
2810         uint64_t dcc_enable                   : 1;
2811         uint64_t sil_mode                     : 1;
2812         uint64_t sequence                     : 3;
2813         uint64_t idlepower                    : 3;
2814         uint64_t forcewrite                   : 4;
2815         uint64_t ecc_adr                      : 1;
2816         uint64_t reserved_21_63               : 43;
2817 #endif
2818         } cn52xx;
2819         struct cvmx_lmcx_ctl1_cn52xx          cn52xxp1;
2820         struct cvmx_lmcx_ctl1_cn52xx          cn56xx;
2821         struct cvmx_lmcx_ctl1_cn52xx          cn56xxp1;
2822         struct cvmx_lmcx_ctl1_cn58xx  /**< LMC_CTL1 layout for CN58XX: only SIL_MODE and DCC_ENABLE fields */
2823         {
2824 #if __BYTE_ORDER == __BIG_ENDIAN
2825         uint64_t reserved_10_63               : 54;
2826         uint64_t sil_mode                     : 1;  /**< Read Silo mode.  0=envelope, 1=self-timed. */
2827         uint64_t dcc_enable                   : 1;  /**< Duty Cycle Corrector Enable.
2828                                                          0=disable, 1=enable
2829                                                          If the memory part does not support DCC, then this bit
2830                                                          must be set to 0. */
2831         uint64_t reserved_0_7                 : 8;
2832 #else  /* little-endian view: identical fields in ascending bit order */
2833         uint64_t reserved_0_7                 : 8;
2834         uint64_t dcc_enable                   : 1;
2835         uint64_t sil_mode                     : 1;
2836         uint64_t reserved_10_63               : 54;
2837 #endif
2838         } cn58xx;
2839         struct cvmx_lmcx_ctl1_cn58xx          cn58xxp1;
2840 };
2841 typedef union cvmx_lmcx_ctl1 cvmx_lmcx_ctl1_t;
2842
2843 /**
2844  * cvmx_lmc#_dclk_cnt
2845  *
2846  * LMC_DCLK_CNT  = Performance Counters
2847  *
2848  */
2849 union cvmx_lmcx_dclk_cnt
2850 {
2851         uint64_t u64;                               /**< Whole register as one 64-bit value */
2852         struct cvmx_lmcx_dclk_cnt_s
2853         {
2854 #if __BYTE_ORDER == __BIG_ENDIAN
2855         uint64_t dclkcnt                      : 64; /**< Performance Counter
2856                                                          64-bit counter that increments every CK cycle */
2857 #else  /* little-endian view: same single 64-bit field */
2858         uint64_t dclkcnt                      : 64;
2859 #endif
2860         } s;
2861         struct cvmx_lmcx_dclk_cnt_s           cn63xx;
2862         struct cvmx_lmcx_dclk_cnt_s           cn63xxp1;
2863 };
2864 typedef union cvmx_lmcx_dclk_cnt cvmx_lmcx_dclk_cnt_t;
2865
2866 /**
2867  * cvmx_lmc#_dclk_cnt_hi
2868  *
2869  * LMC_DCLK_CNT_HI  = Performance Counters
2870  *
2871  */
2872 union cvmx_lmcx_dclk_cnt_hi
2873 {
2874         uint64_t u64;                               /**< Whole register as one 64-bit value */
2875         struct cvmx_lmcx_dclk_cnt_hi_s
2876         {
2877 #if __BYTE_ORDER == __BIG_ENDIAN
2878         uint64_t reserved_32_63               : 32;
2879         uint64_t dclkcnt_hi                   : 32; /**< Performance Counter that counts dclks
2880                                                          Upper 32-bits of a 64-bit counter. */
2881 #else  /* little-endian view: identical fields in ascending bit order */
2882         uint64_t dclkcnt_hi                   : 32;
2883         uint64_t reserved_32_63               : 32;
2884 #endif
2885         } s;
2886         struct cvmx_lmcx_dclk_cnt_hi_s        cn30xx;
2887         struct cvmx_lmcx_dclk_cnt_hi_s        cn31xx;
2888         struct cvmx_lmcx_dclk_cnt_hi_s        cn38xx;
2889         struct cvmx_lmcx_dclk_cnt_hi_s        cn38xxp2;
2890         struct cvmx_lmcx_dclk_cnt_hi_s        cn50xx;
2891         struct cvmx_lmcx_dclk_cnt_hi_s        cn52xx;
2892         struct cvmx_lmcx_dclk_cnt_hi_s        cn52xxp1;
2893         struct cvmx_lmcx_dclk_cnt_hi_s        cn56xx;
2894         struct cvmx_lmcx_dclk_cnt_hi_s        cn56xxp1;
2895         struct cvmx_lmcx_dclk_cnt_hi_s        cn58xx;
2896         struct cvmx_lmcx_dclk_cnt_hi_s        cn58xxp1;
2897 };
2898 typedef union cvmx_lmcx_dclk_cnt_hi cvmx_lmcx_dclk_cnt_hi_t;
2899
2900 /**
2901  * cvmx_lmc#_dclk_cnt_lo
2902  *
2903  * LMC_DCLK_CNT_LO  = Performance Counters
2904  *
2905  */
2906 union cvmx_lmcx_dclk_cnt_lo
2907 {
2908         uint64_t u64;                               /**< Whole register as one 64-bit value */
2909         struct cvmx_lmcx_dclk_cnt_lo_s
2910         {
2911 #if __BYTE_ORDER == __BIG_ENDIAN
2912         uint64_t reserved_32_63               : 32;
2913         uint64_t dclkcnt_lo                   : 32; /**< Performance Counter that counts dclks
2914                                                          Lower 32-bits of a 64-bit counter. */
2915 #else  /* little-endian view: identical fields in ascending bit order */
2916         uint64_t dclkcnt_lo                   : 32;
2917         uint64_t reserved_32_63               : 32;
2918 #endif
2919         } s;
2920         struct cvmx_lmcx_dclk_cnt_lo_s        cn30xx;
2921         struct cvmx_lmcx_dclk_cnt_lo_s        cn31xx;
2922         struct cvmx_lmcx_dclk_cnt_lo_s        cn38xx;
2923         struct cvmx_lmcx_dclk_cnt_lo_s        cn38xxp2;
2924         struct cvmx_lmcx_dclk_cnt_lo_s        cn50xx;
2925         struct cvmx_lmcx_dclk_cnt_lo_s        cn52xx;
2926         struct cvmx_lmcx_dclk_cnt_lo_s        cn52xxp1;
2927         struct cvmx_lmcx_dclk_cnt_lo_s        cn56xx;
2928         struct cvmx_lmcx_dclk_cnt_lo_s        cn56xxp1;
2929         struct cvmx_lmcx_dclk_cnt_lo_s        cn58xx;
2930         struct cvmx_lmcx_dclk_cnt_lo_s        cn58xxp1;
2931 };
2932 typedef union cvmx_lmcx_dclk_cnt_lo cvmx_lmcx_dclk_cnt_lo_t;
2933
2934 /**
2935  * cvmx_lmc#_dclk_ctl
2936  *
2937  * LMC_DCLK_CTL = LMC DCLK generation control
2938  *
2939  *
2940  * Notes:
2941  * This CSR is only relevant for LMC1. LMC0_DCLK_CTL is not used.
2942  *
2943  */
2944 union cvmx_lmcx_dclk_ctl
2945 {
2946         uint64_t u64;                               /**< Whole register as one 64-bit value */
2947         struct cvmx_lmcx_dclk_ctl_s
2948         {
2949 #if __BYTE_ORDER == __BIG_ENDIAN
2950         uint64_t reserved_8_63                : 56;
2951         uint64_t off90_ena                    : 1;  /**< 0=use global DCLK (i.e. the PLL) directly for LMC1
2952                                                          1=use the 90 degree DCLK DLL to offset LMC1 DCLK */
2953         uint64_t dclk90_byp                   : 1;  /**< 0=90 degree DCLK DLL uses sampled delay from LMC0
2954                                                          1=90 degree DCLK DLL uses DCLK90_VLU
2955                                                          See DCLK90_VLU. */
2956         uint64_t dclk90_ld                    : 1;  /**< The 90 degree DCLK DLL samples the delay setting
2957                                                          from LMC0's DLL when this field transitions 0->1 */
2958         uint64_t dclk90_vlu                   : 5;  /**< Manual open-loop delay setting.
2959                                                          The LMC1 90 degree DCLK DLL uses DCLK90_VLU rather
2960                                                          than the delay setting sampled from LMC0 when
2961                                                          DCLK90_BYP=1. */
2962 #else  /* little-endian view: identical fields in ascending bit order */
2963         uint64_t dclk90_vlu                   : 5;
2964         uint64_t dclk90_ld                    : 1;
2965         uint64_t dclk90_byp                   : 1;
2966         uint64_t off90_ena                    : 1;
2967         uint64_t reserved_8_63                : 56;
2968 #endif
2969         } s;
2970         struct cvmx_lmcx_dclk_ctl_s           cn56xx;
2971         struct cvmx_lmcx_dclk_ctl_s           cn56xxp1;
2972 };
2973 typedef union cvmx_lmcx_dclk_ctl cvmx_lmcx_dclk_ctl_t;
2974
2975 /**
2976  * cvmx_lmc#_ddr2_ctl
2977  *
2978  * LMC_DDR2_CTL = LMC DDR2 & DLL Control Register
2979  *
2980  */
2981 union cvmx_lmcx_ddr2_ctl
2982 {
2983         uint64_t u64;                               /**< Whole register as one 64-bit value */
2984         struct cvmx_lmcx_ddr2_ctl_s
2985         {
2986 #if __BYTE_ORDER == __BIG_ENDIAN
2987         uint64_t reserved_32_63               : 32;
2988         uint64_t bank8                        : 1;  /**< For 8 bank DDR2 parts
2989                                                          1 - DDR2 parts have 8 internal banks (BA is 3 bits
2990                                                          wide).
2991                                                          0 - DDR2 parts have 4 internal banks (BA is 2 bits
2992                                                          wide). */
2993         uint64_t burst8                       : 1;  /**< 8-burst mode.
2994                                                          1 - DDR data transfer happens in burst of 8
2995                                                          0 - DDR data transfer happens in burst of 4
2996                                                          BURST8 should be set when DDR2T is set
2997                                                          to minimize the command bandwidth loss. */
2998         uint64_t addlat                       : 3;  /**< Additional Latency for posted CAS
2999                                                          When Posted CAS is on, this configures the additional
3000                                                          latency. This should be set to
3001                                                                 1 .. LMC_MEM_CFG1[TRCD]-2
3002                                                          (Note the implication that posted CAS should not
3003                                                          be used when tRCD is two.) */
3004         uint64_t pocas                        : 1;  /**< Enable the Posted CAS feature of DDR2. */
3005         uint64_t bwcnt                        : 1;  /**< Bus utilization counter Clear.
3006                                                          Clears the LMC_OPS_CNT_*, LMC_IFB_CNT_*, and
3007                                                          LMC_DCLK_CNT_* registers. SW should first write this
3008                                                          field to a one, then write this field to a zero to
3009                                                          clear the CSR's. */
3010         uint64_t twr                          : 3;  /**< DDR Write Recovery time (tWR). Last Wr Brst to Pre delay
3011                                                          This is not a direct encoding of the value. It is
3012                                                          programmed as below per DDR2 spec. The decimal number
3013                                                          on the right is RNDUP(tWR(ns) / tCYC(ns))
3014                                                           TYP=15ns
3015                                                              - 000: RESERVED
3016                                                              - 001: 2
3017                                                              - 010: 3
3018                                                              - 011: 4
3019                                                              - 100: 5
3020                                                              - 101: 6
3021                                                              - 110: 7
3022                                                              - 111: 8 */
3023         uint64_t silo_hc                      : 1;  /**< Delays the read sample window by a Half Cycle. */
3024         uint64_t ddr_eof                      : 4;  /**< Early Fill Counter Init.
3025                                                          L2 needs to know a few cycles before a fill completes so
3026                                                          it can get its Control pipe started (for better overall
3027                                                          performance). This counter contains an init value which
3028                                                          is a function of Eclk/Dclk ratio to account for the
3029                                                          asynchronous boundary between L2 cache and the DRAM
3030                                                          controller. This init value will
3031                                                          determine when to safely let the L2 know that a fill
3032                                                          termination is coming up.
3033                                                          Set DDR_EOF according to the following rule:
3034                                                          eclkFreq/dclkFreq = dclkPeriod/eclkPeriod = RATIO
3035                                                                 RATIO < 6/6  -> illegal
3036                                                          6/6 <= RATIO < 6/5  -> DDR_EOF=3
3037                                                          6/5 <= RATIO < 6/4  -> DDR_EOF=3
3038                                                          6/4 <= RATIO < 6/3  -> DDR_EOF=2
3039                                                          6/3 <= RATIO < 6/2  -> DDR_EOF=1
3040                                                          6/2 <= RATIO < 6/1  -> DDR_EOF=0
3041                                                          6/1 <= RATIO        -> DDR_EOF=0 */
3042         uint64_t tfaw                         : 5;  /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1
3043                                                          Four Access Window time. Relevant only in DDR2 AND in
3044                                                          8-bank parts.
3045                                                              tFAW = 5'b0 in DDR2-4bank
3046                                                              tFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1
3047                                                                       in DDR2-8bank */
3048         uint64_t crip_mode                    : 1;  /**< Cripple Mode - When set, the LMC allows only
3049                                                          1 inflight transaction (vs. 8 in normal mode).
3050                                                          This bit is ONLY to be set at power-on and
3051                                                          should not be set for normal use. */
3052         uint64_t ddr2t                        : 1;  /**< Turn on the DDR 2T mode. 2 cycle window for CMD and
3053                                                          address. This mode helps relieve setup time pressure
3054                                                          on the Address and command bus which nominally have
3055                                                          a very large fanout. Please refer to Micron's tech
3056                                                          note tn_47_01 titled "DDR2-533 Memory Design Guide
3057                                                          for Two Dimm Unbuffered Systems" for physical details.
3058                                                          BURST8 should be set when DDR2T is set to minimize
3059                                                          add/cmd loss. */
3060         uint64_t odt_ena                      : 1;  /**< Enable Obsolete ODT on Reads
3061                                                          Obsolete Read ODT wiggles DDR_ODT_* pins on reads.
3062                                                          Should normally be cleared to zero.
3063                                                          When this is on, the following fields must also be
3064                                                          programmed:
3065                                                              LMC_CTL->QS_DIC - programs the termination value
3066                                                              LMC_RODT_CTL - programs the ODT I/O mask for Reads */
3067         uint64_t qdll_ena                     : 1;  /**< DDR Quad DLL Enable: A 0->1 transition on this bit after
3068                                                          DCLK init sequence will reset the DDR 90 DLL. Should
3069                                                          happen at startup before any activity in DDR.
3070                                                          DRESET should be asserted before and for 10 usec
3071                                                          following the 0->1 transition on QDLL_ENA. */
3072         uint64_t dll90_vlu                    : 5;  /**< Contains the open loop setting value for the DDR90 delay
3073                                                          line. */
3074         uint64_t dll90_byp                    : 1;  /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be
3075                                                          bypassed and the setting is defined by DLL90_VLU */
3076         uint64_t rdqs                         : 1;  /**< DDR2 RDQS mode. When set, configures memory subsystem to
3077                                                          use unidirectional DQS pins. RDQS/DM - Rcv & DQS - Xmit */
3078         uint64_t ddr2                         : 1;  /**< Should be set */
3079 #else  /* little-endian view: identical fields in ascending bit order */
3080         uint64_t ddr2                         : 1;
3081         uint64_t rdqs                         : 1;
3082         uint64_t dll90_byp                    : 1;
3083         uint64_t dll90_vlu                    : 5;
3084         uint64_t qdll_ena                     : 1;
3085         uint64_t odt_ena                      : 1;
3086         uint64_t ddr2t                        : 1;
3087         uint64_t crip_mode                    : 1;
3088         uint64_t tfaw                         : 5;
3089         uint64_t ddr_eof                      : 4;
3090         uint64_t silo_hc                      : 1;
3091         uint64_t twr                          : 3;
3092         uint64_t bwcnt                        : 1;
3093         uint64_t pocas                        : 1;
3094         uint64_t addlat                       : 3;
3095         uint64_t burst8                       : 1;
3096         uint64_t bank8                        : 1;
3097         uint64_t reserved_32_63               : 32;
3098 #endif
3099         } s;
3100         struct cvmx_lmcx_ddr2_ctl_cn30xx      /**< CN30XX/CN31XX variant: bit 1 reserved (no RDQS field) */
3101         {
3102 #if __BYTE_ORDER == __BIG_ENDIAN
3103         uint64_t reserved_32_63               : 32;
3104         uint64_t bank8                        : 1;  /**< For 8 bank DDR2 parts
3105                                                          1 - DDR2 parts have 8 internal banks (BA is 3 bits
3106                                                          wide).
3107                                                          0 - DDR2 parts have 4 internal banks (BA is 2 bits
3108                                                          wide). */
3109         uint64_t burst8                       : 1;  /**< 8-burst mode.
3110                                                          1 - DDR data transfer happens in burst of 8
3111                                                          0 - DDR data transfer happens in burst of 4
3112                                                          BURST8 should be set when DDR2T is set to minimize
3113                                                          add/cmd bandwidth loss. */
3114         uint64_t addlat                       : 3;  /**< Additional Latency for posted CAS
3115                                                          When Posted CAS is on, this configures the additional
3116                                                          latency. This should be set to
3117                                                                 1 .. LMC_MEM_CFG1[TRCD]-2
3118                                                          (Note the implication that posted CAS should not
3119                                                          be used when tRCD is two.) */
3120         uint64_t pocas                        : 1;  /**< Enable the Posted CAS feature of DDR2. */
3121         uint64_t bwcnt                        : 1;  /**< Bus utilization counter Clear.
3122                                                          Clears the LMC_OPS_CNT_*, LMC_IFB_CNT_*, and
3123                                                          LMC_DCLK_CNT_* registers. SW should first write this
3124                                                          field to a one, then write this field to a zero to
3125                                                          clear the CSR's. */
3126         uint64_t twr                          : 3;  /**< DDR Write Recovery time (tWR). Last Wr Brst to Pre delay
3127                                                          This is not a direct encoding of the value. It is
3128                                                          programmed as below per DDR2 spec. The decimal number
3129                                                          on the right is RNDUP(tWR(ns) / tCYC(ns))
3130                                                           TYP=15ns
3131                                                              - 000: RESERVED
3132                                                              - 001: 2
3133                                                              - 010: 3
3134                                                              - 011: 4
3135                                                              - 100: 5
3136                                                              - 101: 6
3137                                                              - 110-111: RESERVED */
3138         uint64_t silo_hc                      : 1;  /**< Delays the read sample window by a Half Cycle. */
3139         uint64_t ddr_eof                      : 4;  /**< Early Fill Counter Init.
3140                                                          L2 needs to know a few cycles before a fill completes so
3141                                                          it can get its Control pipe started (for better overall
3142                                                          performance). This counter contains an init value which
3143                                                          is a function of Eclk/Dclk ratio to account for the
3144                                                          asynchronous boundary between L2 cache and the DRAM
3145                                                          controller. This init value will
3146                                                          determine when to safely let the L2 know that a fill
3147                                                          termination is coming up.
3148                                                          DDR_EOF = RNDUP (DCLK period/Eclk Period). If the ratio
3149                                                          is above 3, set DDR_EOF to 3.
3150                                                              DCLK/ECLK period         DDR_EOF
3151                                                                 Less than 1            1
3152                                                                 Less than 2            2
3153                                                                 More than 2            3 */
3154         uint64_t tfaw                         : 5;  /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1
3155                                                          Four Access Window time. Relevant only in
3156                                                          8-bank parts.
3157                                                              TFAW = 5'b0 for DDR2-4bank
3158                                                              TFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1 in DDR2-8bank */
3159         uint64_t crip_mode                    : 1;  /**< Cripple Mode - When set, the LMC allows only
3160                                                          1 inflight transaction (vs. 8 in normal mode).
3161                                                          This bit is ONLY to be set at power-on and
3162                                                          should not be set for normal use. */
3163         uint64_t ddr2t                        : 1;  /**< Turn on the DDR 2T mode. 2 cycle window for CMD and
3164                                                          address. This mode helps relieve setup time pressure
3165                                                          on the Address and command bus which nominally have
3166                                                          a very large fanout. Please refer to Micron's tech
3167                                                          note tn_47_01 titled "DDR2-533 Memory Design Guide
3168                                                          for Two Dimm Unbuffered Systems" for physical details.
3169                                                          BURST8 should be used when DDR2T is set to minimize
3170                                                          add/cmd bandwidth loss. */
3171         uint64_t odt_ena                      : 1;  /**< Enable ODT for DDR2 on Reads
3172                                                          When this is on, the following fields must also be
3173                                                          programmed:
3174                                                              LMC_CTL->QS_DIC - programs the termination value
3175                                                              LMC_RODT_CTL - programs the ODT I/O mask for writes
3176                                                          Program as 0 for DDR1 mode and ODT needs to be off
3177                                                          on Octeon Reads */
3178         uint64_t qdll_ena                     : 1;  /**< DDR Quad DLL Enable: A 0->1 transition on this bit after
3179                                                          erst deassertion will reset the DDR 90 DLL. Should
3180                                                          happen at startup before any activity in DDR. */
3181         uint64_t dll90_vlu                    : 5;  /**< Contains the open loop setting value for the DDR90 delay
3182                                                          line. */
3183         uint64_t dll90_byp                    : 1;  /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be
3184                                                          bypassed and the setting is defined by DLL90_VLU */
3185         uint64_t reserved_1_1                 : 1;
3186         uint64_t ddr2                         : 1;  /**< DDR2 Enable: When set, configures memory subsystem for
3187                                                          DDR-II SDRAMs. */
3188 #else  /* little-endian view: identical fields in ascending bit order */
3189         uint64_t ddr2                         : 1;
3190         uint64_t reserved_1_1                 : 1;
3191         uint64_t dll90_byp                    : 1;
3192         uint64_t dll90_vlu                    : 5;
3193         uint64_t qdll_ena                     : 1;
3194         uint64_t odt_ena                      : 1;
3195         uint64_t ddr2t                        : 1;
3196         uint64_t crip_mode                    : 1;
3197         uint64_t tfaw                         : 5;
3198         uint64_t ddr_eof                      : 4;
3199         uint64_t silo_hc                      : 1;
3200         uint64_t twr                          : 3;
3201         uint64_t bwcnt                        : 1;
3202         uint64_t pocas                        : 1;
3203         uint64_t addlat                       : 3;
3204         uint64_t burst8                       : 1;
3205         uint64_t bank8                        : 1;
3206         uint64_t reserved_32_63               : 32;
3207 #endif
3208         } cn30xx;
3209         struct cvmx_lmcx_ddr2_ctl_cn30xx      cn31xx;
3210         struct cvmx_lmcx_ddr2_ctl_s           cn38xx;
3211         struct cvmx_lmcx_ddr2_ctl_s           cn38xxp2;
3212         struct cvmx_lmcx_ddr2_ctl_s           cn50xx;
3213         struct cvmx_lmcx_ddr2_ctl_s           cn52xx;
3214         struct cvmx_lmcx_ddr2_ctl_s           cn52xxp1;
3215         struct cvmx_lmcx_ddr2_ctl_s           cn56xx;
3216         struct cvmx_lmcx_ddr2_ctl_s           cn56xxp1;
3217         struct cvmx_lmcx_ddr2_ctl_s           cn58xx;
3218         struct cvmx_lmcx_ddr2_ctl_s           cn58xxp1;
3219 };
3220 typedef union cvmx_lmcx_ddr2_ctl cvmx_lmcx_ddr2_ctl_t;
3221
/**
 * cvmx_lmc#_ddr_pll_ctl
 *
 * LMC_DDR_PLL_CTL = LMC DDR PLL control
 *
 *
 * Notes:
 * DDR PLL Bringup sequence:
 * 1.  Write CLKF, DDR_PS_EN, DFM_PS_EN, DIFFAMP, CPS, CPB.
 *     If test mode is going to be activated, then also write jtg__ddr_pll_tm_en1, jtg__ddr_pll_tm_en2, jtg__ddr_pll_tm_en3,
 *     jtg__ddr_pll_tm_en4, jtg__dfa_pll_tm_en1, jtg__dfa_pll_tm_en2, jtg__dfa_pll_tm_en3, jtg__dfa_pll_tm_en4, JTAG_TEST_MODE
 * 2.  Wait 128 ref clock cycles (7680 rclk cycles)
 * 3.  Write 1 to RESET_N
 * 4.  Wait 1152 ref clocks (1152*16 rclk cycles)
 * 5.  Write 0 to  DDR_DIV_RESET and DFM_DIV_RESET
 * 6.  Wait 10 ref clock cycles (160 rclk cycles) before bringing up the DDR interface
 *     If test mode is going to be activated, wait an additional 8191 ref clocks (8191*16 rclk cycles) to allow PLL
 *     clock alignment
 */
union cvmx_lmcx_ddr_pll_ctl
{
        uint64_t u64;
        struct cvmx_lmcx_ddr_pll_ctl_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_27_63               : 37;
        uint64_t jtg_test_mode                : 1;  /**< JTAG Test Mode
                                                         Clock alignment between DCLK & REFCLK as well as FCLK &
                                                         REFCLK can only be performed after the ddr_pll_divider_reset
                                                         is deasserted. SW need to wait at least 10 reference clock
                                                         cycles after deasserting pll_divider_reset before asserting
                                                         LMC(0)_DDR_PLL_CTL[JTG_TEST_MODE]. During alignment (which can
                                                         take up to 160 microseconds) DCLK and FCLK can exhibit some
                                                         high frequency pulses. Therefore, all bring up activities in
                                                         that clock domain need to be delayed (when the chip operates
                                                         in jtg_test_mode) by about 160 microseconds to ensure that
                                                         lock is achieved. */
        uint64_t dfm_div_reset                : 1;  /**< DFM postscalar divider reset */
        uint64_t dfm_ps_en                    : 3;  /**< DFM postscalar divide ratio
                                                         Determines the DFM CK speed.
                                                         0x0 : Divide LMC+DFM PLL output by 1
                                                         0x1 : Divide LMC+DFM PLL output by 2
                                                         0x2 : Divide LMC+DFM PLL output by 3
                                                         0x3 : Divide LMC+DFM PLL output by 4
                                                         0x4 : Divide LMC+DFM PLL output by 6
                                                         0x5 : Divide LMC+DFM PLL output by 8
                                                         0x6 : Divide LMC+DFM PLL output by 12
                                                         0x7 : Divide LMC+DFM PLL output by 12
                                                         DFM_PS_EN is not used when DFM_DIV_RESET = 1 */
        uint64_t ddr_div_reset                : 1;  /**< DDR postscalar divider reset */
        uint64_t ddr_ps_en                    : 3;  /**< DDR postscalar divide ratio
                                                         Determines the LMC CK speed.
                                                         0x0 : Divide LMC+DFM PLL output by 1
                                                         0x1 : Divide LMC+DFM PLL output by 2
                                                         0x2 : Divide LMC+DFM PLL output by 3
                                                         0x3 : Divide LMC+DFM PLL output by 4
                                                         0x4 : Divide LMC+DFM PLL output by 6
                                                         0x5 : Divide LMC+DFM PLL output by 8
                                                         0x6 : Divide LMC+DFM PLL output by 12
                                                         0x7 : Divide LMC+DFM PLL output by 12
                                                         DDR_PS_EN is not used when DDR_DIV_RESET = 1 */
        uint64_t diffamp                      : 4;  /**< PLL diffamp input transconductance */
        uint64_t cps                          : 3;  /**< PLL charge-pump current */
        uint64_t cpb                          : 3;  /**< PLL charge-pump current */
        uint64_t reset_n                      : 1;  /**< PLL reset */
        uint64_t clkf                         : 7;  /**< Multiply reference by CLKF
                                                         32 <= CLKF <= 64
                                                         LMC+DFM PLL frequency = 50 * CLKF
                                                         min = 1.6 GHz, max = 3.2 GHz */
#else
        uint64_t clkf                         : 7;
        uint64_t reset_n                      : 1;
        uint64_t cpb                          : 3;
        uint64_t cps                          : 3;
        uint64_t diffamp                      : 4;
        uint64_t ddr_ps_en                    : 3;
        uint64_t ddr_div_reset                : 1;
        uint64_t dfm_ps_en                    : 3;
        uint64_t dfm_div_reset                : 1;
        uint64_t jtg_test_mode                : 1;
        uint64_t reserved_27_63               : 37;
#endif
        } s;
        struct cvmx_lmcx_ddr_pll_ctl_s        cn63xx;
        struct cvmx_lmcx_ddr_pll_ctl_s        cn63xxp1;
};
typedef union cvmx_lmcx_ddr_pll_ctl cvmx_lmcx_ddr_pll_ctl_t;

/**
 * cvmx_lmc#_delay_cfg
 *
 * LMC_DELAY_CFG = Open-loop delay line settings
 *
 *
 * Notes:
 * The DQ bits add OUTGOING delay only to dq, dqs_[p,n], cb, cbs_[p,n], dqm.  Delay is approximately
 * 50-80ps per setting depending on process/voltage.  There is no need to add incoming delay since by
 * default all strobe bits are delayed internally by 90 degrees (as was always the case in previous
 * passes and past chips).
 *
 * The CMD add delay to all command bits DDR_RAS, DDR_CAS, DDR_A<15:0>, DDR_BA<2:0>, DDR_n_CS<1:0>_L,
 * DDR_WE, DDR_CKE and DDR_ODT_<7:0>. Again, delay is 50-80ps per tap.
 *
 * The CLK bits add delay to all clock signals DDR_CK_<5:0>_P and DDR_CK_<5:0>_N.  Again, delay is
 * 50-80ps per tap.
 *
 * The usage scenario is the following: There is too much delay on command signals and setup on command
 * is not met. The user can then delay the clock until setup is met.
 *
 * At the same time though, dq/dqs should be delayed because there is also a DDR spec tying dqs with
 * clock. If clock is too much delayed with respect to dqs, writes will start to fail.
 *
 * This scheme should eliminate the board need of adding routing delay to clock signals to make high
 * frequencies work.
 */
union cvmx_lmcx_delay_cfg
{
        uint64_t u64;
        struct cvmx_lmcx_delay_cfg_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_15_63               : 49;
        uint64_t dq                           : 5;  /**< Setting for DQ  delay line */
        uint64_t cmd                          : 5;  /**< Setting for CMD delay line */
        uint64_t clk                          : 5;  /**< Setting for CLK delay line */
#else
        uint64_t clk                          : 5;
        uint64_t cmd                          : 5;
        uint64_t dq                           : 5;
        uint64_t reserved_15_63               : 49;
#endif
        } s;
        struct cvmx_lmcx_delay_cfg_s          cn30xx;
        struct cvmx_lmcx_delay_cfg_cn38xx
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_14_63               : 50;
        uint64_t dq                           : 4;  /**< Setting for DQ  delay line */
        uint64_t reserved_9_9                 : 1;
        uint64_t cmd                          : 4;  /**< Setting for CMD delay line */
        uint64_t reserved_4_4                 : 1;
        uint64_t clk                          : 4;  /**< Setting for CLK delay line */
#else
        uint64_t clk                          : 4;
        uint64_t reserved_4_4                 : 1;
        uint64_t cmd                          : 4;
        uint64_t reserved_9_9                 : 1;
        uint64_t dq                           : 4;
        uint64_t reserved_14_63               : 50;
#endif
        } cn38xx;
        struct cvmx_lmcx_delay_cfg_cn38xx     cn50xx;
        struct cvmx_lmcx_delay_cfg_cn38xx     cn52xx;
        struct cvmx_lmcx_delay_cfg_cn38xx     cn52xxp1;
        struct cvmx_lmcx_delay_cfg_cn38xx     cn56xx;
        struct cvmx_lmcx_delay_cfg_cn38xx     cn56xxp1;
        struct cvmx_lmcx_delay_cfg_cn38xx     cn58xx;
        struct cvmx_lmcx_delay_cfg_cn38xx     cn58xxp1;
};
typedef union cvmx_lmcx_delay_cfg cvmx_lmcx_delay_cfg_t;

/**
 * cvmx_lmc#_dimm#_params
 *
 * LMC_DIMMX_PARAMS = LMC DIMMX Params
 * This register contains values to be programmed into each control word in the corresponding (registered) DIMM. The control words allow
 * optimization of the device properties for different raw card designs.
 *
 * Notes:
 * LMC only uses this CSR when LMC*_CONTROL[RDIMM_ENA]=1. During a power-up/init sequence, LMC writes
 * these fields into the control words in the JEDEC standard SSTE32882 registering clock driver on an
 * RDIMM when corresponding LMC*_DIMM_CTL[DIMM*_WMASK] bits are set.
 */
union cvmx_lmcx_dimmx_params
{
        uint64_t u64;
        struct cvmx_lmcx_dimmx_params_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t rc15                         : 4;  /**< RC15, Reserved */
        uint64_t rc14                         : 4;  /**< RC14, Reserved */
        uint64_t rc13                         : 4;  /**< RC13, Reserved */
        uint64_t rc12                         : 4;  /**< RC12, Reserved */
        uint64_t rc11                         : 4;  /**< RC11, Encoding for RDIMM Operating VDD */
        uint64_t rc10                         : 4;  /**< RC10, Encoding for RDIMM Operating Speed */
        uint64_t rc9                          : 4;  /**< RC9 , Power Savings Settings Control Word */
        uint64_t rc8                          : 4;  /**< RC8 , Additional IBT Settings Control Word */
        uint64_t rc7                          : 4;  /**< RC7 , Reserved */
        uint64_t rc6                          : 4;  /**< RC6 , Reserved */
        uint64_t rc5                          : 4;  /**< RC5 , CK Driver Characteristics Control Word */
        uint64_t rc4                          : 4;  /**< RC4 , Control Signals Driver Characteristics Control Word */
        uint64_t rc3                          : 4;  /**< RC3 , CA Signals Driver Characteristics Control Word */
        uint64_t rc2                          : 4;  /**< RC2 , Timing Control Word */
        uint64_t rc1                          : 4;  /**< RC1 , Clock Driver Enable Control Word */
        uint64_t rc0                          : 4;  /**< RC0 , Global Features Control Word */
#else
        uint64_t rc0                          : 4;
        uint64_t rc1                          : 4;
        uint64_t rc2                          : 4;
        uint64_t rc3                          : 4;
        uint64_t rc4                          : 4;
        uint64_t rc5                          : 4;
        uint64_t rc6                          : 4;
        uint64_t rc7                          : 4;
        uint64_t rc8                          : 4;
        uint64_t rc9                          : 4;
        uint64_t rc10                         : 4;
        uint64_t rc11                         : 4;
        uint64_t rc12                         : 4;
        uint64_t rc13                         : 4;
        uint64_t rc14                         : 4;
        uint64_t rc15                         : 4;
#endif
        } s;
        struct cvmx_lmcx_dimmx_params_s       cn63xx;
        struct cvmx_lmcx_dimmx_params_s       cn63xxp1;
};
typedef union cvmx_lmcx_dimmx_params cvmx_lmcx_dimmx_params_t;

/**
 * cvmx_lmc#_dimm_ctl
 *
 * LMC_DIMM_CTL = LMC DIMM Control
 *
 *
 * Notes:
 * This CSR is only used when LMC*_CONTROL[RDIMM_ENA]=1. During a power-up/init sequence, this CSR
 * controls LMC's writes to the control words in the JEDEC standard SSTE32882 registering clock driver
 * on an RDIMM.
 */
union cvmx_lmcx_dimm_ctl
{
        uint64_t u64;
        struct cvmx_lmcx_dimm_ctl_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_46_63               : 18;
        uint64_t parity                       : 1;  /**< Parity */
        uint64_t tcws                         : 13; /**< LMC waits for this time period before and after a RDIMM
                                                         Control Word Access during a power-up/init SEQUENCE.
                                                         TCWS is in multiples of 8 CK cycles.
                                                         Set TCWS (CSR field) = RNDUP[tcws(ns)/(8*tCYC(ns))],
                                                         where tCWS is the desired time (ns), and tCYC(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         TYP=0x4e0 (equivalent to 15us) when changing
                                                         clock timing (RC2.DBA1, RC6.DA4, RC10.DA3, RC10.DA4,
                                                         RC11.DA3, and RC11.DA4)
                                                         TYP=0x8, otherwise
                                                         0x0 = Reserved */
        uint64_t dimm1_wmask                  : 16; /**< DIMM1 Write Mask
                                                         if (DIMM1_WMASK[n] = 1)
                                                             Write DIMM1.RCn */
        uint64_t dimm0_wmask                  : 16; /**< DIMM0 Write Mask
                                                         if (DIMM0_WMASK[n] = 1)
                                                             Write DIMM0.RCn */
#else
        uint64_t dimm0_wmask                  : 16;
        uint64_t dimm1_wmask                  : 16;
        uint64_t tcws                         : 13;
        uint64_t parity                       : 1;
        uint64_t reserved_46_63               : 18;
#endif
        } s;
        struct cvmx_lmcx_dimm_ctl_s           cn63xx;
        struct cvmx_lmcx_dimm_ctl_s           cn63xxp1;
};
typedef union cvmx_lmcx_dimm_ctl cvmx_lmcx_dimm_ctl_t;

/**
 * cvmx_lmc#_dll_ctl
 *
 * LMC_DLL_CTL = LMC DLL control and DCLK reset
 *
 */
union cvmx_lmcx_dll_ctl
{
        uint64_t u64;
        struct cvmx_lmcx_dll_ctl_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_8_63                : 56;
        uint64_t dreset                       : 1;  /**< Dclk domain reset.  The reset signal that is used by the
                                                         Dclk domain is (DRESET || ECLK_RESET). */
        uint64_t dll90_byp                    : 1;  /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be
                                                         bypassed and the setting is defined by DLL90_VLU */
        uint64_t dll90_ena                    : 1;  /**< DDR Quad DLL Enable: A 0->1 transition on this bit after
                                                         DCLK init sequence resets the DDR 90 DLL. Should
                                                         happen at startup before any activity in DDR. QDLL_ENA
                                                         must not transition 1->0 outside of a DRESET sequence
                                                         (i.e. it must remain 1 until the next DRESET).
                                                         DRESET should be asserted before and for 10 usec
                                                         following the 0->1 transition on QDLL_ENA. */
        uint64_t dll90_vlu                    : 5;  /**< Contains the open loop setting value for the DDR90 delay
                                                         line. */
#else
        uint64_t dll90_vlu                    : 5;
        uint64_t dll90_ena                    : 1;
        uint64_t dll90_byp                    : 1;
        uint64_t dreset                       : 1;
        uint64_t reserved_8_63                : 56;
#endif
        } s;
        struct cvmx_lmcx_dll_ctl_s            cn52xx;
        struct cvmx_lmcx_dll_ctl_s            cn52xxp1;
        struct cvmx_lmcx_dll_ctl_s            cn56xx;
        struct cvmx_lmcx_dll_ctl_s            cn56xxp1;
};
typedef union cvmx_lmcx_dll_ctl cvmx_lmcx_dll_ctl_t;

/**
 * cvmx_lmc#_dll_ctl2
 *
 * LMC_DLL_CTL2 = LMC (Octeon) DLL control and DCLK reset
 *
 *
 * Notes:
 * DLL Bringup sequence:
 * 1. If not done already, set LMC*_DLL_CTL2 = 0, except when LMC*_DLL_CTL2[DRESET] = 1.
 * 2. Write 1 to LMC*_DLL_CTL2[DLL_BRINGUP]
 * 3. Wait for 10 CK cycles, then write 1 to LMC*_DLL_CTL2[QUAD_DLL_ENA]. It may not be feasible to count 10 CK cycles, but the
 *    idea is to configure the delay line into DLL mode by asserting DLL_BRING_UP earlier than [QUAD_DLL_ENA], even if it is one
 *    cycle early. LMC*_DLL_CTL2[QUAD_DLL_ENA] must not change after this point without restarting the LMC and/or DRESET initialization
 *    sequence.
 * 4. Read L2D_BST0 and wait for the result. (L2D_BST0 is subject to change depending on how it called in o63. It is still ok to go
 *    without step 4, since step 5 has enough time)
 * 5. Wait 10 us.
 * 6. Write 0 to LMC*_DLL_CTL2[DLL_BRINGUP]. LMC*_DLL_CTL2[DLL_BRINGUP] must not change after this point without restarting the LMC
 *    and/or DRESET initialization sequence.
 * 7. Read L2D_BST0 and wait for the result. (same as step 4, but the idea here is the wait some time before going to step 8, even it
 *    is one cycle is fine)
 * 8. Write 0 to LMC*_DLL_CTL2[DRESET].  LMC*_DLL_CTL2[DRESET] must not change after this point without restarting the LMC and/or
 *    DRESET initialization sequence.
 */
union cvmx_lmcx_dll_ctl2
{
        uint64_t u64;
        struct cvmx_lmcx_dll_ctl2_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_15_63               : 49;
        uint64_t dll_bringup                  : 1;  /**< DLL Bringup */
        uint64_t dreset                       : 1;  /**< Dclk domain reset.  The reset signal that is used by the
                                                         Dclk domain is (DRESET || ECLK_RESET). */
        uint64_t quad_dll_ena                 : 1;  /**< DLL Enable */
        uint64_t byp_sel                      : 4;  /**< Bypass select
                                                         0000 : no byte
                                                         0001 : byte 0
                                                         - ...
                                                         1001 : byte 8
                                                         1010 : all bytes
                                                         1011-1111 : Reserved */
        uint64_t byp_setting                  : 8;  /**< Bypass setting
                                                         DDR3-1600: 00100010
                                                         DDR3-1333: 00110010
                                                         DDR3-1066: 01001011
                                                         DDR3-800 : 01110101
                                                         DDR3-667 : 10010110
                                                         DDR3-600 : 10101100 */
#else
        uint64_t byp_setting                  : 8;
        uint64_t byp_sel                      : 4;
        uint64_t quad_dll_ena                 : 1;
        uint64_t dreset                       : 1;
        uint64_t dll_bringup                  : 1;
        uint64_t reserved_15_63               : 49;
#endif
        } s;
        struct cvmx_lmcx_dll_ctl2_s           cn63xx;
        struct cvmx_lmcx_dll_ctl2_s           cn63xxp1;
};
typedef union cvmx_lmcx_dll_ctl2 cvmx_lmcx_dll_ctl2_t;

/**
 * cvmx_lmc#_dll_ctl3
 *
 * LMC_DLL_CTL3 = LMC DLL control and DCLK reset
 *
 */
union cvmx_lmcx_dll_ctl3
{
        uint64_t u64;
        struct cvmx_lmcx_dll_ctl3_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_29_63               : 35;
        uint64_t dll_fast                     : 1;  /**< DLL lock
                                                         0 = DLL locked */
        uint64_t dll90_setting                : 8;  /**< Encoded DLL settings. Works in conjunction with
                                                         DLL90_BYTE_SEL */
        uint64_t fine_tune_mode               : 1;  /**< DLL Fine Tune Mode
                                                         0 = disabled
                                                         1 = enable.
                                                         When enabled, calibrate internal PHY DLL every
                                                         LMC*_CONFIG[REF_ZQCS_INT] CK cycles. */
        uint64_t dll_mode                     : 1;  /**< DLL Mode */
        uint64_t dll90_byte_sel               : 4;  /**< Observe DLL settings for selected byte
                                                         0001 : byte 0
                                                         - ...
                                                         1001 : byte 8
                                                         0000,1010-1111 : Reserved */
        uint64_t offset_ena                   : 1;  /**< Offset enable
                                                         0 = disable
                                                         1 = enable */
        uint64_t load_offset                  : 1;  /**< Load offset
                                                         0 : disable
                                                         1 : load (generates a 1 cycle pulse to the PHY)
                                                         This register is oneshot and clears itself each time
                                                         it is set */
        uint64_t mode_sel                     : 2;  /**< Mode select
                                                         00 : reset
                                                         01 : write
                                                         10 : read
                                                         11 : write & read */
        uint64_t byte_sel                     : 4;  /**< Byte select
                                                         0000 : no byte
                                                         0001 : byte 0
                                                         - ...
                                                         1001 : byte 8
                                                         1010 : all bytes
                                                         1011-1111 : Reserved */
        uint64_t offset                       : 6;  /**< Write/read offset setting
                                                         [4:0] : offset
                                                         [5]   : 0 = increment, 1 = decrement
                                                         Not a 2's complement value */
#else
        uint64_t offset                       : 6;
        uint64_t byte_sel                     : 4;
        uint64_t mode_sel                     : 2;
        uint64_t load_offset                  : 1;
        uint64_t offset_ena                   : 1;
        uint64_t dll90_byte_sel               : 4;
        uint64_t dll_mode                     : 1;
        uint64_t fine_tune_mode               : 1;
        uint64_t dll90_setting                : 8;
        uint64_t dll_fast                     : 1;
        uint64_t reserved_29_63               : 35;
#endif
        } s;
        struct cvmx_lmcx_dll_ctl3_s           cn63xx;
        struct cvmx_lmcx_dll_ctl3_s           cn63xxp1;
};
typedef union cvmx_lmcx_dll_ctl3 cvmx_lmcx_dll_ctl3_t;

/**
 * cvmx_lmc#_dual_memcfg
 *
 * LMC_DUAL_MEMCFG = LMC Dual Memory Configuration Register
 *
 * This register controls certain parameters of Dual Memory Configuration
 *
 * Notes:
 * This register enables the design to have two, separate memory configurations, selected dynamically
 * by the reference address.  Note however, that both configurations share
 * LMC*_CONTROL[XOR_BANK], LMC*_CONFIG[PBANK_LSB], LMC*_CONFIG[RANK_ENA], and all timing parameters.
 * In this description, "config0" refers to the normal memory configuration that is defined by the
 * LMC*_CONFIG[ROW_LSB] parameters and "config1" refers to the dual (or second)
 * memory configuration that is defined by this register.
 *
 * Enable mask to chip select mapping is shown below:
 *   CS_MASK[3] -> DIMM1_CS_<1>
 *   CS_MASK[2] -> DIMM1_CS_<0>
 *
 *   CS_MASK[1] -> DIMM0_CS_<1>
 *   CS_MASK[0] -> DIMM0_CS_<0>
 *
 *  DIMM n uses the pair of chip selects DIMMn_CS_<1:0>.
 *
 *  Programming restrictions for CS_MASK:
 *    when LMC*_CONFIG[RANK_ENA] == 0, CS_MASK[2n + 1] = CS_MASK[2n]
 */
union cvmx_lmcx_dual_memcfg
{
        uint64_t u64;
        struct cvmx_lmcx_dual_memcfg_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_20_63               : 44;
        uint64_t bank8                        : 1;  /**< See LMC_DDR2_CTL[BANK8] */
        uint64_t row_lsb                      : 3;  /**< See LMC*_CONFIG[ROW_LSB] */
        uint64_t reserved_8_15                : 8;
        uint64_t cs_mask                      : 8;  /**< Chip select mask.
                                                         This mask corresponds to the 8 chip selects for a memory
                                                         configuration.  Each reference address will assert one of
                                                         the chip selects.  If that chip select has its
                                                         corresponding CS_MASK bit set, then the "config1"
                                                         parameters are used, otherwise the "config0" parameters
                                                         are used.  See additional notes below.
                                                         [7:4] */
#else
        uint64_t cs_mask                      : 8;
        uint64_t reserved_8_15                : 8;
        uint64_t row_lsb                      : 3;
        uint64_t bank8                        : 1;
        uint64_t reserved_20_63               : 44;
#endif
        } s;
        struct cvmx_lmcx_dual_memcfg_s        cn50xx;
        struct cvmx_lmcx_dual_memcfg_s        cn52xx;
        struct cvmx_lmcx_dual_memcfg_s        cn52xxp1;
        struct cvmx_lmcx_dual_memcfg_s        cn56xx;
        struct cvmx_lmcx_dual_memcfg_s        cn56xxp1;
        struct cvmx_lmcx_dual_memcfg_s        cn58xx;
        struct cvmx_lmcx_dual_memcfg_s        cn58xxp1;
        struct cvmx_lmcx_dual_memcfg_cn63xx
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_19_63               : 45;
        uint64_t row_lsb                      : 3;  /**< See LMC*_CONFIG[ROW_LSB] */
        uint64_t reserved_8_15                : 8;
        uint64_t cs_mask                      : 8;  /**< Chip select mask.
                                                         This mask corresponds to the 8 chip selects for a memory
                                                         configuration.  Each reference address will assert one of
                                                         the chip selects.  If that chip select has its
                                                         corresponding CS_MASK bit set, then the "config1"
                                                         parameters are used, otherwise the "config0" parameters
                                                         are used.  See additional notes below.
                                                         [7:4] */
#else
        uint64_t cs_mask                      : 8;
        uint64_t reserved_8_15                : 8;
        uint64_t row_lsb                      : 3;
        uint64_t reserved_19_63               : 45;
#endif
        } cn63xx;
        struct cvmx_lmcx_dual_memcfg_cn63xx   cn63xxp1;
};
typedef union cvmx_lmcx_dual_memcfg cvmx_lmcx_dual_memcfg_t;

3750 /**
3751  * cvmx_lmc#_ecc_synd
3752  *
3753  * LMC_ECC_SYND = MRD ECC Syndromes
3754  *
3755  */
3756 union cvmx_lmcx_ecc_synd
3757 {
3758         uint64_t u64;                               /* Whole register as one raw 64-bit word */
3759         struct cvmx_lmcx_ecc_synd_s
3760         {
             /* Fields are declared MSB-first for big-endian and LSB-first
                otherwise, so the bit positions match the hardware register
                layout under either byte order. */
3761 #if __BYTE_ORDER == __BIG_ENDIAN
3762         uint64_t reserved_32_63               : 32;
3763         uint64_t mrdsyn3                      : 8;  /**< MRD ECC Syndrome Quad3
3764                                                          MRDSYN3 corresponds to DQ[63:0]_c1_p1
3765                                                            where _cC_pP denotes cycle C and phase P */
3766         uint64_t mrdsyn2                      : 8;  /**< MRD ECC Syndrome Quad2
3767                                                          MRDSYN2 corresponds to DQ[63:0]_c1_p0
3768                                                            where _cC_pP denotes cycle C and phase P */
3769         uint64_t mrdsyn1                      : 8;  /**< MRD ECC Syndrome Quad1
3770                                                          MRDSYN1 corresponds to DQ[63:0]_c0_p1
3771                                                            where _cC_pP denotes cycle C and phase P */
3772         uint64_t mrdsyn0                      : 8;  /**< MRD ECC Syndrome Quad0
3773                                                          MRDSYN0 corresponds to DQ[63:0]_c0_p0
3774                                                            where _cC_pP denotes cycle C and phase P */
3775 #else
3776         uint64_t mrdsyn0                      : 8;
3777         uint64_t mrdsyn1                      : 8;
3778         uint64_t mrdsyn2                      : 8;
3779         uint64_t mrdsyn3                      : 8;
3780         uint64_t reserved_32_63               : 32;
3781 #endif
3782         } s;
             /* Every supported chip model uses the common layout above. */
3783         struct cvmx_lmcx_ecc_synd_s           cn30xx;
3784         struct cvmx_lmcx_ecc_synd_s           cn31xx;
3785         struct cvmx_lmcx_ecc_synd_s           cn38xx;
3786         struct cvmx_lmcx_ecc_synd_s           cn38xxp2;
3787         struct cvmx_lmcx_ecc_synd_s           cn50xx;
3788         struct cvmx_lmcx_ecc_synd_s           cn52xx;
3789         struct cvmx_lmcx_ecc_synd_s           cn52xxp1;
3790         struct cvmx_lmcx_ecc_synd_s           cn56xx;
3791         struct cvmx_lmcx_ecc_synd_s           cn56xxp1;
3792         struct cvmx_lmcx_ecc_synd_s           cn58xx;
3793         struct cvmx_lmcx_ecc_synd_s           cn58xxp1;
3794         struct cvmx_lmcx_ecc_synd_s           cn63xx;
3795         struct cvmx_lmcx_ecc_synd_s           cn63xxp1;
3796 };
3797 typedef union cvmx_lmcx_ecc_synd cvmx_lmcx_ecc_synd_t;
3798
3799 /**
3800  * cvmx_lmc#_fadr
3801  *
3802  * LMC_FADR = LMC Failing Address Register (SEC/DED/NXM)
3803  *
3804  * This register only captures the first transaction with ecc/nxm errors. A DED/NXM error can
3805  * over-write this register with its failing addresses if the first error was a SEC. If you write
3806  * LMC*_CONFIG->SEC_ERR/DED_ERR/NXM_ERR then it will clear the error bits and capture the
3807  * next failing address.
3808  *
3809  * If FDIMM is 2 that means the error is in the higher bits DIMM.
3810  */
3811 union cvmx_lmcx_fadr
3812 {
3813         uint64_t u64;                               /* Whole register as one raw 64-bit word */
             /* Common view: all 64 bits reserved; real field layouts are in
                the per-model views below. */
3814         struct cvmx_lmcx_fadr_s
3815         {
3816 #if __BYTE_ORDER == __BIG_ENDIAN
3817         uint64_t reserved_0_63                : 64;
3818 #else
3819         uint64_t reserved_0_63                : 64;
3820 #endif
3821         } s;
             /* cn30xx-family layout: 14-bit row / 12-bit column addresses. */
3822         struct cvmx_lmcx_fadr_cn30xx
3823         {
3824 #if __BYTE_ORDER == __BIG_ENDIAN
3825         uint64_t reserved_32_63               : 32;
3826         uint64_t fdimm                        : 2;  /**< Failing DIMM# */
3827         uint64_t fbunk                        : 1;  /**< Failing Rank */
3828         uint64_t fbank                        : 3;  /**< Failing Bank[2:0] */
3829         uint64_t frow                         : 14; /**< Failing Row Address[13:0] */
3830         uint64_t fcol                         : 12; /**< Failing Column Start Address[11:0]
3831                                                          Represents the Failing read's starting column address
3832                                                          (and not the exact column address in which the SEC/DED
3833                                                          was detected) */
3834 #else
3835         uint64_t fcol                         : 12;
3836         uint64_t frow                         : 14;
3837         uint64_t fbank                        : 3;
3838         uint64_t fbunk                        : 1;
3839         uint64_t fdimm                        : 2;
3840         uint64_t reserved_32_63               : 32;
3841 #endif
3842         } cn30xx;
3843         struct cvmx_lmcx_fadr_cn30xx          cn31xx;
3844         struct cvmx_lmcx_fadr_cn30xx          cn38xx;
3845         struct cvmx_lmcx_fadr_cn30xx          cn38xxp2;
3846         struct cvmx_lmcx_fadr_cn30xx          cn50xx;
3847         struct cvmx_lmcx_fadr_cn30xx          cn52xx;
3848         struct cvmx_lmcx_fadr_cn30xx          cn52xxp1;
3849         struct cvmx_lmcx_fadr_cn30xx          cn56xx;
3850         struct cvmx_lmcx_fadr_cn30xx          cn56xxp1;
3851         struct cvmx_lmcx_fadr_cn30xx          cn58xx;
3852         struct cvmx_lmcx_fadr_cn30xx          cn58xxp1;
             /* CN63XX layout: frow widened to 16 bits, fcol to 14 bits. */
3853         struct cvmx_lmcx_fadr_cn63xx
3854         {
3855 #if __BYTE_ORDER == __BIG_ENDIAN
3856         uint64_t reserved_36_63               : 28;
3857         uint64_t fdimm                        : 2;  /**< Failing DIMM# */
3858         uint64_t fbunk                        : 1;  /**< Failing Rank */
3859         uint64_t fbank                        : 3;  /**< Failing Bank[2:0] */
3860         uint64_t frow                         : 16; /**< Failing Row Address[15:0] */
3861         uint64_t fcol                         : 14; /**< Failing Column Address[13:0]
3862                                                          Technically, represents the address of the 128b data
3863                                                          that had an ecc error, i.e., fcol[0] is always 0. Can
3864                                                          be used in conjunction with LMC*_CONFIG[DED_ERR] to
3865                                                          isolate the 64b chunk of data in error */
3866 #else
3867         uint64_t fcol                         : 14;
3868         uint64_t frow                         : 16;
3869         uint64_t fbank                        : 3;
3870         uint64_t fbunk                        : 1;
3871         uint64_t fdimm                        : 2;
3872         uint64_t reserved_36_63               : 28;
3873 #endif
3874         } cn63xx;
3875         struct cvmx_lmcx_fadr_cn63xx          cn63xxp1;
3876 };
3877 typedef union cvmx_lmcx_fadr cvmx_lmcx_fadr_t;
3878
3879 /**
3880  * cvmx_lmc#_ifb_cnt
3881  *
3882  * LMC_IFB_CNT  = Performance Counters
3883  *
3884  */
3885 union cvmx_lmcx_ifb_cnt
3886 {
3887         uint64_t u64;                               /* Whole register as one raw 64-bit word */
3888         struct cvmx_lmcx_ifb_cnt_s
3889         {
3890 #if __BYTE_ORDER == __BIG_ENDIAN
3891         uint64_t ifbcnt                       : 64; /**< Performance Counter
3892                                                          64-bit counter that increments every
3893                                                          CK cycle there is something in the in-flight buffer. */
3894 #else
3895         uint64_t ifbcnt                       : 64;
3896 #endif
3897         } s;
             /* Full 64-bit counter exists only on CN63XX; older models use the
                separate LMC_IFB_CNT_HI/LO register pair defined below. */
3898         struct cvmx_lmcx_ifb_cnt_s            cn63xx;
3899         struct cvmx_lmcx_ifb_cnt_s            cn63xxp1;
3900 };
3901 typedef union cvmx_lmcx_ifb_cnt cvmx_lmcx_ifb_cnt_t;
3902
3903 /**
3904  * cvmx_lmc#_ifb_cnt_hi
3905  *
3906  * LMC_IFB_CNT_HI  = Performance Counters
3907  *
3908  */
3909 union cvmx_lmcx_ifb_cnt_hi
3910 {
3911         uint64_t u64;                               /* Whole register as one raw 64-bit word */
3912         struct cvmx_lmcx_ifb_cnt_hi_s
3913         {
3914 #if __BYTE_ORDER == __BIG_ENDIAN
3915         uint64_t reserved_32_63               : 32;
3916         uint64_t ifbcnt_hi                    : 32; /**< Performance Counter to measure Bus Utilization
3917                                                          Upper 32-bits of 64-bit counter that increments every
3918                                                          cycle there is something in the in-flight buffer. */
3919 #else
3920         uint64_t ifbcnt_hi                    : 32;
3921         uint64_t reserved_32_63               : 32;
3922 #endif
3923         } s;
             /* Pre-CN63XX models only; CN63XX exposes the full 64-bit counter
                in LMC_IFB_CNT instead. */
3924         struct cvmx_lmcx_ifb_cnt_hi_s         cn30xx;
3925         struct cvmx_lmcx_ifb_cnt_hi_s         cn31xx;
3926         struct cvmx_lmcx_ifb_cnt_hi_s         cn38xx;
3927         struct cvmx_lmcx_ifb_cnt_hi_s         cn38xxp2;
3928         struct cvmx_lmcx_ifb_cnt_hi_s         cn50xx;
3929         struct cvmx_lmcx_ifb_cnt_hi_s         cn52xx;
3930         struct cvmx_lmcx_ifb_cnt_hi_s         cn52xxp1;
3931         struct cvmx_lmcx_ifb_cnt_hi_s         cn56xx;
3932         struct cvmx_lmcx_ifb_cnt_hi_s         cn56xxp1;
3933         struct cvmx_lmcx_ifb_cnt_hi_s         cn58xx;
3934         struct cvmx_lmcx_ifb_cnt_hi_s         cn58xxp1;
3935 };
3936 typedef union cvmx_lmcx_ifb_cnt_hi cvmx_lmcx_ifb_cnt_hi_t;
3937
3938 /**
3939  * cvmx_lmc#_ifb_cnt_lo
3940  *
3941  * LMC_IFB_CNT_LO  = Performance Counters
3942  *
3943  */
3944 union cvmx_lmcx_ifb_cnt_lo
3945 {
3946         uint64_t u64;                               /* Whole register as one raw 64-bit word */
3947         struct cvmx_lmcx_ifb_cnt_lo_s
3948         {
3949 #if __BYTE_ORDER == __BIG_ENDIAN
3950         uint64_t reserved_32_63               : 32;
3951         uint64_t ifbcnt_lo                    : 32; /**< Performance Counter
3952                                                          Low 32-bits of 64-bit counter that increments every
3953                                                          cycle there is something in the in-flight buffer. */
3954 #else
3955         uint64_t ifbcnt_lo                    : 32;
3956         uint64_t reserved_32_63               : 32;
3957 #endif
3958         } s;
             /* Pre-CN63XX models only; CN63XX exposes the full 64-bit counter
                in LMC_IFB_CNT instead. */
3959         struct cvmx_lmcx_ifb_cnt_lo_s         cn30xx;
3960         struct cvmx_lmcx_ifb_cnt_lo_s         cn31xx;
3961         struct cvmx_lmcx_ifb_cnt_lo_s         cn38xx;
3962         struct cvmx_lmcx_ifb_cnt_lo_s         cn38xxp2;
3963         struct cvmx_lmcx_ifb_cnt_lo_s         cn50xx;
3964         struct cvmx_lmcx_ifb_cnt_lo_s         cn52xx;
3965         struct cvmx_lmcx_ifb_cnt_lo_s         cn52xxp1;
3966         struct cvmx_lmcx_ifb_cnt_lo_s         cn56xx;
3967         struct cvmx_lmcx_ifb_cnt_lo_s         cn56xxp1;
3968         struct cvmx_lmcx_ifb_cnt_lo_s         cn58xx;
3969         struct cvmx_lmcx_ifb_cnt_lo_s         cn58xxp1;
3970 };
3971 typedef union cvmx_lmcx_ifb_cnt_lo cvmx_lmcx_ifb_cnt_lo_t;
3972
3973 /**
3974  * cvmx_lmc#_int
3975  *
3976  * LMC_INT = LMC Interrupt Register
3977  *
3978  */
3979 union cvmx_lmcx_int
3980 {
3981         uint64_t u64;                               /* Whole register as one raw 64-bit word */
3982         struct cvmx_lmcx_int_s
3983         {
             /* Error bits are write-1-to-clear, per the field descriptions. */
3984 #if __BYTE_ORDER == __BIG_ENDIAN
3985         uint64_t reserved_9_63                : 55;
3986         uint64_t ded_err                      : 4;  /**< Double Error detected (DED) of Rd Data
3987                                                          [0] corresponds to DQ[63:0]_c0_p0
3988                                                          [1] corresponds to DQ[63:0]_c0_p1
3989                                                          [2] corresponds to DQ[63:0]_c1_p0
3990                                                          [3] corresponds to DQ[63:0]_c1_p1
3991                                                          where _cC_pP denotes cycle C and phase P
3992                                                          Write of 1 will clear the corresponding error bit */
3993         uint64_t sec_err                      : 4;  /**< Single Error (corrected) of Rd Data
3994                                                          [0] corresponds to DQ[63:0]_c0_p0
3995                                                          [1] corresponds to DQ[63:0]_c0_p1
3996                                                          [2] corresponds to DQ[63:0]_c1_p0
3997                                                          [3] corresponds to DQ[63:0]_c1_p1
3998                                                          where _cC_pP denotes cycle C and phase P
3999                                                          Write of 1 will clear the corresponding error bit */
4000         uint64_t nxm_wr_err                   : 1;  /**< Write to non-existent memory
4001                                                          Write of 1 will clear the corresponding error bit */
4002 #else
4003         uint64_t nxm_wr_err                   : 1;
4004         uint64_t sec_err                      : 4;
4005         uint64_t ded_err                      : 4;
4006         uint64_t reserved_9_63                : 55;
4007 #endif
4008         } s;
             /* This interrupt register exists only on CN63XX-family parts. */
4009         struct cvmx_lmcx_int_s                cn63xx;
4010         struct cvmx_lmcx_int_s                cn63xxp1;
4011 };
4012 typedef union cvmx_lmcx_int cvmx_lmcx_int_t;
4013
4014 /**
4015  * cvmx_lmc#_int_en
4016  *
4017  * LMC_INT_EN = LMC Interrupt Enable Register
4018  *
4019  */
4020 union cvmx_lmcx_int_en
4021 {
4022         uint64_t u64;                               /* Whole register as one raw 64-bit word */
4023         struct cvmx_lmcx_int_en_s
4024         {
             /* Enable bits correspond one-to-one with the error bits in
                LMC_INT (ded_err, sec_err, nxm_wr_err). */
4025 #if __BYTE_ORDER == __BIG_ENDIAN
4026         uint64_t reserved_3_63                : 61;
4027         uint64_t intr_ded_ena                 : 1;  /**< ECC Double Error Detect(DED) Interrupt Enable bit
4028                                                          When set, the memory controller raises a processor
4029                                                          interrupt on detecting an uncorrectable Dbl Bit ECC
4030                                                          error. */
4031         uint64_t intr_sec_ena                 : 1;  /**< ECC Single Error Correct(SEC) Interrupt Enable bit
4032                                                          When set, the memory controller raises a processor
4033                                                          interrupt on detecting a correctable Single Bit ECC
4034                                                          error. */
4035         uint64_t intr_nxm_wr_ena              : 1;  /**< Non-existent memory Write Error Interrupt Enable bit
4036                                                          When set, the memory controller raises a processor
4037                                                          interrupt on detecting a non-existent memory write */
4038 #else
4039         uint64_t intr_nxm_wr_ena              : 1;
4040         uint64_t intr_sec_ena                 : 1;
4041         uint64_t intr_ded_ena                 : 1;
4042         uint64_t reserved_3_63                : 61;
4043 #endif
4044         } s;
             /* This interrupt-enable register exists only on CN63XX-family parts. */
4045         struct cvmx_lmcx_int_en_s             cn63xx;
4046         struct cvmx_lmcx_int_en_s             cn63xxp1;
4047 };
4048 typedef union cvmx_lmcx_int_en cvmx_lmcx_int_en_t;
4049
4050 /**
4051  * cvmx_lmc#_mem_cfg0
4052  *
4053  * Specify the RSL base addresses for the block
4054  *
4055  *                  LMC_MEM_CFG0 = LMC Memory Configuration Register0
4056  *
4057  * This register controls certain parameters of  Memory Configuration
4058  */
4059 union cvmx_lmcx_mem_cfg0
4060 {
4061         uint64_t u64;                               /* Whole register as one raw 64-bit word */
4062         struct cvmx_lmcx_mem_cfg0_s
4063         {
             /* Fields are declared MSB-first on big-endian and LSB-first
                otherwise, so bit positions match the hardware register
                layout under either byte order. */
4064 #if __BYTE_ORDER == __BIG_ENDIAN
4065         uint64_t reserved_32_63               : 32;
4066         uint64_t reset                        : 1;  /**< Reset oneshot pulse for refresh counter,
4067                                                          and LMC_OPS_CNT_*, LMC_IFB_CNT_*, and LMC_DCLK_CNT_*
4068                                                          CSR's. SW should write this to a one, then re-write
4069                                                          it to a zero to cause the reset. */
4070         uint64_t silo_qc                      : 1;  /**< Adds a Quarter Cycle granularity to generate
4071                                                          dqs pulse generation for silo.
4072                                                          Combination of Silo_HC and Silo_QC gives the
4073                                                          ability to position the read enable with quarter
4074                                                          cycle resolution. This is applied on all the bytes
4075                                                          uniformly. */
4076         uint64_t bunk_ena                     : 1;  /**< Bunk Enable aka RANK ena (for use with dual-rank DIMMs)
4077                                                          For dual-rank DIMMs, the bunk_ena bit will enable
4078                                                          the drive of the CS_N[1:0] pins based on the
4079                                                          (pbank_lsb-1) address bit.
4080                                                          Write 0 for SINGLE ranked DIMM's. */
4081         uint64_t ded_err                      : 4;  /**< Double Error detected (DED) of Rd Data
4082                                                          In 128b mode, ecc is calculated on 1 cycle worth of data
4083                                                          [25] corresponds to DQ[63:0], Phase0
4084                                                          [26] corresponds to DQ[127:64], Phase0
4085                                                          [27] corresponds to DQ[63:0], Phase1
4086                                                          [28] corresponds to DQ[127:64], Phase1
4087                                                          In 64b mode, ecc is calculated on 2 cycle worth of data
4088                                                          [25] corresponds to DQ[63:0], Phase0, cycle0
4089                                                          [26] corresponds to DQ[63:0], Phase0, cycle1
4090                                                          [27] corresponds to DQ[63:0], Phase1, cycle0
4091                                                          [28] corresponds to DQ[63:0], Phase1, cycle1
4092                                                          Write of 1 will clear the corresponding error bit */
4093         uint64_t sec_err                      : 4;  /**< Single Error (corrected) of Rd Data
4094                                                          In 128b mode, ecc is calculated on 1 cycle worth of data
4095                                                          [21] corresponds to DQ[63:0], Phase0
4096                                                          [22] corresponds to DQ[127:64], Phase0
4097                                                          [23] corresponds to DQ[63:0], Phase1
4098                                                          [24] corresponds to DQ[127:64], Phase1
4099                                                          In 64b mode, ecc is calculated on 2 cycle worth of data
4100                                                          [21] corresponds to DQ[63:0], Phase0, cycle0
4101                                                          [22] corresponds to DQ[63:0], Phase0, cycle1
4102                                                          [23] corresponds to DQ[63:0], Phase1, cycle0
4103                                                          [24] corresponds to DQ[63:0], Phase1, cycle1
4104                                                          Write of 1 will clear the corresponding error bit */
4105         uint64_t intr_ded_ena                 : 1;  /**< ECC Double Error Detect(DED) Interrupt Enable bit
4106                                                          When set, the memory controller raises a processor
4107                                                          interrupt on detecting an uncorrectable Dbl Bit ECC
4108                                                          error. */
4109         uint64_t intr_sec_ena                 : 1;  /**< ECC Single Error Correct(SEC) Interrupt Enable bit
4110                                                          When set, the memory controller raises a processor
4111                                                          interrupt on detecting a correctable Single Bit ECC
4112                                                          error. */
4113         uint64_t tcl                          : 4;  /**< This register is not used */
4114         uint64_t ref_int                      : 6;  /**< Refresh interval represented in \#of 512 dclk increments.
4115                                                          Program this to RND-DN(tREFI/clkPeriod/512)
4116                                                             - 000000: RESERVED
4117                                                             - 000001: 1 * 512  = 512 dclks
4118                                                              - ...
4119                                                             - 111111: 63 * 512 = 32256 dclks */
4120         uint64_t pbank_lsb                    : 4;  /**< Physical Bank address select
4121                                                                                  Reverting to the explanation for ROW_LSB,
4122                                                                                  PBank_LSB would be Row_LSB bit + \#rowbits
4123                                                                                  + \#rankbits
4124                                                                                  In the 512MB DIMM Example, assuming no rank bits:
4125                                                                                  pbank_lsb=mem_addr[15+13] for 64 b mode
4126                                                                                           =mem_addr[16+13] for 128b mode
4127                                                                                  Hence the parameter
4128                                                          0000:pbank[1:0] = mem_adr[28:27]    / rank = mem_adr[26] (if bunk_ena)
4129                                                          0001:pbank[1:0] = mem_adr[29:28]    / rank = mem_adr[27]      "
4130                                                          0010:pbank[1:0] = mem_adr[30:29]    / rank = mem_adr[28]      "
4131                                                          0011:pbank[1:0] = mem_adr[31:30]    / rank = mem_adr[29]      "
4132                                                          0100:pbank[1:0] = mem_adr[32:31]    / rank = mem_adr[30]      "
4133                                                          0101:pbank[1:0] = mem_adr[33:32]    / rank = mem_adr[31]      "
4134                                                          0110:pbank[1:0] =[1'b0,mem_adr[33]] / rank = mem_adr[32]      "
4135                                                          0111:pbank[1:0] =[2'b0]             / rank = mem_adr[33]      "
4136                                                          1000-1111: RESERVED */
4137         uint64_t row_lsb                      : 3;  /**< Encoding used to determine which memory address
4138                                                          bit position represents the low order DDR ROW address.
4139                                                          The processor's memory address[33:7] needs to be
4140                                                          translated to DRAM addresses (bnk,row,col,rank and dimm)
4141                                                          and that is a function of the following:
4142                                                          1. \# Banks (4 or 8) - spec'd by BANK8
4143                                                          2. Datapath Width(64 or 128) - MODE128b
4144                                                          3. \# Ranks in a DIMM - spec'd by BUNK_ENA
4145                                                          4. \# DIMM's in the system
4146                                                          5. \# Column Bits of the memory part - spec'd indirectly
4147                                                          by this register.
4148                                                          6. \# Row Bits of the memory part - spec'd indirectly
4149                                                          by the register below (PBANK_LSB).
4150                                                          Illustration: For Micron's MT18HTF6472A,512MB DDR2
4151                                                          Unbuffered DIMM which uses 256Mb parts (8M x 8 x 4),
4152                                                          \# Banks = 4 -> 2 bits of BA
4153                                                          \# Columns = 1K -> 10 bits of Col
4154                                                          \# Rows = 8K -> 13 bits of Row
4155                                                          Assuming that the total Data width is 128, this is how
4156                                                          we arrive at row_lsb:
4157                                                          Col Address starts from mem_addr[4] for 128b (16Bytes)
4158                                                          dq width or from mem_addr[3] for 64b (8Bytes) dq width
4159                                                          \# col + \# bank = 12. Hence row_lsb is mem_adr[15] for
4160                                                          64bmode or mem_adr[16] for 128b mode. Hence row_lsb
4161                                                          parameter should be set to 001 (64b) or 010 (128b).
4162                                                               - 000: row_lsb = mem_adr[14]
4163                                                               - 001: row_lsb = mem_adr[15]
4164                                                               - 010: row_lsb = mem_adr[16]
4165                                                               - 011: row_lsb = mem_adr[17]
4166                                                               - 100: row_lsb = mem_adr[18]
4167                                                               - 101-111:row_lsb = RESERVED */
4168         uint64_t ecc_ena                      : 1;  /**< ECC Enable: When set will enable the 8b ECC
4169                                                          check/correct logic. Should be 1 when used with DIMMs
4170                                                          with ECC. 0, otherwise.
4171                                                          When this mode is turned on, DQ[71:64] and DQ[143:137]
4172                                                          on writes, will contain the ECC code generated for
4173                                                          the lower 64 and upper 64 bits of data which will be
4174                                                          written in the memory and then later on reads, used
4175                                                          to check for Single bit error (which will be auto-
4176                                                          corrected) and Double Bit error (which will be
4177                                                          reported). When not turned on, DQ[71:64] and DQ[143:137]
4178                                                          are driven to 0.  Please refer to SEC_ERR, DED_ERR,
4179                                                          LMC_FADR, and LMC_ECC_SYND registers
4180                                                          for diagnostics information when there is an error. */
4181         uint64_t init_start                   : 1;  /**< A 0->1 transition starts the DDR memory initialization
4182                                                          sequence. */
4183 #else
4184         uint64_t init_start                   : 1;
4185         uint64_t ecc_ena                      : 1;
4186         uint64_t row_lsb                      : 3;
4187         uint64_t pbank_lsb                    : 4;
4188         uint64_t ref_int                      : 6;
4189         uint64_t tcl                          : 4;
4190         uint64_t intr_sec_ena                 : 1;
4191         uint64_t intr_ded_ena                 : 1;
4192         uint64_t sec_err                      : 4;
4193         uint64_t ded_err                      : 4;
4194         uint64_t bunk_ena                     : 1;
4195         uint64_t silo_qc                      : 1;
4196         uint64_t reset                        : 1;
4197         uint64_t reserved_32_63               : 32;
4198 #endif
4199         } s;
             /* Pre-CN63XX models all share the common layout above. */
4200         struct cvmx_lmcx_mem_cfg0_s           cn30xx;
4201         struct cvmx_lmcx_mem_cfg0_s           cn31xx;
4202         struct cvmx_lmcx_mem_cfg0_s           cn38xx;
4203         struct cvmx_lmcx_mem_cfg0_s           cn38xxp2;
4204         struct cvmx_lmcx_mem_cfg0_s           cn50xx;
4205         struct cvmx_lmcx_mem_cfg0_s           cn52xx;
4206         struct cvmx_lmcx_mem_cfg0_s           cn52xxp1;
4207         struct cvmx_lmcx_mem_cfg0_s           cn56xx;
4208         struct cvmx_lmcx_mem_cfg0_s           cn56xxp1;
4209         struct cvmx_lmcx_mem_cfg0_s           cn58xx;
4210         struct cvmx_lmcx_mem_cfg0_s           cn58xxp1;
4211 };
4212 typedef union cvmx_lmcx_mem_cfg0 cvmx_lmcx_mem_cfg0_t;
4213
4214 /**
4215  * cvmx_lmc#_mem_cfg1
4216  *
4217  * LMC_MEM_CFG1 = LMC Memory Configuration Register1
4218  *
4219  * This register controls the External Memory Configuration Timing Parameters. Please refer to the
4220  * appropriate DDR part spec from your memory vendor for the various values in this CSR.
4221  * The details of each of these timing parameters can be found in the JEDEC spec or the vendor
4222  * spec of the memory parts.
4223  */
/* Bit layout for LMC_MEM_CFG1 (DRAM timing parameters: tRRD, CAS latency,
 * tMRD, tRFC, tRP, tWTR, tRCD, tRAS).  The #if/#else mirrors the field list
 * so that each field lands on the same hardware bit positions on both
 * big- and little-endian compiles; field order and widths must not change. */
union cvmx_lmcx_mem_cfg1
{
        uint64_t u64;                               /* Whole-register access. */
        struct cvmx_lmcx_mem_cfg1_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63               : 32;
        uint64_t comp_bypass                  : 1;  /**< Compensation bypass. */
        uint64_t trrd                         : 3;  /**< tRRD cycles: ACT-ACT timing parameter for different
                                                         banks. (Represented in tCYC cycles == 1dclks)
                                                         TYP=15ns (66MHz=1,167MHz=3,200MHz=3)
                                                         For DDR2, TYP=7.5ns
                                                            - 000: RESERVED
                                                            - 001: 1 tCYC
                                                            - 010: 2 tCYC
                                                            - 011: 3 tCYC
                                                            - 100: 4 tCYC
                                                            - 101: 5 tCYC
                                                            - 110: 6 tCYC
                                                            - 111: 7 tCYC */
        uint64_t caslat                       : 3;  /**< CAS Latency Encoding which is loaded into each DDR
                                                         SDRAM device (MRS[6:4]) upon power-up (INIT_START=1).
                                                         (Represented in tCYC cycles == 1 dclks)
                                                            000 RESERVED
                                                            001 RESERVED
                                                            010 2.0 tCYC
                                                            011 3.0 tCYC
                                                            100 4.0 tCYC
                                                            101 5.0 tCYC
                                                            110 6.0 tCYC
                                                            111 RESERVED
                                                         eg). The parameters TSKW, SILO_HC, and SILO_QC can
                                                         account for 1/4 cycle granularity in board/etch delays. */
        uint64_t tmrd                         : 3;  /**< tMRD Cycles
                                                         (Represented in dclk tCYC)
                                                         For DDR2, its TYP 2*tCYC)
                                                             - 000: RESERVED
                                                             - 001: 1
                                                             - 010: 2
                                                             - 011: 3
                                                             - 100: 4
                                                             - 101-111: RESERVED */
        uint64_t trfc                         : 5;  /**< Indicates tRFC constraints.
                                                         Set TRFC (CSR field) = RNDUP[tRFC(ns)/4*tcyc(ns)],
                                                         where tRFC is from the DDR2 spec, and tcyc(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         For example, with 2Gb, DDR2-667 parts,
                                                         typ tRFC=195ns, so TRFC (CSR field) = 0x11.
                                                             TRFC (binary): Corresponding tRFC Cycles
                                                             ----------------------------------------
                                                             - 00000-00001: RESERVED
                                                             - 00010: 0-8
                                                             - 00011: 9-12
                                                             - 00100: 13-16
                                                             - ...
                                                             - 11110: 117-120
                                                             - 11111: 121-124 */
        uint64_t trp                          : 4;  /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)]
                                                         (Represented in tCYC cycles == 1dclk)
                                                         TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
                                                             - 0000: RESERVED
                                                             - 0001: 1
                                                             - ...
                                                             - 1001: 9
                                                             - 1010-1111: RESERVED
                                                         When using parts with 8 banks (LMC_DDR2_CTL->BANK8
                                                         is 1), load tRP cycles + 1 into this register. */
        uint64_t twtr                         : 4;  /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)]
                                                         Last Wr Data to Rd Command time.
                                                         (Represented in tCYC cycles == 1dclks)
                                                         TYP=15ns (66MHz=1,167MHz=3,400MHz=6, for TYP)
                                                             - 0000: RESERVED
                                                             - 0001: 1
                                                             - ...
                                                             - 0111: 7
                                                             - 1000-1111: RESERVED */
        uint64_t trcd                         : 4;  /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)]
                                                         (Represented in tCYC cycles == 1dclk)
                                                         TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
                                                             - 0000: RESERVED
                                                             - 0001: 2 (2 is the smallest value allowed)
                                                             - 0010: 2
                                                             - ...
                                                             - 1001: 9
                                                             - 1010-1111: RESERVED
                                                         In 2T mode, make this register TRCD-1, not going
                                                         below 2. */
        uint64_t tras                         : 5;  /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)]
                                                         (Represented in tCYC cycles == 1 dclk)
                                                             - 00000-0001: RESERVED
                                                             - 00010: 2
                                                             - ...
                                                             - 11111: 31 */
#else
        /* Little-endian: identical fields, listed LSB-first. */
        uint64_t tras                         : 5;
        uint64_t trcd                         : 4;
        uint64_t twtr                         : 4;
        uint64_t trp                          : 4;
        uint64_t trfc                         : 5;
        uint64_t tmrd                         : 3;
        uint64_t caslat                       : 3;
        uint64_t trrd                         : 3;
        uint64_t comp_bypass                  : 1;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        struct cvmx_lmcx_mem_cfg1_s           cn30xx;
        struct cvmx_lmcx_mem_cfg1_s           cn31xx;
        /* cn38xx variant: no COMP_BYPASS bit (bit 31 reserved) and slightly
         * different legal encodings for trrd/caslat/trp/trcd/tras. */
        struct cvmx_lmcx_mem_cfg1_cn38xx
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_31_63               : 33;
        uint64_t trrd                         : 3;  /**< tRRD cycles: ACT-ACT timing parameter for different
                                                         banks. (Represented in tCYC cycles == 1dclks)
                                                         TYP=15ns (66MHz=1,167MHz=3,200MHz=3)
                                                         For DDR2, TYP=7.5ns
                                                            - 000: RESERVED
                                                            - 001: 1 tCYC
                                                            - 010: 2 tCYC
                                                            - 011: 3 tCYC
                                                            - 100: 4 tCYC
                                                            - 101: 5 tCYC
                                                            - 110-111: RESERVED */
        uint64_t caslat                       : 3;  /**< CAS Latency Encoding which is loaded into each DDR
                                                         SDRAM device (MRS[6:4]) upon power-up (INIT_START=1).
                                                         (Represented in tCYC cycles == 1 dclks)
                                                            000 RESERVED
                                                            001 RESERVED
                                                            010 2.0 tCYC
                                                            011 3.0 tCYC
                                                            100 4.0 tCYC
                                                            101 5.0 tCYC
                                                            110 6.0 tCYC (DDR2)
                                                                2.5 tCYC (DDR1)
                                                            111 RESERVED
                                                         eg). The parameters TSKW, SILO_HC, and SILO_QC can
                                                         account for 1/4 cycle granularity in board/etch delays. */
        uint64_t tmrd                         : 3;  /**< tMRD Cycles
                                                         (Represented in dclk tCYC)
                                                         For DDR2, its TYP 2*tCYC)
                                                             - 000: RESERVED
                                                             - 001: 1
                                                             - 010: 2
                                                             - 011: 3
                                                             - 100: 4
                                                             - 101-111: RESERVED */
        uint64_t trfc                         : 5;  /**< Indicates tRFC constraints.
                                                         Set TRFC (CSR field) = RNDUP[tRFC(ns)/4*tcyc(ns)],
                                                         where tRFC is from the DDR2 spec, and tcyc(ns)
                                                         is the DDR clock frequency (not data rate).
                                                         For example, with 2Gb, DDR2-667 parts,
                                                         typ tRFC=195ns, so TRFC (CSR field) = 0x11.
                                                             TRFC (binary): Corresponding tRFC Cycles
                                                             ----------------------------------------
                                                             - 00000-00001: RESERVED
                                                             - 00010: 0-8
                                                             - 00011: 9-12
                                                             - 00100: 13-16
                                                             - ...
                                                             - 11110: 117-120
                                                             - 11111: 121-124 */
        uint64_t trp                          : 4;  /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)]
                                                         (Represented in tCYC cycles == 1dclk)
                                                         TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
                                                             - 0000: RESERVED
                                                             - 0001: 1
                                                             - ...
                                                             - 0111: 7
                                                             - 1000-1111: RESERVED
                                                         When using parts with 8 banks (LMC_DDR2_CTL->BANK8
                                                         is 1), load tRP cycles + 1 into this register. */
        uint64_t twtr                         : 4;  /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)]
                                                         Last Wr Data to Rd Command time.
                                                         (Represented in tCYC cycles == 1dclks)
                                                         TYP=15ns (66MHz=1,167MHz=3,400MHz=6, for TYP)
                                                             - 0000: RESERVED
                                                             - 0001: 1
                                                             - ...
                                                             - 0111: 7
                                                             - 1000-1111: RESERVED */
        uint64_t trcd                         : 4;  /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)]
                                                         (Represented in tCYC cycles == 1dclk)
                                                         TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP)
                                                             - 0000: RESERVED
                                                             - 0001: 2 (2 is the smallest value allowed)
                                                             - 0010: 2
                                                             - ...
                                                             - 0111: 7
                                                             - 1110-1111: RESERVED
                                                         NOTE(review): encodings 1000-1101 are not listed
                                                         here; the generic layout above marks 1010-1111 as
                                                         reserved -- confirm against the hardware manual.
                                                         In 2T mode, make this register TRCD-1, not going
                                                         below 2. */
        uint64_t tras                         : 5;  /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)]
                                                         (Represented in tCYC cycles == 1 dclk)
                                                         For DDR-I mode:
                                                         TYP=45ns (66MHz=3,167MHz=8,400MHz=18
                                                             - 00000-0001: RESERVED
                                                             - 00010: 2
                                                             - ...
                                                             - 10100: 20
                                                             - 10101-11111: RESERVED */
#else
        /* Little-endian: identical fields, listed LSB-first. */
        uint64_t tras                         : 5;
        uint64_t trcd                         : 4;
        uint64_t twtr                         : 4;
        uint64_t trp                          : 4;
        uint64_t trfc                         : 5;
        uint64_t tmrd                         : 3;
        uint64_t caslat                       : 3;
        uint64_t trrd                         : 3;
        uint64_t reserved_31_63               : 33;
#endif
        } cn38xx;
        struct cvmx_lmcx_mem_cfg1_cn38xx      cn38xxp2;
        struct cvmx_lmcx_mem_cfg1_s           cn50xx;
        struct cvmx_lmcx_mem_cfg1_cn38xx      cn52xx;
        struct cvmx_lmcx_mem_cfg1_cn38xx      cn52xxp1;
        struct cvmx_lmcx_mem_cfg1_cn38xx      cn56xx;
        struct cvmx_lmcx_mem_cfg1_cn38xx      cn56xxp1;
        struct cvmx_lmcx_mem_cfg1_cn38xx      cn58xx;
        struct cvmx_lmcx_mem_cfg1_cn38xx      cn58xxp1;
};
typedef union cvmx_lmcx_mem_cfg1 cvmx_lmcx_mem_cfg1_t;
4446
/**
 * cvmx_lmc#_modereg_params0
 *
 * Notes:
 * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
 *
 */
/* Bit layout for LMC_MODEREG_PARAMS0: values LMC programs into the DDR3
 * mode registers MR0-MR3 during init/leveling/self-refresh sequences.
 * Field order and widths must not change (hardware bit positions). */
union cvmx_lmcx_modereg_params0
{
        uint64_t u64;                               /* Whole-register access. */
        struct cvmx_lmcx_modereg_params0_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_25_63               : 39;
        uint64_t ppd                          : 1;  /**< DLL Control for precharge powerdown
                                                         0 = Slow exit (DLL off)
                                                         1 = Fast exit (DLL on)
                                                         LMC writes this value to MR0[PPD] in the selected DDR3 parts
                                                         during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
                                                         This value must equal the MR0[PPD] value in all the DDR3
                                                         parts attached to all ranks during normal operation. */
        uint64_t wrp                          : 3;  /**< Write recovery for auto precharge
                                                         Should be programmed to be equal to or greater than
                                                         RNDUP[tWR(ns)/tCYC(ns)]
                                                         000 = Reserved
                                                         001 = 5
                                                         010 = 6
                                                         011 = 7
                                                         100 = 8
                                                         101 = 10
                                                         110 = 12
                                                         111 = Reserved
                                                         LMC writes this value to MR0[WR] in the selected DDR3 parts
                                                         during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
                                                         This value must equal the MR0[WR] value in all the DDR3
                                                         parts attached to all ranks during normal operation. */
        uint64_t dllr                         : 1;  /**< DLL Reset
                                                         LMC writes this value to MR0[DLL] in the selected DDR3 parts
                                                         during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
                                                         The MR0[DLL] value must be 0 in all the DDR3
                                                         parts attached to all ranks during normal operation. */
        uint64_t tm                           : 1;  /**< Test Mode
                                                         LMC writes this value to MR0[TM] in the selected DDR3 parts
                                                         during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
                                                         The MR0[TM] value must be 0 in all the DDR3
                                                         parts attached to all ranks during normal operation. */
        uint64_t rbt                          : 1;  /**< Read Burst Type
                                                         1 = interleaved (fixed)
                                                         LMC writes this value to MR0[RBT] in the selected DDR3 parts
                                                         during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
                                                         The MR0[RBT] value must be 1 in all the DDR3
                                                         parts attached to all ranks during normal operation. */
        uint64_t cl                           : 4;  /**< CAS Latency
                                                         0010 = 5
                                                         0100 = 6
                                                         0110 = 7
                                                         1000 = 8
                                                         1010 = 9
                                                         1100 = 10
                                                         1110 = 11
                                                         0000 and all odd encodings (???1) = Reserved
                                                         LMC writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts
                                                         during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
                                                         This value must equal the MR0[CAS Latency / CL] value in all the DDR3
                                                         parts attached to all ranks during normal operation. */
        uint64_t bl                           : 2;  /**< Burst Length
                                                         0 = 8 (fixed)
                                                         LMC writes this value to MR0[BL] in the selected DDR3 parts
                                                         during power-up/init and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
                                                         The MR0[BL] value must be 0 in all the DDR3
                                                         parts attached to all ranks during normal operation. */
        uint64_t qoff                         : 1;  /**< Qoff Enable
                                                         0 = enable
                                                         1 = disable
                                                         LMC writes this value to MR1[Qoff] in the DDR3 parts in the selected ranks
                                                         during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh entry and exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK,INIT_STATUS] and
                                                         LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
                                                         The MR1[Qoff] value must be 0 in all the DDR3
                                                         parts attached to all ranks during normal operation. */
        uint64_t tdqs                         : 1;  /**< TDQS Enable
                                                         0 = disable
                                                         LMC writes this value to MR1[TDQS] in the DDR3 parts in the selected ranks
                                                         during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh entry and exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK,INIT_STATUS] and
                                                         LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
        uint64_t wlev                         : 1;  /**< Write Leveling Enable
                                                         0 = disable
                                                         LMC writes MR1[Level]=0 in the DDR3 parts in the selected ranks
                                                         during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh entry and exit instruction sequences.
                                                         (Write-leveling can only be initiated via the
                                                         write-leveling instruction sequence.)
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK,INIT_STATUS] and
                                                         LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
        uint64_t al                           : 2;  /**< Additive Latency
                                                         00 = 0
                                                         01 = CL-1
                                                         10 = CL-2
                                                         11 = Reserved
                                                         LMC writes this value to MR1[AL] in the selected DDR3 parts
                                                         during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh entry and exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
                                                         LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
                                                         This value must equal the MR1[AL] value in all the DDR3
                                                         parts attached to all ranks during normal operation.
                                                         See also LMC*_CONTROL[POCAS]. */
        uint64_t dll                          : 1;  /**< DLL Enable
                                                         0 = enable
                                                         1 = disable.
                                                         LMC writes this value to MR1[DLL] in the selected DDR3 parts
                                                         during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh entry and exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
                                                         LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
                                                         This value must equal the MR1[DLL] value in all the DDR3
                                                         parts attached to all ranks during normal operation.
                                                         In dll-off mode, CL/CWL must be programmed
                                                         equal to 6/6, respectively, as per the DDR3 specifications. */
        uint64_t mpr                          : 1;  /**< MPR
                                                         LMC writes this value to MR3[MPR] in the selected DDR3 parts
                                                         during power-up/init, read-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh exit instruction sequences.
                                                         (LMC also writes MR3[MPR]=1 at the beginning of the
                                                         read-leveling instruction sequence. Read-leveling should only be initiated via the
                                                         read-leveling instruction sequence.)
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
                                                         The MR3[MPR] value must be 0 in all the DDR3
                                                         parts attached to all ranks during normal operation. */
        uint64_t mprloc                       : 2;  /**< MPR Location
                                                         LMC writes this value to MR3[MPRLoc] in the selected DDR3 parts
                                                         during power-up/init, read-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh exit instruction sequences.
                                                         (LMC also writes MR3[MPRLoc]=0 at the beginning of the
                                                         read-leveling instruction sequence.)
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK].
                                                         The MR3[MPRLoc] value must be 0 in all the DDR3
                                                         parts attached to all ranks during normal operation. */
        uint64_t cwl                          : 3;  /**< CAS Write Latency
                                                         - 000: 5
                                                         - 001: 6
                                                         - 010: 7
                                                         - 011: 8
                                                         1xx: Reserved
                                                         LMC writes this value to MR2[CWL] in the selected DDR3 parts
                                                         during power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
                                                         self-refresh entry and exit instruction sequences.
                                                         See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
                                                         LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
                                                         This value must equal the MR2[CWL] value in all the DDR3
                                                         parts attached to all ranks during normal operation. */
#else
        /* Little-endian: identical fields, listed LSB-first. */
        uint64_t cwl                          : 3;
        uint64_t mprloc                       : 2;
        uint64_t mpr                          : 1;
        uint64_t dll                          : 1;
        uint64_t al                           : 2;
        uint64_t wlev                         : 1;
        uint64_t tdqs                         : 1;
        uint64_t qoff                         : 1;
        uint64_t bl                           : 2;
        uint64_t cl                           : 4;
        uint64_t rbt                          : 1;
        uint64_t tm                           : 1;
        uint64_t dllr                         : 1;
        uint64_t wrp                          : 3;
        uint64_t ppd                          : 1;
        uint64_t reserved_25_63               : 39;
#endif
        } s;
        struct cvmx_lmcx_modereg_params0_s    cn63xx;
        struct cvmx_lmcx_modereg_params0_s    cn63xxp1;
};
typedef union cvmx_lmcx_modereg_params0 cvmx_lmcx_modereg_params0_t;
4638
/**
 * cvmx_lmc#_modereg_params1
 *
 * Notes:
 * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
 *
 * Unless noted otherwise below, LMC writes each field to the indicated DDR3
 * mode register (MR1 or MR2) in the parts of the selected rank during
 * power-up/init, write-leveling, and, if LMC*_CONFIG[SREF_WITH_DLL] is set,
 * self-refresh entry and exit instruction sequences.
 * See LMC*_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
 * LMC*_RESET_CTL[DDR3PWARM,DDR3PSOFT].
 *
 * Rank to chip-select mapping: rank 3 = DIMM1_CS1, rank 2 = DIMM1_CS0,
 * rank 1 = DIMM0_CS1, rank 0 = DIMM0_CS0.
 */
union cvmx_lmcx_modereg_params1
{
        uint64_t u64;
        struct cvmx_lmcx_modereg_params1_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_48_63               : 16;
        uint64_t rtt_nom_11                   : 3;  /**< RTT_NOM Rank 3: MR1[Rtt_Nom] for DIMM1_CS1.
                                                         Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
                                                         only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
                                                         Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
        uint64_t dic_11                       : 2;  /**< Output Driver Impedance Control Rank 3: MR1[D.I.C.] for DIMM1_CS1 */
        uint64_t rtt_wr_11                    : 2;  /**< RTT_WR Rank 3: MR2[Rtt_WR] for DIMM1_CS1 */
        uint64_t srt_11                       : 1;  /**< Self-refresh temperature range Rank 3: MR2[SRT] for DIMM1_CS1 */
        uint64_t asr_11                       : 1;  /**< Auto self-refresh Rank 3: MR2[ASR] for DIMM1_CS1 */
        uint64_t pasr_11                      : 3;  /**< Partial array self-refresh Rank 3: MR2[PASR] for DIMM1_CS1 */
        uint64_t rtt_nom_10                   : 3;  /**< RTT_NOM Rank 2: MR1[Rtt_Nom] for DIMM1_CS0.
                                                         Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
                                                         only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
                                                         Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
        uint64_t dic_10                       : 2;  /**< Output Driver Impedance Control Rank 2: MR1[D.I.C.] for DIMM1_CS0 */
        uint64_t rtt_wr_10                    : 2;  /**< RTT_WR Rank 2: MR2[Rtt_WR] for DIMM1_CS0 */
        uint64_t srt_10                       : 1;  /**< Self-refresh temperature range Rank 2: MR2[SRT] for DIMM1_CS0 */
        uint64_t asr_10                       : 1;  /**< Auto self-refresh Rank 2: MR2[ASR] for DIMM1_CS0 */
        uint64_t pasr_10                      : 3;  /**< Partial array self-refresh Rank 2: MR2[PASR] for DIMM1_CS0 */
        uint64_t rtt_nom_01                   : 3;  /**< RTT_NOM Rank 1: MR1[Rtt_Nom] for DIMM0_CS1.
                                                         Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
                                                         only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
                                                         Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
        uint64_t dic_01                       : 2;  /**< Output Driver Impedance Control Rank 1: MR1[D.I.C.] for DIMM0_CS1 */
        uint64_t rtt_wr_01                    : 2;  /**< RTT_WR Rank 1: MR2[Rtt_WR] for DIMM0_CS1 */
        uint64_t srt_01                       : 1;  /**< Self-refresh temperature range Rank 1: MR2[SRT] for DIMM0_CS1 */
        uint64_t asr_01                       : 1;  /**< Auto self-refresh Rank 1: MR2[ASR] for DIMM0_CS1 */
        uint64_t pasr_01                      : 3;  /**< Partial array self-refresh Rank 1: MR2[PASR] for DIMM0_CS1 */
        uint64_t rtt_nom_00                   : 3;  /**< RTT_NOM Rank 0: MR1[Rtt_Nom] for DIMM0_CS0.
                                                         Per JEDEC DDR3 specifications, if RTT_Nom is used during writes,
                                                         only values MR1[Rtt_Nom] = 1 (RQZ/4), 2 (RQZ/2), or 3 (RQZ/6) are allowed.
                                                         Otherwise, values MR1[Rtt_Nom] = 4 (RQZ/12) and 5 (RQZ/8) are also allowed. */
        uint64_t dic_00                       : 2;  /**< Output Driver Impedance Control Rank 0: MR1[D.I.C.] for DIMM0_CS0 */
        uint64_t rtt_wr_00                    : 2;  /**< RTT_WR Rank 0: MR2[Rtt_WR] for DIMM0_CS0 */
        uint64_t srt_00                       : 1;  /**< Self-refresh temperature range Rank 0: MR2[SRT] for DIMM0_CS0 */
        uint64_t asr_00                       : 1;  /**< Auto self-refresh Rank 0: MR2[ASR] for DIMM0_CS0 */
        uint64_t pasr_00                      : 3;  /**< Partial array self-refresh Rank 0: MR2[PASR] for DIMM0_CS0 */
#else
        uint64_t pasr_00                      : 3;
        uint64_t asr_00                       : 1;
        uint64_t srt_00                       : 1;
        uint64_t rtt_wr_00                    : 2;
        uint64_t dic_00                       : 2;
        uint64_t rtt_nom_00                   : 3;
        uint64_t pasr_01                      : 3;
        uint64_t asr_01                       : 1;
        uint64_t srt_01                       : 1;
        uint64_t rtt_wr_01                    : 2;
        uint64_t dic_01                       : 2;
        uint64_t rtt_nom_01                   : 3;
        uint64_t pasr_10                      : 3;
        uint64_t asr_10                       : 1;
        uint64_t srt_10                       : 1;
        uint64_t rtt_wr_10                    : 2;
        uint64_t dic_10                       : 2;
        uint64_t rtt_nom_10                   : 3;
        uint64_t pasr_11                      : 3;
        uint64_t asr_11                       : 1;
        uint64_t srt_11                       : 1;
        uint64_t rtt_wr_11                    : 2;
        uint64_t dic_11                       : 2;
        uint64_t rtt_nom_11                   : 3;
        uint64_t reserved_48_63               : 16;
#endif
        } s;
        struct cvmx_lmcx_modereg_params1_s    cn63xx;
        struct cvmx_lmcx_modereg_params1_s    cn63xxp1;
};
typedef union cvmx_lmcx_modereg_params1 cvmx_lmcx_modereg_params1_t;

/**
 * cvmx_lmc#_nxm
 *
 * LMC_NXM = LMC non-existent memory
 *
 *
 * Notes:
 * Decoding for mem_msb/rank
 *      - 0000: mem_msb = mem_adr[25]
 *      - 0001: mem_msb = mem_adr[26]
 *      - 0010: mem_msb = mem_adr[27]
 *      - 0011: mem_msb = mem_adr[28]
 *      - 0100: mem_msb = mem_adr[29]
 *      - 0101: mem_msb = mem_adr[30]
 *      - 0110: mem_msb = mem_adr[31]
 *      - 0111: mem_msb = mem_adr[32]
 *      - 1000: mem_msb = mem_adr[33]
 *      - 1001: mem_msb = mem_adr[34]
 *      1010-1111 = Reserved
 * For example, for a DIMM made of Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
 * DDR3 parts, the column address width = 10, so with
 * 10b of col, 3b of bus, 3b of bank, row_lsb = 16. So, row = mem_adr[29:16] and
 * mem_msb = 4
 *
 * Note also that addresses greater the max defined space (pbank_msb) are also treated
 * as NXM accesses
 */
union cvmx_lmcx_nxm
{
        uint64_t u64;
        struct cvmx_lmcx_nxm_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_40_63               : 24;
        uint64_t mem_msb_d3_r1                : 4;  /**< Max Row MSB for DIMM3, RANK1/DIMM3 in Single Ranked */
        uint64_t mem_msb_d3_r0                : 4;  /**< Max Row MSB for DIMM3, RANK0 */
        uint64_t mem_msb_d2_r1                : 4;  /**< Max Row MSB for DIMM2, RANK1/DIMM2 in Single Ranked */
        uint64_t mem_msb_d2_r0                : 4;  /**< Max Row MSB for DIMM2, RANK0 */
        uint64_t mem_msb_d1_r1                : 4;  /**< Max Row MSB for DIMM1, RANK1/DIMM1 in Single Ranked */
        uint64_t mem_msb_d1_r0                : 4;  /**< Max Row MSB for DIMM1, RANK0 */
        uint64_t mem_msb_d0_r1                : 4;  /**< Max Row MSB for DIMM0, RANK1/DIMM0 in Single Ranked */
        uint64_t mem_msb_d0_r0                : 4;  /**< Max Row MSB for DIMM0, RANK0 */
        uint64_t cs_mask                      : 8;  /**< Chip select mask.
                                                         This mask corresponds to the 8 chip selects for a memory
                                                         configuration.  If LMC*_CONFIG[RANK_ENA]==0 then this
                                                         mask must be set in pairs because each reference address
                                                         will assert a pair of chip selects.  If the chip
                                                         select(s) have a corresponding CS_MASK bit set, then the
                                                         reference is to non-existent memory (NXM).  LMC will alias a
                                                         NXM read reference to use the lowest, legal chip select(s)
                                                         and return 0's. LMC normally discards NXM writes, but will
                                                         also alias them when LMC*_CONTROL[NXM_WRITE_EN]=1.
                                                         CS_MASK<7:4> MBZ in 63xx */
#else
        uint64_t cs_mask                      : 8;
        uint64_t mem_msb_d0_r0                : 4;
        uint64_t mem_msb_d0_r1                : 4;
        uint64_t mem_msb_d1_r0                : 4;
        uint64_t mem_msb_d1_r1                : 4;
        uint64_t mem_msb_d2_r0                : 4;
        uint64_t mem_msb_d2_r1                : 4;
        uint64_t mem_msb_d3_r0                : 4;
        uint64_t mem_msb_d3_r1                : 4;
        uint64_t reserved_40_63               : 24;
#endif
        } s;
        struct cvmx_lmcx_nxm_cn52xx
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_8_63                : 56;
        uint64_t cs_mask                      : 8;  /**< Chip select mask.
                                                         This mask corresponds to the 8 chip selects for a memory
                                                         configuration.  If LMC_MEM_CFG0[BUNK_ENA]==0 then this
                                                         mask must be set in pairs because each reference address
                                                         will assert a pair of chip selects.  If the chip
                                                         select(s) have a corresponding CS_MASK bit set, then the
                                                         reference is to non-existent memory.  LMC will alias the
                                                         reference to use the lowest, legal chip select(s) in
                                                         that case. */
#else
        uint64_t cs_mask                      : 8;
        uint64_t reserved_8_63                : 56;
#endif
        } cn52xx;
        struct cvmx_lmcx_nxm_cn52xx           cn56xx;
        struct cvmx_lmcx_nxm_cn52xx           cn58xx;
        struct cvmx_lmcx_nxm_s                cn63xx;
        struct cvmx_lmcx_nxm_s                cn63xxp1;
};
typedef union cvmx_lmcx_nxm cvmx_lmcx_nxm_t;

/**
 * cvmx_lmc#_ops_cnt
 *
 * LMC_OPS_CNT  = Performance Counters
 *
 */
union cvmx_lmcx_ops_cnt
{
        uint64_t u64;
        struct cvmx_lmcx_ops_cnt_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t opscnt                       : 64; /**< Performance Counter
                                                         64-bit counter that increments when the DDR3 data bus
                                                         is being used
                                                         DRAM bus utilization = LMC*_OPS_CNT/LMC*_DCLK_CNT */
#else
        uint64_t opscnt                       : 64;
#endif
        } s;
        struct cvmx_lmcx_ops_cnt_s            cn63xx;
        struct cvmx_lmcx_ops_cnt_s            cn63xxp1;
};
typedef union cvmx_lmcx_ops_cnt cvmx_lmcx_ops_cnt_t;

/**
 * cvmx_lmc#_ops_cnt_hi
 *
 * LMC_OPS_CNT_HI  = Performance Counters
 *
 */
union cvmx_lmcx_ops_cnt_hi
{
        uint64_t u64;
        struct cvmx_lmcx_ops_cnt_hi_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63               : 32;
        uint64_t opscnt_hi                    : 32; /**< Performance Counter to measure Bus Utilization
                                                         Upper 32-bits of 64-bit counter
                                                           DRAM bus utilization = LMC_OPS_CNT_* /LMC_DCLK_CNT_* */
#else
        uint64_t opscnt_hi                    : 32;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        struct cvmx_lmcx_ops_cnt_hi_s         cn30xx;
        struct cvmx_lmcx_ops_cnt_hi_s         cn31xx;
        struct cvmx_lmcx_ops_cnt_hi_s         cn38xx;
        struct cvmx_lmcx_ops_cnt_hi_s         cn38xxp2;
        struct cvmx_lmcx_ops_cnt_hi_s         cn50xx;
        struct cvmx_lmcx_ops_cnt_hi_s         cn52xx;
        struct cvmx_lmcx_ops_cnt_hi_s         cn52xxp1;
        struct cvmx_lmcx_ops_cnt_hi_s         cn56xx;
        struct cvmx_lmcx_ops_cnt_hi_s         cn56xxp1;
        struct cvmx_lmcx_ops_cnt_hi_s         cn58xx;
        struct cvmx_lmcx_ops_cnt_hi_s         cn58xxp1;
};
typedef union cvmx_lmcx_ops_cnt_hi cvmx_lmcx_ops_cnt_hi_t;

/**
 * cvmx_lmc#_ops_cnt_lo
 *
 * LMC_OPS_CNT_LO  = Performance Counters
 *
 */
union cvmx_lmcx_ops_cnt_lo
{
        uint64_t u64;
        struct cvmx_lmcx_ops_cnt_lo_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63               : 32;
        uint64_t opscnt_lo                    : 32; /**< Performance Counter
                                                         Low 32-bits of 64-bit counter
                                                           DRAM bus utilization = LMC_OPS_CNT_* /LMC_DCLK_CNT_* */
#else
        uint64_t opscnt_lo                    : 32;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        struct cvmx_lmcx_ops_cnt_lo_s         cn30xx;
        struct cvmx_lmcx_ops_cnt_lo_s         cn31xx;
        struct cvmx_lmcx_ops_cnt_lo_s         cn38xx;
        struct cvmx_lmcx_ops_cnt_lo_s         cn38xxp2;
        struct cvmx_lmcx_ops_cnt_lo_s         cn50xx;
        struct cvmx_lmcx_ops_cnt_lo_s         cn52xx;
        struct cvmx_lmcx_ops_cnt_lo_s         cn52xxp1;
        struct cvmx_lmcx_ops_cnt_lo_s         cn56xx;
        struct cvmx_lmcx_ops_cnt_lo_s         cn56xxp1;
        struct cvmx_lmcx_ops_cnt_lo_s         cn58xx;
        struct cvmx_lmcx_ops_cnt_lo_s         cn58xxp1;
};
typedef union cvmx_lmcx_ops_cnt_lo cvmx_lmcx_ops_cnt_lo_t;

5028 /**
5029  * cvmx_lmc#_phy_ctl
5030  *
5031  * LMC_PHY_CTL = LMC PHY Control
5032  *
 * DDR PHY tuning controls: clock delay/tune, loopback, low-voltage mode,
 * Rx power gating and two-stage output-drive staggering.
5033  */
5034 union cvmx_lmcx_phy_ctl
5035 {
5036         uint64_t u64;
5037         struct cvmx_lmcx_phy_ctl_s
5038         {
5039 #if __BYTE_ORDER == __BIG_ENDIAN
5040         uint64_t reserved_15_63               : 49;
5041         uint64_t rx_always_on                 : 1;  /**< Disable dynamic DDR3 IO Rx power gating */
5042         uint64_t lv_mode                      : 1;  /**< Low Voltage Mode (1.35V) */
5043         uint64_t ck_tune1                     : 1;  /**< Clock Tune */
5044         uint64_t ck_dlyout1                   : 4;  /**< Clock delay out setting */
5045         uint64_t ck_tune0                     : 1;  /**< Clock Tune */
5046         uint64_t ck_dlyout0                   : 4;  /**< Clock delay out setting */
5047         uint64_t loopback                     : 1;  /**< Loopback enable */
5048         uint64_t loopback_pos                 : 1;  /**< Loopback pos mode */
5049         uint64_t ts_stagger                   : 1;  /**< TS Staggermode
5050                                                          This mode configures output drivers with 2-stage drive
5051                                                          strength to avoid undershoot issues on the bus when strong
5052                                                          drivers are suddenly turned on. When this mode is asserted,
5053                                                          Octeon will configure output drivers to be weak drivers
5054                                                          (60 ohm output impedance) at the first CK cycle, and
5055                                                          change drivers to the designated drive strengths specified
5056                                                          in $LMC(0)_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
5057                                                          at the following cycle */
5058 #else
5059         uint64_t ts_stagger                   : 1;
5060         uint64_t loopback_pos                 : 1;
5061         uint64_t loopback                     : 1;
5062         uint64_t ck_dlyout0                   : 4;
5063         uint64_t ck_tune0                     : 1;
5064         uint64_t ck_dlyout1                   : 4;
5065         uint64_t ck_tune1                     : 1;
5066         uint64_t lv_mode                      : 1;
5067         uint64_t rx_always_on                 : 1;
5068         uint64_t reserved_15_63               : 49;
5069 #endif
5070         } s;
5071         struct cvmx_lmcx_phy_ctl_s            cn63xx;
        /* cn63xx pass 1 lacks the RX_ALWAYS_ON bit (reserved starts at bit 14). */
5072         struct cvmx_lmcx_phy_ctl_cn63xxp1
5073         {
5074 #if __BYTE_ORDER == __BIG_ENDIAN
5075         uint64_t reserved_14_63               : 50;
5076         uint64_t lv_mode                      : 1;  /**< Low Voltage Mode (1.35V) */
5077         uint64_t ck_tune1                     : 1;  /**< Clock Tune */
5078         uint64_t ck_dlyout1                   : 4;  /**< Clock delay out setting */
5079         uint64_t ck_tune0                     : 1;  /**< Clock Tune */
5080         uint64_t ck_dlyout0                   : 4;  /**< Clock delay out setting */
5081         uint64_t loopback                     : 1;  /**< Loopback enable */
5082         uint64_t loopback_pos                 : 1;  /**< Loopback pos mode */
5083         uint64_t ts_stagger                   : 1;  /**< TS Staggermode
5084                                                          This mode configures output drivers with 2-stage drive
5085                                                          strength to avoid undershoot issues on the bus when strong
5086                                                          drivers are suddenly turned on. When this mode is asserted,
5087                                                          Octeon will configure output drivers to be weak drivers
5088                                                          (60 ohm output impedance) at the first CK cycle, and
5089                                                          change drivers to the designated drive strengths specified
5090                                                          in $LMC(0)_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
5091                                                          at the following cycle */
5092 #else
5093         uint64_t ts_stagger                   : 1;
5094         uint64_t loopback_pos                 : 1;
5095         uint64_t loopback                     : 1;
5096         uint64_t ck_dlyout0                   : 4;
5097         uint64_t ck_tune0                     : 1;
5098         uint64_t ck_dlyout1                   : 4;
5099         uint64_t ck_tune1                     : 1;
5100         uint64_t lv_mode                      : 1;
5101         uint64_t reserved_14_63               : 50;
5102 #endif
5103         } cn63xxp1;
5104 };
5105 typedef union cvmx_lmcx_phy_ctl cvmx_lmcx_phy_ctl_t;
5106
5107 /**
5108  * cvmx_lmc#_pll_bwctl
5109  *
5110  * LMC_PLL_BWCTL  = DDR PLL Bandwidth Control Register
5111  *
 * BWCTL holds the bandwidth setting; writing BWUPD=1 loads it into the
 * PLL.  Present only on cn30xx/cn31xx/cn38xx-family parts.
5112  */
5113 union cvmx_lmcx_pll_bwctl
5114 {
5115         uint64_t u64;
5116         struct cvmx_lmcx_pll_bwctl_s
5117         {
5118 #if __BYTE_ORDER == __BIG_ENDIAN
5119         uint64_t reserved_5_63                : 59;
5120         uint64_t bwupd                        : 1;  /**< Load this Bandwidth Register value into the PLL */
5121         uint64_t bwctl                        : 4;  /**< Bandwidth Control Register for DDR PLL */
5122 #else
5123         uint64_t bwctl                        : 4;
5124         uint64_t bwupd                        : 1;
5125         uint64_t reserved_5_63                : 59;
5126 #endif
5127         } s;
5128         struct cvmx_lmcx_pll_bwctl_s          cn30xx;
5129         struct cvmx_lmcx_pll_bwctl_s          cn31xx;
5130         struct cvmx_lmcx_pll_bwctl_s          cn38xx;
5131         struct cvmx_lmcx_pll_bwctl_s          cn38xxp2;
5132 };
5133 typedef union cvmx_lmcx_pll_bwctl cvmx_lmcx_pll_bwctl_t;
5134
5135 /**
5136  * cvmx_lmc#_pll_ctl
5137  *
5138  * LMC_PLL_CTL = LMC pll control
5139  *
5140  *
5141  * Notes:
5142  * This CSR is only relevant for LMC0. LMC1_PLL_CTL is not used.
5143  *
5144  * Exactly one of EN2, EN4, EN6, EN8, EN12, EN16 must be set.
5145  *
5146  * The resultant DDR_CK frequency is the DDR2_REF_CLK
5147  * frequency multiplied by:
5148  *
5149  *     (CLKF + 1) / ((CLKR + 1) * EN(2,4,6,8,12,16))
5150  *
5151  * The PLL frequency, which is:
5152  *
5153  *     (DDR2_REF_CLK freq) * ((CLKF + 1) / (CLKR + 1))
5154  *
5155  * must reside between 1.2 and 2.5 GHz. A faster PLL frequency is desirable if there is a choice.
5156  */
5157 union cvmx_lmcx_pll_ctl
5158 {
5159         uint64_t u64;
        /* Full layout (cn52xx/cn52xxp1): includes BYPASS and FASTEN_N. */
5160         struct cvmx_lmcx_pll_ctl_s
5161         {
5162 #if __BYTE_ORDER == __BIG_ENDIAN
5163         uint64_t reserved_30_63               : 34;
5164         uint64_t bypass                       : 1;  /**< PLL Bypass */
5165         uint64_t fasten_n                     : 1;  /**< Should be set, especially when CLKF > ~80 */
5166         uint64_t div_reset                    : 1;  /**< Analog pll divider reset
5167                                                          De-assert at least 500*(CLKR+1) reference clock
5168                                                          cycles following RESET_N de-assertion. */
5169         uint64_t reset_n                      : 1;  /**< Analog pll reset
5170                                                          De-assert at least 5 usec after CLKF, CLKR,
5171                                                          and EN* are set up. */
5172         uint64_t clkf                         : 12; /**< Multiply reference by CLKF + 1
5173                                                          CLKF must be <= 128 */
5174         uint64_t clkr                         : 6;  /**< Divide reference by CLKR + 1 */
5175         uint64_t reserved_6_7                 : 2;
5176         uint64_t en16                         : 1;  /**< Divide output by 16 */
5177         uint64_t en12                         : 1;  /**< Divide output by 12 */
5178         uint64_t en8                          : 1;  /**< Divide output by 8 */
5179         uint64_t en6                          : 1;  /**< Divide output by 6 */
5180         uint64_t en4                          : 1;  /**< Divide output by 4 */
5181         uint64_t en2                          : 1;  /**< Divide output by 2 */
5182 #else
5183         uint64_t en2                          : 1;
5184         uint64_t en4                          : 1;
5185         uint64_t en6                          : 1;
5186         uint64_t en8                          : 1;
5187         uint64_t en12                         : 1;
5188         uint64_t en16                         : 1;
5189         uint64_t reserved_6_7                 : 2;
5190         uint64_t clkr                         : 6;
5191         uint64_t clkf                         : 12;
5192         uint64_t reset_n                      : 1;
5193         uint64_t div_reset                    : 1;
5194         uint64_t fasten_n                     : 1;
5195         uint64_t bypass                       : 1;
5196         uint64_t reserved_30_63               : 34;
5197 #endif
5198         } s;
        /* cn50xx/cn56xx variant: no BYPASS bit (reserved starts at bit 29),
           and CLKF limit is 256 rather than 128. */
5199         struct cvmx_lmcx_pll_ctl_cn50xx
5200         {
5201 #if __BYTE_ORDER == __BIG_ENDIAN
5202         uint64_t reserved_29_63               : 35;
5203         uint64_t fasten_n                     : 1;  /**< Should be set, especially when CLKF > ~80 */
5204         uint64_t div_reset                    : 1;  /**< Analog pll divider reset
5205                                                          De-assert at least 500*(CLKR+1) reference clock
5206                                                          cycles following RESET_N de-assertion. */
5207         uint64_t reset_n                      : 1;  /**< Analog pll reset
5208                                                          De-assert at least 5 usec after CLKF, CLKR,
5209                                                          and EN* are set up. */
5210         uint64_t clkf                         : 12; /**< Multiply reference by CLKF + 1
5211                                                          CLKF must be <= 256 */
5212         uint64_t clkr                         : 6;  /**< Divide reference by CLKR + 1 */
5213         uint64_t reserved_6_7                 : 2;
5214         uint64_t en16                         : 1;  /**< Divide output by 16 */
5215         uint64_t en12                         : 1;  /**< Divide output by 12 */
5216         uint64_t en8                          : 1;  /**< Divide output by 8 */
5217         uint64_t en6                          : 1;  /**< Divide output by 6 */
5218         uint64_t en4                          : 1;  /**< Divide output by 4 */
5219         uint64_t en2                          : 1;  /**< Divide output by 2 */
5220 #else
5221         uint64_t en2                          : 1;
5222         uint64_t en4                          : 1;
5223         uint64_t en6                          : 1;
5224         uint64_t en8                          : 1;
5225         uint64_t en12                         : 1;
5226         uint64_t en16                         : 1;
5227         uint64_t reserved_6_7                 : 2;
5228         uint64_t clkr                         : 6;
5229         uint64_t clkf                         : 12;
5230         uint64_t reset_n                      : 1;
5231         uint64_t div_reset                    : 1;
5232         uint64_t fasten_n                     : 1;
5233         uint64_t reserved_29_63               : 35;
5234 #endif
5235         } cn50xx;
5236         struct cvmx_lmcx_pll_ctl_s            cn52xx;
5237         struct cvmx_lmcx_pll_ctl_s            cn52xxp1;
5238         struct cvmx_lmcx_pll_ctl_cn50xx       cn56xx;
        /* cn56xx pass 1 / cn58xx variant: no BYPASS and no FASTEN_N
           (reserved starts at bit 28). */
5239         struct cvmx_lmcx_pll_ctl_cn56xxp1
5240         {
5241 #if __BYTE_ORDER == __BIG_ENDIAN
5242         uint64_t reserved_28_63               : 36;
5243         uint64_t div_reset                    : 1;  /**< Analog pll divider reset
5244                                                          De-assert at least 500*(CLKR+1) reference clock
5245                                                          cycles following RESET_N de-assertion. */
5246         uint64_t reset_n                      : 1;  /**< Analog pll reset
5247                                                          De-assert at least 5 usec after CLKF, CLKR,
5248                                                          and EN* are set up. */
5249         uint64_t clkf                         : 12; /**< Multiply reference by CLKF + 1
5250                                                          CLKF must be <= 128 */
5251         uint64_t clkr                         : 6;  /**< Divide reference by CLKR + 1 */
5252         uint64_t reserved_6_7                 : 2;
5253         uint64_t en16                         : 1;  /**< Divide output by 16 */
5254         uint64_t en12                         : 1;  /**< Divide output by 12 */
5255         uint64_t en8                          : 1;  /**< Divide output by 8 */
5256         uint64_t en6                          : 1;  /**< Divide output by 6 */
5257         uint64_t en4                          : 1;  /**< Divide output by 4 */
5258         uint64_t en2                          : 1;  /**< Divide output by 2 */
5259 #else
5260         uint64_t en2                          : 1;
5261         uint64_t en4                          : 1;
5262         uint64_t en6                          : 1;
5263         uint64_t en8                          : 1;
5264         uint64_t en12                         : 1;
5265         uint64_t en16                         : 1;
5266         uint64_t reserved_6_7                 : 2;
5267         uint64_t clkr                         : 6;
5268         uint64_t clkf                         : 12;
5269         uint64_t reset_n                      : 1;
5270         uint64_t div_reset                    : 1;
5271         uint64_t reserved_28_63               : 36;
5272 #endif
5273         } cn56xxp1;
5274         struct cvmx_lmcx_pll_ctl_cn56xxp1     cn58xx;
5275         struct cvmx_lmcx_pll_ctl_cn56xxp1     cn58xxp1;
5276 };
5277 typedef union cvmx_lmcx_pll_ctl cvmx_lmcx_pll_ctl_t;
5278
5279 /**
5280  * cvmx_lmc#_pll_status
5281  *
5282  * LMC_PLL_STATUS = LMC pll status
5283  *
 * Read-only PLL status: slip indicators plus (on most models) the DDR
 * compensation-circuit nctl/pctl readouts.
5284  */
5285 union cvmx_lmcx_pll_status
5286 {
5287         uint64_t u64;
5288         struct cvmx_lmcx_pll_status_s
5289         {
5290 #if __BYTE_ORDER == __BIG_ENDIAN
5291         uint64_t reserved_32_63               : 32;
5292         uint64_t ddr__nctl                    : 5;  /**< DDR nctl from compensation circuit */
5293         uint64_t ddr__pctl                    : 5;  /**< DDR pctl from compensation circuit */
5294         uint64_t reserved_2_21                : 20;
5295         uint64_t rfslip                       : 1;  /**< Reference clock slip */
5296         uint64_t fbslip                       : 1;  /**< Feedback clock slip */
5297 #else
5298         uint64_t fbslip                       : 1;
5299         uint64_t rfslip                       : 1;
5300         uint64_t reserved_2_21                : 20;
5301         uint64_t ddr__pctl                    : 5;
5302         uint64_t ddr__nctl                    : 5;
5303         uint64_t reserved_32_63               : 32;
5304 #endif
5305         } s;
5306         struct cvmx_lmcx_pll_status_s         cn50xx;
5307         struct cvmx_lmcx_pll_status_s         cn52xx;
5308         struct cvmx_lmcx_pll_status_s         cn52xxp1;
5309         struct cvmx_lmcx_pll_status_s         cn56xx;
5310         struct cvmx_lmcx_pll_status_s         cn56xxp1;
5311         struct cvmx_lmcx_pll_status_s         cn58xx;
        /* cn58xx pass 1: only the slip bits; no compensation readouts. */
5312         struct cvmx_lmcx_pll_status_cn58xxp1
5313         {
5314 #if __BYTE_ORDER == __BIG_ENDIAN
5315         uint64_t reserved_2_63                : 62;
5316         uint64_t rfslip                       : 1;  /**< Reference clock slip */
5317         uint64_t fbslip                       : 1;  /**< Feedback clock slip */
5318 #else
5319         uint64_t fbslip                       : 1;
5320         uint64_t rfslip                       : 1;
5321         uint64_t reserved_2_63                : 62;
5322 #endif
5323         } cn58xxp1;
5324 };
5325 typedef union cvmx_lmcx_pll_status cvmx_lmcx_pll_status_t;
5326
5327 /**
5328  * cvmx_lmc#_read_level_ctl
5329  *
 * Controls the HW read-leveling sequence: which ranks to level, the data
 * pattern, and the DRAM address (row/col/bank) used for the test block.
 *
5330  * Notes:
5331  * The HW writes and reads the cache block selected by ROW, COL, BNK and the rank as part of a read-leveling sequence for a rank.
5332  * A cache block write is 16 72-bit words. PATTERN selects the write value. For the first 8
5333  * words, the write value is the bit PATTERN<i> duplicated into a 72-bit vector. The write value of
5334  * the last 8 words is the inverse of the write value of the first 8 words.
5335  * See LMC*_READ_LEVEL_RANK*.
5336  */
5337 union cvmx_lmcx_read_level_ctl
5338 {
5339         uint64_t u64;
5340         struct cvmx_lmcx_read_level_ctl_s
5341         {
5342 #if __BYTE_ORDER == __BIG_ENDIAN
5343         uint64_t reserved_44_63               : 20;
5344         uint64_t rankmask                     : 4;  /**< Selects ranks to be leveled
5345                                                          to read-level rank i, set RANKMASK<i> */
5346         uint64_t pattern                      : 8;  /**< All DQ driven to PATTERN[burst], 0 <= burst <= 7
5347                                                          All DQ driven to ~PATTERN[burst-8], 8 <= burst <= 15 */
5348         uint64_t row                          : 16; /**< Row    address used to write/read data pattern */
5349         uint64_t col                          : 12; /**< Column address used to write/read data pattern */
5350         uint64_t reserved_3_3                 : 1;
5351         uint64_t bnk                          : 3;  /**< Bank   address used to write/read data pattern */
5352 #else
5353         uint64_t bnk                          : 3;
5354         uint64_t reserved_3_3                 : 1;
5355         uint64_t col                          : 12;
5356         uint64_t row                          : 16;
5357         uint64_t pattern                      : 8;
5358         uint64_t rankmask                     : 4;
5359         uint64_t reserved_44_63               : 20;
5360 #endif
5361         } s;
5362         struct cvmx_lmcx_read_level_ctl_s     cn52xx;
5363         struct cvmx_lmcx_read_level_ctl_s     cn52xxp1;
5364         struct cvmx_lmcx_read_level_ctl_s     cn56xx;
5365         struct cvmx_lmcx_read_level_ctl_s     cn56xxp1;
5366 };
5367 typedef union cvmx_lmcx_read_level_ctl cvmx_lmcx_read_level_ctl_t;
5368
5369 /**
5370  * cvmx_lmc#_read_level_dbg
5371  *
 * Debug readout of read-leveling pass/fail results (BITMASK) for one
 * byte lane (selected by BYTE) of the last rank that was read-leveled.
 *
5372  * Notes:
5373  * A given read of LMC*_READ_LEVEL_DBG returns the read-leveling pass/fail results for all possible
5374  * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled.
5375  * LMC*_READ_LEVEL_DBG[BYTE] selects the particular byte.
5376  * To get these pass/fail results for another different rank, you must run the hardware read-leveling
5377  * again. For example, it is possible to get the BITMASK results for every byte of every rank
5378  * if you run read-leveling separately for each rank, probing LMC*_READ_LEVEL_DBG between each
5379  * read-leveling.
5380  */
5381 union cvmx_lmcx_read_level_dbg
5382 {
5383         uint64_t u64;
5384         struct cvmx_lmcx_read_level_dbg_s
5385         {
5386 #if __BYTE_ORDER == __BIG_ENDIAN
5387         uint64_t reserved_32_63               : 32;
5388         uint64_t bitmask                      : 16; /**< Bitmask generated during deskew settings sweep
5389                                                          BITMASK[n]=0 means deskew setting n failed
5390                                                          BITMASK[n]=1 means deskew setting n passed
5391                                                          for 0 <= n <= 15 */
5392         uint64_t reserved_4_15                : 12;
5393         uint64_t byte                         : 4;  /**< 0 <= BYTE <= 8 */
5394 #else
5395         uint64_t byte                         : 4;
5396         uint64_t reserved_4_15                : 12;
5397         uint64_t bitmask                      : 16;
5398         uint64_t reserved_32_63               : 32;
5399 #endif
5400         } s;
5401         struct cvmx_lmcx_read_level_dbg_s     cn52xx;
5402         struct cvmx_lmcx_read_level_dbg_s     cn52xxp1;
5403         struct cvmx_lmcx_read_level_dbg_s     cn56xx;
5404         struct cvmx_lmcx_read_level_dbg_s     cn56xxp1;
5405 };
5406 typedef union cvmx_lmcx_read_level_dbg cvmx_lmcx_read_level_dbg_t;
5407
5408 /**
5409  * cvmx_lmc#_read_level_rank#
5410  *
 * Per-rank read-leveling results: one 4-bit deskew setting per byte lane
 * (BYTE0..BYTE8) plus a 2-bit STATUS describing where they came from.
 *
5411  * Notes:
5412  * This is four CSRs per LMC, one per each rank.
5413  * Each CSR is written by HW during a read-leveling sequence for the rank. (HW sets STATUS==3 after HW read-leveling completes for the rank.)
5414  * Each CSR may also be written by SW, but not while a read-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
5415  * Deskew setting is measured in units of 1/4 DCLK, so the above BYTE* values can range over 4 DCLKs.
5416  * SW initiates a HW read-leveling sequence by programming LMC*_READ_LEVEL_CTL and writing INIT_START=1 with SEQUENCE=1.
5417  * See LMC*_READ_LEVEL_CTL.
5418  */
5419 union cvmx_lmcx_read_level_rankx
5420 {
5421         uint64_t u64;
5422         struct cvmx_lmcx_read_level_rankx_s
5423         {
5424 #if __BYTE_ORDER == __BIG_ENDIAN
5425         uint64_t reserved_38_63               : 26;
5426         uint64_t status                       : 2;  /**< Indicates status of the read-levelling and where
5427                                                          the BYTE* programmings in <35:0> came from:
5428                                                          0 = BYTE* values are their reset value
5429                                                          1 = BYTE* values were set via a CSR write to this register
5430                                                          2 = read-leveling sequence currently in progress (BYTE* values are unpredictable)
5431                                                          3 = BYTE* values came from a complete read-leveling sequence */
5432         uint64_t byte8                        : 4;  /**< Deskew setting */
5433         uint64_t byte7                        : 4;  /**< Deskew setting */
5434         uint64_t byte6                        : 4;  /**< Deskew setting */
5435         uint64_t byte5                        : 4;  /**< Deskew setting */
5436         uint64_t byte4                        : 4;  /**< Deskew setting */
5437         uint64_t byte3                        : 4;  /**< Deskew setting */
5438         uint64_t byte2                        : 4;  /**< Deskew setting */
5439         uint64_t byte1                        : 4;  /**< Deskew setting */
5440         uint64_t byte0                        : 4;  /**< Deskew setting */
5441 #else
5442         uint64_t byte0                        : 4;
5443         uint64_t byte1                        : 4;
5444         uint64_t byte2                        : 4;
5445         uint64_t byte3                        : 4;
5446         uint64_t byte4                        : 4;
5447         uint64_t byte5                        : 4;
5448         uint64_t byte6                        : 4;
5449         uint64_t byte7                        : 4;
5450         uint64_t byte8                        : 4;
5451         uint64_t status                       : 2;
5452         uint64_t reserved_38_63               : 26;
5453 #endif
5454         } s;
5455         struct cvmx_lmcx_read_level_rankx_s   cn52xx;
5456         struct cvmx_lmcx_read_level_rankx_s   cn52xxp1;
5457         struct cvmx_lmcx_read_level_rankx_s   cn56xx;
5458         struct cvmx_lmcx_read_level_rankx_s   cn56xxp1;
5459 };
5460 typedef union cvmx_lmcx_read_level_rankx cvmx_lmcx_read_level_rankx_t;
5461
5462 /**
5463  * cvmx_lmc#_reset_ctl
5464  *
5465  * Specify the RSL base addresses for the block
5466  *
 * Controls the DDR3 RESET# pin and "preserve" (self-refresh) behavior
 * across warm/soft chip resets on cn63xx-family parts.
5467  *
5468  * Notes:
5469  * DDR3RST - DDR3 DRAM parts have a new RESET#
5470  *   pin that wasn't present in DDR2 parts. The
5471  *   DDR3RST CSR field controls the assertion of
5472  *   the new 63xx pin that attaches to RESET#.
5473  *   When DDR3RST is set, 63xx asserts RESET#.
5474  *   When DDR3RST is clear, 63xx de-asserts
5475  *   RESET#.
5476  *
5477  *   DDR3RST is set on a cold reset. Warm and
5478  *   soft chip resets do not affect the DDR3RST
5479  *   value. Outside of cold reset, only software
5480  *   CSR writes change the DDR3RST value.
5481  *
5482  * DDR3PWARM - Enables preserve mode during a warm
5483  *   reset. When set, the DDR3 controller hardware
5484  *   automatically puts the attached DDR3 DRAM parts
5485  *   into self refresh (see LMC*CONFIG[SEQUENCE] below) at the beginning of a warm
5486  *   reset sequence, provided that the DDR3 controller
5487  *   is up. When clear, the DDR3 controller hardware
5488  *   does not put the attached DDR3 DRAM parts into
5489  *   self-refresh during a warm reset sequence.
5490  *
5491  *   DDR3PWARM is cleared on a cold reset. Warm and
5492  *   soft chip resets do not affect the DDR3PWARM
5493  *   value. Outside of cold reset, only software
5494  *   CSR writes change the DDR3PWARM value.
5495  *
5496  *   Note that if a warm reset follows a soft reset,
5497  *   DDR3PWARM has no effect, as the DDR3 controller
5498  *   is no longer up after any cold/warm/soft
5499  *   reset sequence.
5500  *
5501  * DDR3PSOFT - Enables preserve mode during a soft
5502  *   reset. When set, the DDR3 controller hardware
5503  *   automatically puts the attached DDR3 DRAM parts
5504  *   into self refresh (see LMC*CONFIG[SEQUENCE] below) at the beginning of a soft
5505  *   reset sequence, provided that the DDR3 controller
5506  *   is up. When clear, the DDR3 controller hardware
5507  *   does not put the attached DDR3 DRAM parts into
5508  *   self-refresh during a soft reset sequence.
5509  *
5510  *   DDR3PSOFT is cleared on a cold reset. Warm and
5511  *   soft chip resets do not affect the DDR3PSOFT
5512  *   value. Outside of cold reset, only software
5513  *   CSR writes change the DDR3PSOFT value.
5514  *
5515  * DDR3PSV - May be useful for system software to
5516  *   determine when the DDR3 contents have been
5517  *   preserved.
5518  *
5519  *   Cleared by hardware during a cold reset. Never
5520  *   cleared by hardware during a warm/soft reset.
5521  *   Set by hardware during a warm/soft reset if
5522  *   the hardware automatically put the DDR3 DRAM
5523  *   into self-refresh during the reset sequence.
5524  *
5525  *   Can also be written by software (to any value).
5526  */
5527 union cvmx_lmcx_reset_ctl
5528 {
5529         uint64_t u64;
5530         struct cvmx_lmcx_reset_ctl_s
5531         {
5532 #if __BYTE_ORDER == __BIG_ENDIAN
5533         uint64_t reserved_4_63                : 60;
5534         uint64_t ddr3psv                      : 1;  /**< Memory Reset
5535                                                          1 = DDR contents preserved */
5536         uint64_t ddr3psoft                    : 1;  /**< Memory Reset
5537                                                          1 = Enable Preserve mode during soft reset */
5538         uint64_t ddr3pwarm                    : 1;  /**< Memory Reset
5539                                                          1 = Enable Preserve mode during warm reset */
5540         uint64_t ddr3rst                      : 1;  /**< Memory Reset
5541                                                          0 = Reset asserted
5542                                                          1 = Reset de-asserted */
5543 #else
5544         uint64_t ddr3rst                      : 1;
5545         uint64_t ddr3pwarm                    : 1;
5546         uint64_t ddr3psoft                    : 1;
5547         uint64_t ddr3psv                      : 1;
5548         uint64_t reserved_4_63                : 60;
5549 #endif
5550         } s;
5551         struct cvmx_lmcx_reset_ctl_s          cn63xx;
5552         struct cvmx_lmcx_reset_ctl_s          cn63xxp1;
5553 };
5554 typedef union cvmx_lmcx_reset_ctl cvmx_lmcx_reset_ctl_t;
5555
5556 /**
5557  * cvmx_lmc#_rlevel_ctl
5558  */
5559 union cvmx_lmcx_rlevel_ctl
5560 {
5561         uint64_t u64;
5562         struct cvmx_lmcx_rlevel_ctl_s
5563         {
5564 #if __BYTE_ORDER == __BIG_ENDIAN
5565         uint64_t reserved_22_63               : 42;
5566         uint64_t delay_unload_3               : 1;  /**< When set, unload the PHY silo one cycle later
5567                                                          during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 3
5568                                                          DELAY_UNLOAD_3 should normally be set, particularly at higher speeds. */
5569         uint64_t delay_unload_2               : 1;  /**< When set, unload the PHY silo one cycle later
5570                                                          during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 2
5571                                                          DELAY_UNLOAD_2 should normally not be set. */
5572         uint64_t delay_unload_1               : 1;  /**< When set, unload the PHY silo one cycle later
5573                                                          during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 1
5574                                                          DELAY_UNLOAD_1 should normally not be set. */
5575         uint64_t delay_unload_0               : 1;  /**< When set, unload the PHY silo one cycle later
5576                                                          during read-leveling if LMC*_RLEVEL_RANKi[BYTE*<1:0>] = 0
5577                                                          DELAY_UNLOAD_0 should normally not be set. */
5578         uint64_t bitmask                      : 8;  /**< Mask to select bit lanes on which read-leveling
5579                                                          feedback is returned when OR_DIS is set to 1 */
5580         uint64_t or_dis                       : 1;  /**< Disable or'ing of bits in a byte lane when computing
5581                                                          the read-leveling bitmask
5582                                                          OR_DIS should normally not be set. */
5583         uint64_t offset_en                    : 1;  /**< When set, LMC attempts to select the read-leveling
5584                                                          setting that is LMC*RLEVEL_CTL[OFFSET] settings earlier than the
5585                                                          last passing read-leveling setting in the largest
5586                                                          contiguous sequence of passing settings.
5587                                                          When clear, or if the setting selected by LMC*RLEVEL_CTL[OFFSET]
5588                                                          did not pass, LMC selects the middle setting in the
5589                                                          largest contiguous sequence of passing settings,
5590                                                          rounding earlier when necessary. */
5591         uint64_t offset                       : 4;  /**< The offset used when LMC*RLEVEL_CTL[OFFSET] is set */
5592         uint64_t byte                         : 4;  /**< 0 <= BYTE <= 8
5593                                                          Byte index for which bitmask results are saved
5594                                                          in LMC*_RLEVEL_DBG */
5595 #else
5596         uint64_t byte                         : 4;
5597         uint64_t offset                       : 4;
5598         uint64_t offset_en                    : 1;
5599         uint64_t or_dis                       : 1;
5600         uint64_t bitmask                      : 8;
5601         uint64_t delay_unload_0               : 1;
5602         uint64_t delay_unload_1               : 1;
5603         uint64_t delay_unload_2               : 1;
5604         uint64_t delay_unload_3               : 1;
5605         uint64_t reserved_22_63               : 42;
5606 #endif
5607         } s;
5608         struct cvmx_lmcx_rlevel_ctl_s         cn63xx;
5609         struct cvmx_lmcx_rlevel_ctl_cn63xxp1
5610         {
5611 #if __BYTE_ORDER == __BIG_ENDIAN
5612         uint64_t reserved_9_63                : 55;
5613         uint64_t offset_en                    : 1;  /**< When set, LMC attempts to select the read-leveling
5614                                                          setting that is LMC*RLEVEL_CTL[OFFSET] settings earlier than the
5615                                                          last passing read-leveling setting in the largest
5616                                                          contiguous sequence of passing settings.
5617                                                          When clear, or if the setting selected by LMC*RLEVEL_CTL[OFFSET]
5618                                                          did not pass, LMC selects the middle setting in the
5619                                                          largest contiguous sequence of passing settings,
5620                                                          rounding earlier when necessary. */
5621         uint64_t offset                       : 4;  /**< The offset used when LMC*RLEVEL_CTL[OFFSET] is set */
5622         uint64_t byte                         : 4;  /**< 0 <= BYTE <= 8
5623                                                          Byte index for which bitmask results are saved
5624                                                          in LMC*_RLEVEL_DBG */
5625 #else
5626         uint64_t byte                         : 4;
5627         uint64_t offset                       : 4;
5628         uint64_t offset_en                    : 1;
5629         uint64_t reserved_9_63                : 55;
5630 #endif
5631         } cn63xxp1;
5632 };
5633 typedef union cvmx_lmcx_rlevel_ctl cvmx_lmcx_rlevel_ctl_t;
5634
5635 /**
5636  * cvmx_lmc#_rlevel_dbg
5637  *
5638  * Notes:
5639  * A given read of LMC*_RLEVEL_DBG returns the read-leveling pass/fail results for all possible
5640  * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled.
5641  * LMC*_RLEVEL_CTL[BYTE] selects the particular byte.
5642  *
5643  * To get these pass/fail results for another different rank, you must run the hardware read-leveling
5644  * again. For example, it is possible to get the BITMASK results for every byte of every rank
5645  * if you run read-leveling separately for each rank, probing LMC*_RLEVEL_DBG between each
5646  * read-leveling.
5647  */
union cvmx_lmcx_rlevel_dbg
{
        uint64_t u64;                               /**< Whole CSR accessed as a single 64-bit value */
        struct cvmx_lmcx_rlevel_dbg_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t bitmask                      : 64; /**< Bitmask generated during deskew settings sweep
                                                         BITMASK[n]=0 means deskew setting n failed
                                                         BITMASK[n]=1 means deskew setting n passed
                                                         for 0 <= n <= 63 */
#else
        /* Little-endian view: same single 64-bit field */
        uint64_t bitmask                      : 64;
#endif
        } s;
        struct cvmx_lmcx_rlevel_dbg_s         cn63xx;   /**< CN63XX uses the common layout */
        struct cvmx_lmcx_rlevel_dbg_s         cn63xxp1; /**< CN63XX pass 1 uses the common layout */
};
typedef union cvmx_lmcx_rlevel_dbg cvmx_lmcx_rlevel_dbg_t;
5666
5667 /**
5668  * cvmx_lmc#_rlevel_rank#
5669  *
5670  * Notes:
5671  * This is four CSRs per LMC, one per each rank.
5672  *
5673  * Deskew setting is measured in units of 1/4 CK, so the above BYTE* values can range over 16 CKs.
5674  *
5675  * Each CSR is written by HW during a read-leveling sequence for the rank. (HW sets STATUS==3 after HW read-leveling completes for the rank.)
5676  * If HW is unable to find a match per LMC*_RLEVEL_CTL[OFFSET_ENA] and LMC*_RLEVEL_CTL[OFFSET], then HW will set LMC*_RLEVEL_RANKi[BYTE*<5:0>]
5677  * to  0.
5678  *
5679  * Each CSR may also be written by SW, but not while a read-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
5680  *
5681  * SW initiates a HW read-leveling sequence by programming LMC*_RLEVEL_CTL and writing INIT_START=1 with SEQUENCE=1.
5682  * See LMC*_RLEVEL_CTL.
5683  *
5684  * LMC*_RLEVEL_RANKi values for ranks i without attached DRAM should be set such that
5685  * they do not increase the range of possible BYTE values for any byte
5686  * lane. The easiest way to do this is to set
5687  *     LMC*_RLEVEL_RANKi = LMC*_RLEVEL_RANKj,
5688  * where j is some rank with attached DRAM whose LMC*_RLEVEL_RANKj is already fully initialized.
5689  */
union cvmx_lmcx_rlevel_rankx
{
        uint64_t u64;                               /**< Whole CSR accessed as a single 64-bit value */
        struct cvmx_lmcx_rlevel_rankx_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_56_63               : 8;
        uint64_t status                       : 2;  /**< Indicates status of the read-leveling and where
                                                         the BYTE* programmings in <53:0> came from:
                                                         0 = BYTE* values are their reset value
                                                         1 = BYTE* values were set via a CSR write to this register
                                                         2 = read-leveling sequence currently in progress (BYTE* values are unpredictable)
                                                         3 = BYTE* values came from a complete read-leveling sequence */
        uint64_t byte8                        : 6;  /**< Deskew setting
                                                         When ECC DRAM is not present (i.e. when DRAM is not
                                                         attached to chip signals DDR_CBS_0_* and DDR_CB[7:0]),
                                                         SW should write BYTE8 to a value that does
                                                         not increase the range of possible BYTE* values. The
                                                         easiest way to do this is to set
                                                         LMC*_RLEVEL_RANK*[BYTE8] = LMC*_RLEVEL_RANK*[BYTE0]
                                                         when there is no ECC DRAM, using the final BYTE0 value. */
        uint64_t byte7                        : 6;  /**< Deskew setting */
        uint64_t byte6                        : 6;  /**< Deskew setting */
        uint64_t byte5                        : 6;  /**< Deskew setting */
        uint64_t byte4                        : 6;  /**< Deskew setting */
        uint64_t byte3                        : 6;  /**< Deskew setting */
        uint64_t byte2                        : 6;  /**< Deskew setting */
        uint64_t byte1                        : 6;  /**< Deskew setting */
        uint64_t byte0                        : 6;  /**< Deskew setting */
#else
        /* Little-endian view: identical fields declared in reverse order */
        uint64_t byte0                        : 6;
        uint64_t byte1                        : 6;
        uint64_t byte2                        : 6;
        uint64_t byte3                        : 6;
        uint64_t byte4                        : 6;
        uint64_t byte5                        : 6;
        uint64_t byte6                        : 6;
        uint64_t byte7                        : 6;
        uint64_t byte8                        : 6;
        uint64_t status                       : 2;
        uint64_t reserved_56_63               : 8;
#endif
        } s;
        struct cvmx_lmcx_rlevel_rankx_s       cn63xx;   /**< CN63XX uses the common layout */
        struct cvmx_lmcx_rlevel_rankx_s       cn63xxp1; /**< CN63XX pass 1 uses the common layout */
};
typedef union cvmx_lmcx_rlevel_rankx cvmx_lmcx_rlevel_rankx_t;
5737
5738 /**
5739  * cvmx_lmc#_rodt_comp_ctl
5740  *
5741  * LMC_RODT_COMP_CTL = LMC Compensation control
5742  *
5743  */
union cvmx_lmcx_rodt_comp_ctl
{
        uint64_t u64;                               /**< Whole CSR accessed as a single 64-bit value */
        struct cvmx_lmcx_rodt_comp_ctl_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_17_63               : 47;
        uint64_t enable                       : 1;  /**< 0=not enabled, 1=enable */
        uint64_t reserved_12_15               : 4;
        uint64_t nctl                         : 4;  /**< Compensation control bits */
        uint64_t reserved_5_7                 : 3;
        uint64_t pctl                         : 5;  /**< Compensation control bits */
#else
        /* Little-endian view: identical fields declared in reverse order */
        uint64_t pctl                         : 5;
        uint64_t reserved_5_7                 : 3;
        uint64_t nctl                         : 4;
        uint64_t reserved_12_15               : 4;
        uint64_t enable                       : 1;
        uint64_t reserved_17_63               : 47;
#endif
        } s;
        /* All supported chip models share the common layout */
        struct cvmx_lmcx_rodt_comp_ctl_s      cn50xx;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn52xx;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn52xxp1;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn56xx;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn56xxp1;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn58xx;
        struct cvmx_lmcx_rodt_comp_ctl_s      cn58xxp1;
};
typedef union cvmx_lmcx_rodt_comp_ctl cvmx_lmcx_rodt_comp_ctl_t;
5774
5775 /**
5776  * cvmx_lmc#_rodt_ctl
5777  *
5778  * LMC_RODT_CTL = Obsolete LMC Read OnDieTermination control
5779  * See the description in LMC_WODT_CTL1. On Reads, Octeon only supports turning on ODT's in
5780  * the lower 2 DIMM's with the masks as below.
5781  *
5782  * Notes:
5783  * When a given RANK in position N is selected, the RODT _HI and _LO masks for that position are used.
5784  * Mask[3:0] is used for RODT control of the RANKs in positions 3, 2, 1, and 0, respectively.
5785  * In  64b mode, DIMMs are assumed to be ordered in the following order:
5786  *  position 3: [unused        , DIMM1_RANK1_LO]
5787  *  position 2: [unused        , DIMM1_RANK0_LO]
5788  *  position 1: [unused        , DIMM0_RANK1_LO]
5789  *  position 0: [unused        , DIMM0_RANK0_LO]
5790  * In 128b mode, DIMMs are assumed to be ordered in the following order:
5791  *  position 3: [DIMM3_RANK1_HI, DIMM1_RANK1_LO]
5792  *  position 2: [DIMM3_RANK0_HI, DIMM1_RANK0_LO]
5793  *  position 1: [DIMM2_RANK1_HI, DIMM0_RANK1_LO]
5794  *  position 0: [DIMM2_RANK0_HI, DIMM0_RANK0_LO]
5795  */
union cvmx_lmcx_rodt_ctl
{
        uint64_t u64;                               /**< Whole CSR accessed as a single 64-bit value */
        struct cvmx_lmcx_rodt_ctl_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63               : 32;
        uint64_t rodt_hi3                     : 4;  /**< Read ODT mask for position 3, data[127:64] */
        uint64_t rodt_hi2                     : 4;  /**< Read ODT mask for position 2, data[127:64] */
        uint64_t rodt_hi1                     : 4;  /**< Read ODT mask for position 1, data[127:64] */
        uint64_t rodt_hi0                     : 4;  /**< Read ODT mask for position 0, data[127:64] */
        uint64_t rodt_lo3                     : 4;  /**< Read ODT mask for position 3, data[ 63: 0] */
        uint64_t rodt_lo2                     : 4;  /**< Read ODT mask for position 2, data[ 63: 0] */
        uint64_t rodt_lo1                     : 4;  /**< Read ODT mask for position 1, data[ 63: 0] */
        uint64_t rodt_lo0                     : 4;  /**< Read ODT mask for position 0, data[ 63: 0] */
#else
        /* Little-endian view: identical fields declared in reverse order */
        uint64_t rodt_lo0                     : 4;
        uint64_t rodt_lo1                     : 4;
        uint64_t rodt_lo2                     : 4;
        uint64_t rodt_lo3                     : 4;
        uint64_t rodt_hi0                     : 4;
        uint64_t rodt_hi1                     : 4;
        uint64_t rodt_hi2                     : 4;
        uint64_t rodt_hi3                     : 4;
        uint64_t reserved_32_63               : 32;
#endif
        } s;
        /* All supported chip models share the common layout */
        struct cvmx_lmcx_rodt_ctl_s           cn30xx;
        struct cvmx_lmcx_rodt_ctl_s           cn31xx;
        struct cvmx_lmcx_rodt_ctl_s           cn38xx;
        struct cvmx_lmcx_rodt_ctl_s           cn38xxp2;
        struct cvmx_lmcx_rodt_ctl_s           cn50xx;
        struct cvmx_lmcx_rodt_ctl_s           cn52xx;
        struct cvmx_lmcx_rodt_ctl_s           cn52xxp1;
        struct cvmx_lmcx_rodt_ctl_s           cn56xx;
        struct cvmx_lmcx_rodt_ctl_s           cn56xxp1;
        struct cvmx_lmcx_rodt_ctl_s           cn58xx;
        struct cvmx_lmcx_rodt_ctl_s           cn58xxp1;
};
typedef union cvmx_lmcx_rodt_ctl cvmx_lmcx_rodt_ctl_t;
5836
5837 /**
5838  * cvmx_lmc#_rodt_mask
5839  *
5840  * LMC_RODT_MASK = LMC Read OnDieTermination mask
5841  * System designers may desire to terminate DQ/DQS lines for higher frequency DDR operations
5842  * especially on a multi-rank system. DDR3 DQ/DQS I/O's have built in
5843  * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
5844  * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts
5845  * in that DIMM. System designers may prefer different combinations of ODT ON's for reads
5846  * into different ranks. Octeon supports full programmability by way of the mask register below.
5847  * Each Rank position has its own 8-bit programmable field.
5848  * When the controller does a read to that rank, it sets the 4 ODT pins to the MASK pins below.
5849  * For eg., When doing a read from Rank0, a system designer may desire to terminate the lines
5850  * with the resistor on DIMM0/Rank1. The mask RODT_D0_R0 would then be [00000010].
5851  * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
5852  * required, write 0 in this register. Note that, as per the DDR3 specifications, the ODT pin
5853  * for the rank that is being read should always be 0.
5854  *
5855  * Notes:
5856  * When a given RANK is selected, the RODT mask for that RANK is used.  The resulting RODT mask is
5857  * driven to the DIMMs in the following manner:
5858  *             RANK_ENA=1                    RANK_ENA=0
5859  * Mask[3] -> DIMM1_ODT_1                    MBZ
5860  * Mask[2] -> DIMM1_ODT_0                    DIMM1_ODT_0
5861  * Mask[1] -> DIMM0_ODT_1                    MBZ
5862  * Mask[0] -> DIMM0_ODT_0                    DIMM0_ODT_0
5863  *
5864  * LMC always reads entire cache blocks and always reads them via two consecutive
5865  * read CAS operations to the same rank+bank+row spaced exactly 4 CK's apart.
5866  * When a RODT mask bit is set, LMC asserts the OCTEON ODT output
5867  * pin(s) starting (CL - CWL) CK's after the first read CAS operation. Then, OCTEON
5868  * normally continues to assert the ODT output pin(s) for 9+LMC*_CONTROL[RODT_BPRCH] more CK's
5869  * - for a total of 10+LMC*_CONTROL[RODT_BPRCH] CK's for the entire cache block read -
5870  * through the second read CAS operation of the cache block,
5871  * satisfying the 6 CK DDR3 ODTH8 requirements.
5872  * But it is possible for OCTEON to issue two cache block reads separated by as few as
5873  * RtR = 8 or 9 (10 if LMC*_CONTROL[RODT_BPRCH]=1) CK's. In that case, OCTEON asserts the ODT output pin(s)
5874  * for the RODT mask of the first cache block read for RtR CK's, then asserts
5875  * the ODT output pin(s) for the RODT mask of the second cache block read for 10+LMC*_CONTROL[RODT_BPRCH] CK's
5876  * (or less if a third cache block read follows within 8 or 9 (or 10) CK's of this second cache block read).
5877  * Note that it may be necessary to force LMC to space back-to-back cache block reads
5878  * to different ranks apart by at least 10+LMC*_CONTROL[RODT_BPRCH] CK's to prevent DDR3 ODTH8 violations.
5879  */
union cvmx_lmcx_rodt_mask
{
        uint64_t u64;                               /**< Whole CSR accessed as a single 64-bit value */
        struct cvmx_lmcx_rodt_mask_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t rodt_d3_r1                   : 8;  /**< Read ODT mask DIMM3, RANK1/DIMM3 in SingleRanked
                                                         *UNUSED IN 63xx, and MBZ* */
        uint64_t rodt_d3_r0                   : 8;  /**< Read ODT mask DIMM3, RANK0
                                                         *UNUSED IN 63xx, and MBZ* */
        uint64_t rodt_d2_r1                   : 8;  /**< Read ODT mask DIMM2, RANK1/DIMM2 in SingleRanked
                                                         *UNUSED IN 63xx, and MBZ* */
        uint64_t rodt_d2_r0                   : 8;  /**< Read ODT mask DIMM2, RANK0
                                                         *UNUSED IN 63xx, and MBZ* */
        uint64_t rodt_d1_r1                   : 8;  /**< Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked
                                                         if (RANK_ENA) then
                                                             RODT_D1_R1[3] must be 0
                                                         else
                                                             RODT_D1_R1[3:0] is not used and MBZ
                                                         *Upper 4 bits UNUSED IN 63xx, and MBZ* */
        uint64_t rodt_d1_r0                   : 8;  /**< Read ODT mask DIMM1, RANK0
                                                         if (RANK_ENA) then
                                                             RODT_D1_R0[2] must be 0
                                                         else
                                                             RODT_D1_R0[3:2,1] must be 0
                                                         *Upper 4 bits UNUSED IN 63xx, and MBZ* */
        uint64_t rodt_d0_r1                   : 8;  /**< Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked
                                                         if (RANK_ENA) then
                                                             RODT_D0_R1[1] must be 0
                                                         else
                                                             RODT_D0_R1[3:0] is not used and MBZ
                                                         *Upper 4 bits UNUSED IN 63xx, and MBZ* */
        uint64_t rodt_d0_r0                   : 8;  /**< Read ODT mask DIMM0, RANK0
                                                         if (RANK_ENA) then
                                                             RODT_D0_R0[0] must be 0
                                                         else
                                                             RODT_D0_R0[1:0,3] must be 0
                                                         *Upper 4 bits UNUSED IN 63xx, and MBZ* */
#else
        /* Little-endian view: identical fields declared in reverse order */
        uint64_t rodt_d0_r0                   : 8;
        uint64_t rodt_d0_r1                   : 8;
        uint64_t rodt_d1_r0                   : 8;
        uint64_t rodt_d1_r1                   : 8;
        uint64_t rodt_d2_r0                   : 8;
        uint64_t rodt_d2_r1                   : 8;
        uint64_t rodt_d3_r0                   : 8;
        uint64_t rodt_d3_r1                   : 8;
#endif
        } s;
        struct cvmx_lmcx_rodt_mask_s          cn63xx;   /**< CN63XX uses the common layout */
        struct cvmx_lmcx_rodt_mask_s          cn63xxp1; /**< CN63XX pass 1 uses the common layout */
};
typedef union cvmx_lmcx_rodt_mask cvmx_lmcx_rodt_mask_t;
5933
5934 /**
5935  * cvmx_lmc#_slot_ctl0
5936  *
5937  * LMC_SLOT_CTL0 = LMC Slot Control0
5938  * This register is an assortment of various control fields needed by the memory controller
5939  *
5940  * Notes:
5941  * If SW has not previously written to this register (since the last DRESET),
5942  * HW updates the fields in this register to the minimum allowed value
5943  * when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn, LMC*_CONTROL and
5944  * LMC*_MODEREG_PARAMS0 CSR's change. Ideally, only read this register
5945  * after LMC has been initialized and LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn
5946  * have valid data.
5947  *
5948  * The interpretation of the fields in this CSR depends on LMC*_CONFIG[DDR2T]:
5949  *  - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
5950  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
5951  *    from different cache blocks.
5952  *  - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
5953  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
5954  *    from different cache blocks. FieldValue = 0 is always illegal in this
5955  *    case.
5956  *
5957  * The hardware-calculated minimums are:
5958  *
5959  * min R2R_INIT = 1 - LMC*_CONFIG[DDR2T]
5960  * min R2W_INIT = 5 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew) - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
5961  * min W2R_INIT = 2 - LMC*_CONFIG[DDR2T] + LMC*_TIMING_PARAMS1[TWTR] + WL
5962  * min W2W_INIT = 1 - LMC*_CONFIG[DDR2T]
5963  *
5964  * where
5965  *
5966  * RL        = CL  + AL (LMC*_MODEREG_PARAMS0[CL] selects CL, LMC*_MODEREG_PARAMS0[AL] selects AL)
5967  * WL        = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
5968  * MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1                          (max is across all ranks i (0..3) and bytes j (0..8))
5969  * MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX]     (min is across all ranks i (0..3) and bytes j (0..8))
5970  *
5971  * R2W_INIT has 1 CK cycle built in for OCTEON-internal ODT settling/channel turnaround time.
5972  */
union cvmx_lmcx_slot_ctl0
{
        uint64_t u64;                               /**< Whole CSR accessed as a single 64-bit value */
        struct cvmx_lmcx_slot_ctl0_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_24_63               : 40;
        uint64_t w2w_init                     : 6;  /**< Write-to-write spacing control
                                                         for back to back accesses to the same rank and DIMM */
        uint64_t w2r_init                     : 6;  /**< Write-to-read spacing control
                                                         for back to back accesses to the same rank and DIMM */
        uint64_t r2w_init                     : 6;  /**< Read-to-write spacing control
                                                         for back to back accesses to the same rank and DIMM */
        uint64_t r2r_init                     : 6;  /**< Read-to-read spacing control
                                                         for back to back accesses to the same rank and DIMM */
#else
        /* Little-endian view: identical fields declared in reverse order */
        uint64_t r2r_init                     : 6;
        uint64_t r2w_init                     : 6;
        uint64_t w2r_init                     : 6;
        uint64_t w2w_init                     : 6;
        uint64_t reserved_24_63               : 40;
#endif
        } s;
        struct cvmx_lmcx_slot_ctl0_s          cn63xx;   /**< CN63XX uses the common layout */
        struct cvmx_lmcx_slot_ctl0_s          cn63xxp1; /**< CN63XX pass 1 uses the common layout */
};
typedef union cvmx_lmcx_slot_ctl0 cvmx_lmcx_slot_ctl0_t;
6000
6001 /**
6002  * cvmx_lmc#_slot_ctl1
6003  *
6004  * LMC_SLOT_CTL1 = LMC Slot Control1
6005  * This register is an assortment of various control fields needed by the memory controller
6006  *
6007  * Notes:
6008  * If SW has not previously written to this register (since the last DRESET),
6009  * HW updates the fields in this register to the minimum allowed value
6010  * when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn, LMC*_CONTROL and
6011  * LMC*_MODEREG_PARAMS0 CSR's change. Ideally, only read this register
6012  * after LMC has been initialized and LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn
6013  * have valid data.
6014  *
6015  * The interpretation of the fields in this CSR depends on LMC*_CONFIG[DDR2T]:
6016  *  - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
6017  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
6018  *    from different cache blocks.
6019  *  - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
6020  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
6021  *    from different cache blocks. FieldValue = 0 is always illegal in this
6022  *    case.
6023  *
6024  * The hardware-calculated minimums are:
6025  *
6026  * min R2R_XRANK_INIT = 2 - LMC*_CONFIG[DDR2T] + MaxRdSkew - MinRdSkew
6027  * min R2W_XRANK_INIT = 5 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew) - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
6028  * min W2R_XRANK_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxWrSkew + LMC*_CONTROL[FPRCH2]
6029  * min W2W_XRANK_INIT = 4 - LMC*_CONFIG[DDR2T] + MaxWrSkew - MinWrSkew
6030  *
6031  * where
6032  *
6033  * RL        = CL  + AL (LMC*_MODEREG_PARAMS0[CL] selects CL, LMC*_MODEREG_PARAMS0[AL] selects AL)
6034  * WL        = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
6035  * MinRdSkew = min(LMC*_RLEVEL_RANKi[BYTEj]/4)                              (min is across all ranks i (0..3) and bytes j (0..8))
6036  * MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1                          (max is across all ranks i (0..3) and bytes j (0..8))
6037  * MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX]     (min is across all ranks i (0..3) and bytes j (0..8))
6038  * MaxWrSkew = max(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] + 1 (max is across all ranks i (0..3) and bytes j (0..8))
6039  *
6040  * R2W_XRANK_INIT has 1 extra CK cycle built in for OCTEON-internal ODT settling/channel turnaround time.
6041  *
6042  * W2R_XRANK_INIT has 1 extra CK cycle built in for channel turnaround time.
6043  */
union cvmx_lmcx_slot_ctl1
{
        uint64_t u64;                               /**< Whole CSR accessed as a single 64-bit value */
        struct cvmx_lmcx_slot_ctl1_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_24_63               : 40;
        uint64_t w2w_xrank_init               : 6;  /**< Write-to-write spacing control
                                                         for back to back accesses across ranks of the same DIMM */
        uint64_t w2r_xrank_init               : 6;  /**< Write-to-read spacing control
                                                         for back to back accesses across ranks of the same DIMM */
        uint64_t r2w_xrank_init               : 6;  /**< Read-to-write spacing control
                                                         for back to back accesses across ranks of the same DIMM */
        uint64_t r2r_xrank_init               : 6;  /**< Read-to-read spacing control
                                                         for back to back accesses across ranks of the same DIMM */
#else
        /* Little-endian view: identical fields declared in reverse order */
        uint64_t r2r_xrank_init               : 6;
        uint64_t r2w_xrank_init               : 6;
        uint64_t w2r_xrank_init               : 6;
        uint64_t w2w_xrank_init               : 6;
        uint64_t reserved_24_63               : 40;
#endif
        } s;
        struct cvmx_lmcx_slot_ctl1_s          cn63xx;   /**< CN63XX uses the common layout */
        struct cvmx_lmcx_slot_ctl1_s          cn63xxp1; /**< CN63XX pass 1 uses the common layout */
};
typedef union cvmx_lmcx_slot_ctl1 cvmx_lmcx_slot_ctl1_t;
6071
6072 /**
6073  * cvmx_lmc#_slot_ctl2
6074  *
6075  * LMC_SLOT_CTL2 = LMC Slot Control2
6076  * This register is an assortment of various control fields needed by the memory controller
6077  *
6078  * Notes:
6079  * If SW has not previously written to this register (since the last DRESET),
6080  * HW updates the fields in this register to the minimum allowed value
6081  * when any of LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn, LMC*_CONTROL and
6082  * LMC*_MODEREG_PARAMS0 CSR's change. Ideally, only read this register
6083  * after LMC has been initialized and LMC*_RLEVEL_RANKn, LMC*_WLEVEL_RANKn
6084  * have valid data.
6085  *
6086  * The interpretation of the fields in this CSR depends on LMC*_CONFIG[DDR2T]:
6087  *  - If LMC*_CONFIG[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles
6088  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
6089  *    from different cache blocks.
6090  *  - If LMC*_CONFIG[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles
6091  *    between when the DRAM part registers CAS commands of the 1st and 2nd types
6092  *    from different cache blocks. FieldValue = 0 is always illegal in this
6093  *    case.
6094  *
6095  * The hardware-calculated minimums are:
6096  *
6097  * min R2R_XDIMM_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxRdSkew - MinRdSkew
6098  * min R2W_XDIMM_INIT = 6 - LMC*_CONFIG[DDR2T] + (RL + MaxRdSkew) - (WL + MinWrSkew) + LMC*_CONTROL[BPRCH]
6099  * min W2R_XDIMM_INIT = 3 - LMC*_CONFIG[DDR2T] + MaxWrSkew + LMC*_CONTROL[FPRCH2]
6100  * min W2W_XDIMM_INIT = 5 - LMC*_CONFIG[DDR2T] + MaxWrSkew - MinWrSkew
6101  *
6102  * where
6103  *
6104  * RL        = CL  + AL (LMC*_MODEREG_PARAMS0[CL] selects CL, LMC*_MODEREG_PARAMS0[AL] selects AL)
6105  * WL        = CWL + AL (LMC*_MODEREG_PARAMS0[CWL] selects CWL)
6106  * MinRdSkew = min(LMC*_RLEVEL_RANKi[BYTEj]/4)                              (min is across all ranks i (0..3) and bytes j (0..8))
6107  * MaxRdSkew = max(LMC*_RLEVEL_RANKi[BYTEj]/4) + 1                          (max is across all ranks i (0..3) and bytes j (0..8))
6108  * MinWrSkew = min(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX]     (min is across all ranks i (0..3) and bytes j (0..8))
6109  * MaxWrSkew = max(LMC*_WLEVEL_RANKi[BYTEj]/8) - LMC*_CONFIG[EARLY_DQX] + 1 (max is across all ranks i (0..3) and bytes j (0..8))
6110  *
6111  * R2W_XDIMM_INIT has 2 extra CK cycles built in for OCTEON-internal ODT settling/channel turnaround time.
6112  *
 * R2R_XDIMM_INIT, W2R_XDIMM_INIT, W2W_XDIMM_INIT have 1 extra CK cycle built in for channel turnaround time.
6114  */
union cvmx_lmcx_slot_ctl2
{
        uint64_t u64;                               /**< Whole CSR accessed as a single 64-bit value */
        struct cvmx_lmcx_slot_ctl2_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_24_63               : 40;
        uint64_t w2w_xdimm_init               : 6;  /**< Write-to-write spacing control
                                                         for back to back accesses across DIMMs */
        uint64_t w2r_xdimm_init               : 6;  /**< Write-to-read spacing control
                                                         for back to back accesses across DIMMs */
        uint64_t r2w_xdimm_init               : 6;  /**< Read-to-write spacing control
                                                         for back to back accesses across DIMMs */
        uint64_t r2r_xdimm_init               : 6;  /**< Read-to-read spacing control
                                                         for back to back accesses across DIMMs */
#else
        /* Little-endian view: identical fields declared in reverse order */
        uint64_t r2r_xdimm_init               : 6;
        uint64_t r2w_xdimm_init               : 6;
        uint64_t w2r_xdimm_init               : 6;
        uint64_t w2w_xdimm_init               : 6;
        uint64_t reserved_24_63               : 40;
#endif
        } s;
        struct cvmx_lmcx_slot_ctl2_s          cn63xx;   /**< CN63XX uses the common layout */
        struct cvmx_lmcx_slot_ctl2_s          cn63xxp1; /**< CN63XX pass 1 uses the common layout */
};
typedef union cvmx_lmcx_slot_ctl2 cvmx_lmcx_slot_ctl2_t;
6142
6143 /**
6144  * cvmx_lmc#_timing_params0
 *
 * DDR3 timing-parameter CSR (part 0).  Each field encodes one DDR3 timing
 * constraint in units of the DDR clock cycle tCYC (not the data rate); the
 * per-field comments give the encoding formula and the typical spec value.
 * The cn63xx and cn63xxp1 member structs differ only in the low/high
 * reserved bits (see the notes on each below).
6145  */
6146 union cvmx_lmcx_timing_params0
6147 {
6148         uint64_t u64;
6149         struct cvmx_lmcx_timing_params0_s
6150         {
        /* Bit-field order is reversed between the endianness branches so
           each field keeps the same hardware bit position. */
6151 #if __BYTE_ORDER == __BIG_ENDIAN
6152         uint64_t reserved_47_63               : 17;
6153         uint64_t trp_ext                      : 1;  /**< Indicates tRP constraints.
6154                                                          Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
6155                                                          + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
6156                                                          where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
6157                                                          is the DDR clock frequency (not data rate).
6158                                                          TYP tRP=10-15ns
6159                                                          TYP tRTP=max(4nCK, 7.5ns) */
6160         uint64_t tcksre                       : 4;  /**< Indicates tCKSRE constraints.
6161                                                          Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
6162                                                          where tCKSRE is from the DDR3 spec, and tCYC(ns)
6163                                                          is the DDR clock frequency (not data rate).
6164                                                          TYP=max(5nCK, 10ns) */
6165         uint64_t trp                          : 4;  /**< Indicates tRP constraints.
6166                                                          Set TRP (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
6167                                                          + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
6168                                                          where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
6169                                                          is the DDR clock frequency (not data rate).
6170                                                          TYP tRP=10-15ns
6171                                                          TYP tRTP=max(4nCK, 7.5ns) */
6172         uint64_t tzqinit                      : 4;  /**< Indicates tZQINIT constraints.
6173                                                          Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
6174                                                          where tZQINIT is from the DDR3 spec, and tCYC(ns)
6175                                                          is the DDR clock frequency (not data rate).
6176                                                          TYP=2 (equivalent to 512) */
6177         uint64_t tdllk                        : 4;  /**< Indicates tDLLK constraints.
6178                                                          Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
6179                                                          where tDLLK is from the DDR3 spec, and tCYC(ns)
6180                                                          is the DDR clock frequency (not data rate).
6181                                                          TYP=2 (equivalent to 512)
6182                                                          This parameter is used in self-refresh exit
6183                                                          and assumed to be greater than tRFC */
6184         uint64_t tmod                         : 4;  /**< Indicates tMOD constraints.
6185                                                          Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
6186                                                          where tMOD is from the DDR3 spec, and tCYC(ns)
6187                                                          is the DDR clock frequency (not data rate).
6188                                                          TYP=max(12nCK, 15ns) */
6189         uint64_t tmrd                         : 4;  /**< Indicates tMRD constraints.
6190                                                          Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
6191                                                          where tMRD is from the DDR3 spec, and tCYC(ns)
6192                                                          is the DDR clock frequency (not data rate).
6193                                                          TYP=4nCK */
6194         uint64_t txpr                         : 4;  /**< Indicates tXPR constraints.
6195                                                          Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
6196                                                          where tXPR is from the DDR3 spec, and tCYC(ns)
6197                                                          is the DDR clock frequency (not data rate).
6198                                                          TYP=max(5nCK, tRFC+10ns) */
6199         uint64_t tcke                         : 4;  /**< Indicates tCKE constraints.
6200                                                          Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
6201                                                          where tCKE is from the DDR3 spec, and tCYC(ns)
6202                                                          is the DDR clock frequency (not data rate).
6203                                                          TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
6204         uint64_t tzqcs                        : 4;  /**< Indicates tZQCS constraints.
6205                                                          Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
6206                                                          where tZQCS is from the DDR3 spec, and tCYC(ns)
6207                                                          is the DDR clock frequency (not data rate).
6208                                                          TYP=4 (equivalent to 64) */
6209         uint64_t tckeon                       : 10; /**< Reserved. Should be written to zero. */
6210 #else
6211         uint64_t tckeon                       : 10;
6212         uint64_t tzqcs                        : 4;
6213         uint64_t tcke                         : 4;
6214         uint64_t txpr                         : 4;
6215         uint64_t tmrd                         : 4;
6216         uint64_t tmod                         : 4;
6217         uint64_t tdllk                        : 4;
6218         uint64_t tzqinit                      : 4;
6219         uint64_t trp                          : 4;
6220         uint64_t tcksre                       : 4;
6221         uint64_t trp_ext                      : 1;
6222         uint64_t reserved_47_63               : 17;
6223 #endif
6224         } s;
        /* cn63xx layout: identical to the common layout except that the low
           10 bits are plain reserved_0_9 rather than the TCKEON field. */
6225         struct cvmx_lmcx_timing_params0_cn63xx
6226         {
6227 #if __BYTE_ORDER == __BIG_ENDIAN
6228         uint64_t reserved_47_63               : 17;
6229         uint64_t trp_ext                      : 1;  /**< Indicates tRP constraints.
6230                                                          Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
6231                                                          + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
6232                                                          where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
6233                                                          is the DDR clock frequency (not data rate).
6234                                                          TYP tRP=10-15ns
6235                                                          TYP tRTP=max(4nCK, 7.5ns) */
6236         uint64_t tcksre                       : 4;  /**< Indicates tCKSRE constraints.
6237                                                          Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
6238                                                          where tCKSRE is from the DDR3 spec, and tCYC(ns)
6239                                                          is the DDR clock frequency (not data rate).
6240                                                          TYP=max(5nCK, 10ns) */
6241         uint64_t trp                          : 4;  /**< Indicates tRP constraints.
6242                                                          Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
6243                                                          + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
6244                                                          where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
6245                                                          is the DDR clock frequency (not data rate).
6246                                                          TYP tRP=10-15ns
6247                                                          TYP tRTP=max(4nCK, 7.5ns) */
6248         uint64_t tzqinit                      : 4;  /**< Indicates tZQINIT constraints.
6249                                                          Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
6250                                                          where tZQINIT is from the DDR3 spec, and tCYC(ns)
6251                                                          is the DDR clock frequency (not data rate).
6252                                                          TYP=2 (equivalent to 512) */
6253         uint64_t tdllk                        : 4;  /**< Indicates tDLLK constraints.
6254                                                          Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
6255                                                          where tDLLK is from the DDR3 spec, and tCYC(ns)
6256                                                          is the DDR clock frequency (not data rate).
6257                                                          TYP=2 (equivalent to 512)
6258                                                          This parameter is used in self-refresh exit
6259                                                          and assumed to be greater than tRFC */
6260         uint64_t tmod                         : 4;  /**< Indicates tMOD constraints.
6261                                                          Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
6262                                                          where tMOD is from the DDR3 spec, and tCYC(ns)
6263                                                          is the DDR clock frequency (not data rate).
6264                                                          TYP=max(12nCK, 15ns) */
6265         uint64_t tmrd                         : 4;  /**< Indicates tMRD constraints.
6266                                                          Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
6267                                                          where tMRD is from the DDR3 spec, and tCYC(ns)
6268                                                          is the DDR clock frequency (not data rate).
6269                                                          TYP=4nCK */
6270         uint64_t txpr                         : 4;  /**< Indicates tXPR constraints.
6271                                                          Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
6272                                                          where tXPR is from the DDR3 spec, and tCYC(ns)
6273                                                          is the DDR clock frequency (not data rate).
6274                                                          TYP=max(5nCK, tRFC+10ns) */
6275         uint64_t tcke                         : 4;  /**< Indicates tCKE constraints.
6276                                                          Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
6277                                                          where tCKE is from the DDR3 spec, and tCYC(ns)
6278                                                          is the DDR clock frequency (not data rate).
6279                                                          TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
6280         uint64_t tzqcs                        : 4;  /**< Indicates tZQCS constraints.
6281                                                          Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
6282                                                          where tZQCS is from the DDR3 spec, and tCYC(ns)
6283                                                          is the DDR clock frequency (not data rate).
6284                                                          TYP=4 (equivalent to 64) */
6285         uint64_t reserved_0_9                 : 10;
6286 #else
6287         uint64_t reserved_0_9                 : 10;
6288         uint64_t tzqcs                        : 4;
6289         uint64_t tcke                         : 4;
6290         uint64_t txpr                         : 4;
6291         uint64_t tmrd                         : 4;
6292         uint64_t tmod                         : 4;
6293         uint64_t tdllk                        : 4;
6294         uint64_t tzqinit                      : 4;
6295         uint64_t trp                          : 4;
6296         uint64_t tcksre                       : 4;
6297         uint64_t trp_ext                      : 1;
6298         uint64_t reserved_47_63               : 17;
6299 #endif
6300         } cn63xx;
        /* cn63xxp1 layout: no TRP_EXT bit (bits 63:46 reserved) and the low
           10 bits are the TCKEON field (reserved, write as zero). */
6301         struct cvmx_lmcx_timing_params0_cn63xxp1
6302         {
6303 #if __BYTE_ORDER == __BIG_ENDIAN
6304         uint64_t reserved_46_63               : 18;
6305         uint64_t tcksre                       : 4;  /**< Indicates tCKSRE constraints.
6306                                                          Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
6307                                                          where tCKSRE is from the DDR3 spec, and tCYC(ns)
6308                                                          is the DDR clock frequency (not data rate).
6309                                                          TYP=max(5nCK, 10ns) */
6310         uint64_t trp                          : 4;  /**< Indicates tRP constraints.
6311                                                          Set TRP (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
6312                                                          + (RNDUP[tRTP(ns)/tCYC(ns)]-4)-1,
6313                                                          where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
6314                                                          is the DDR clock frequency (not data rate).
6315                                                          TYP tRP=10-15ns
6316                                                          TYP tRTP=max(4nCK, 7.5ns) */
6317         uint64_t tzqinit                      : 4;  /**< Indicates tZQINIT constraints.
6318                                                          Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
6319                                                          where tZQINIT is from the DDR3 spec, and tCYC(ns)
6320                                                          is the DDR clock frequency (not data rate).
6321                                                          TYP=2 (equivalent to 512) */
6322         uint64_t tdllk                        : 4;  /**< Indicates tDLLK constraints.
6323                                                          Set TDLLK (CSR field) = RNDUP[tDLLK(ns)/(256*tCYC(ns))],
6324                                                          where tDLLK is from the DDR3 spec, and tCYC(ns)
6325                                                          is the DDR clock frequency (not data rate).
6326                                                          TYP=2 (equivalent to 512)
6327                                                          This parameter is used in self-refresh exit
6328                                                          and assumed to be greater than tRFC */
6329         uint64_t tmod                         : 4;  /**< Indicates tMOD constraints.
6330                                                          Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
6331                                                          where tMOD is from the DDR3 spec, and tCYC(ns)
6332                                                          is the DDR clock frequency (not data rate).
6333                                                          TYP=max(12nCK, 15ns) */
6334         uint64_t tmrd                         : 4;  /**< Indicates tMRD constraints.
6335                                                          Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
6336                                                          where tMRD is from the DDR3 spec, and tCYC(ns)
6337                                                          is the DDR clock frequency (not data rate).
6338                                                          TYP=4nCK */
6339         uint64_t txpr                         : 4;  /**< Indicates tXPR constraints.
6340                                                          Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
6341                                                          where tXPR is from the DDR3 spec, and tCYC(ns)
6342                                                          is the DDR clock frequency (not data rate).
6343                                                          TYP=max(5nCK, tRFC+10ns) */
6344         uint64_t tcke                         : 4;  /**< Indicates tCKE constraints.
6345                                                          Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
6346                                                          where tCKE is from the DDR3 spec, and tCYC(ns)
6347                                                          is the DDR clock frequency (not data rate).
6348                                                          TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
6349         uint64_t tzqcs                        : 4;  /**< Indicates tZQCS constraints.
6350                                                          Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
6351                                                          where tZQCS is from the DDR3 spec, and tCYC(ns)
6352                                                          is the DDR clock frequency (not data rate).
6353                                                          TYP=4 (equivalent to 64) */
6354         uint64_t tckeon                       : 10; /**< Reserved. Should be written to zero. */
6355 #else
6356         uint64_t tckeon                       : 10;
6357         uint64_t tzqcs                        : 4;
6358         uint64_t tcke                         : 4;
6359         uint64_t txpr                         : 4;
6360         uint64_t tmrd                         : 4;
6361         uint64_t tmod                         : 4;
6362         uint64_t tdllk                        : 4;
6363         uint64_t tzqinit                      : 4;
6364         uint64_t trp                          : 4;
6365         uint64_t tcksre                       : 4;
6366         uint64_t reserved_46_63               : 18;
6367 #endif
6368         } cn63xxp1;
6369 };
6370 typedef union cvmx_lmcx_timing_params0 cvmx_lmcx_timing_params0_t;
6371
6372 /**
6373  * cvmx_lmc#_timing_params1
6374  */
6375 union cvmx_lmcx_timing_params1
6376 {
6377         uint64_t u64;
6378         struct cvmx_lmcx_timing_params1_s
6379         {
6380 #if __BYTE_ORDER == __BIG_ENDIAN
6381         uint64_t reserved_47_63               : 17;
6382         uint64_t tras_ext                     : 1;  /**< Indicates tRAS constraints.
6383                                                          Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
6384                                                          where tRAS is from the DDR3 spec, and tCYC(ns)
6385                                                          is the DDR clock frequency (not data rate).
6386                                                          TYP=35ns-9*tREFI
6387                                                              - 000000: RESERVED
6388                                                              - 000001: 2 tCYC
6389                                                              - 000010: 3 tCYC
6390                                                              - ...
6391                                                              - 111111: 64 tCYC */
6392         uint64_t txpdll                       : 5;  /**< Indicates tXPDLL constraints.
6393                                                          Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
6394                                                          where tXPDLL is from the DDR3 spec, and tCYC(ns)
6395                                                          is the DDR clock frequency (not data rate).
6396                                                          TYP=max(10nCK, 24ns) */
6397         uint64_t tfaw                         : 5;  /**< Indicates tFAW constraints.
6398                                                          Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
6399                                                          where tFAW is from the DDR3 spec, and tCYC(ns)
6400                                                          is the DDR clock frequency (not data rate).
6401                                                          TYP=30-40ns */
6402         uint64_t twldqsen                     : 4;  /**< Indicates tWLDQSEN constraints.
6403                                                          Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
6404                                                          where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
6405                                                          is the DDR clock frequency (not data rate).
6406                                                          TYP=max(25nCK) */
6407         uint64_t twlmrd                       : 4;  /**< Indicates tWLMRD constraints.
6408                                                          Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
6409                                                          where tWLMRD is from the DDR3 spec, and tCYC(ns)
6410                                                          is the DDR clock frequency (not data rate).
6411                                                          TYP=max(40nCK) */
6412         uint64_t txp                          : 3;  /**< Indicates tXP constraints.
6413                                                          Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
6414                                                          where tXP is from the DDR3 spec, and tCYC(ns)
6415                                                          is the DDR clock frequency (not data rate).
6416                                                          TYP=max(3nCK, 7.5ns) */
6417         uint64_t trrd                         : 3;  /**< Indicates tRRD constraints.
6418                                                          Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
6419                                                          where tRRD is from the DDR3 spec, and tCYC(ns)
6420                                                          is the DDR clock frequency (not data rate).
6421                                                          TYP=max(4nCK, 10ns)
6422                                                             - 000: RESERVED
6423                                                             - 001: 3 tCYC
6424                                                             - ...
6425                                                             - 110: 8 tCYC
6426                                                             - 111: 9 tCYC */
6427         uint64_t trfc                         : 5;  /**< Indicates tRFC constraints.
6428                                                          Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
6429                                                          where tRFC is from the DDR3 spec, and tCYC(ns)
6430                                                          is the DDR clock frequency (not data rate).
6431                                                          TYP=90-350ns
6432                                                               - 00000: RESERVED
6433                                                               - 00001: 8 tCYC
6434                                                               - 00010: 16 tCYC
6435                                                               - 00011: 24 tCYC
6436                                                               - 00100: 32 tCYC
6437                                                               - ...
6438                                                               - 11110: 240 tCYC
6439                                                               - 11111: 248 tCYC */
6440         uint64_t twtr                         : 4;  /**< Indicates tWTR constraints.
6441                                                          Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
6442                                                          where tWTR is from the DDR3 spec, and tCYC(ns)
6443                                                          is the DDR clock frequency (not data rate).
6444                                                          TYP=max(4nCK, 7.5ns)
6445                                                              - 0000: RESERVED
6446                                                              - 0001: 2
6447                                                              - ...
6448                                                              - 0111: 8
6449                                                              - 1000-1111: RESERVED */
6450         uint64_t trcd                         : 4;  /**< Indicates tRCD constraints.
6451                                                          Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
6452                                                          where tRCD is from the DDR3 spec, and tCYC(ns)
6453                                                          is the DDR clock frequency (not data rate).
6454                                                          TYP=10-15ns
6455                                                              - 0000: RESERVED
6456                                                              - 0001: 2 (2 is the smallest value allowed)
6457                                                              - 0002: 2
6458                                                              - ...
6459                                                              - 1001: 9
6460                                                              - 1010-1111: RESERVED
6461                                                          In 2T mode, make this register TRCD-1, not going
6462                                                          below 2. */
6463         uint64_t tras                         : 5;  /**< Indicates tRAS constraints.
6464                                                          Set TRAS (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
6465                                                          where tRAS is from the DDR3 spec, and tCYC(ns)
6466                                                          is the DDR clock frequency (not data rate).
6467                                                          TYP=35ns-9*tREFI
6468                                                              - 00000: RESERVED
6469                                                              - 00001: 2 tCYC
6470                                                              - 00010: 3 tCYC
6471                                                              - ...
6472                                                              - 11111: 32 tCYC */
6473         uint64_t tmprr                        : 4;  /**< Indicates tMPRR constraints.
6474                                                          Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
6475                                                          where tMPRR is from the DDR3 spec, and tCYC(ns)
6476                                                          is the DDR clock frequency (not data rate).
6477                                                          TYP=1nCK */
6478 #else
6479         uint64_t tmprr                        : 4;
6480         uint64_t tras                         : 5;
6481         uint64_t trcd                         : 4;
6482         uint64_t twtr                         : 4;
6483         uint64_t trfc                         : 5;
6484         uint64_t trrd                         : 3;
6485         uint64_t txp                          : 3;
6486         uint64_t twlmrd                       : 4;
6487         uint64_t twldqsen                     : 4;
6488         uint64_t tfaw                         : 5;
6489         uint64_t txpdll                       : 5;
6490         uint64_t tras_ext                     : 1;
6491         uint64_t reserved_47_63               : 17;
6492 #endif
6493         } s;
6494         struct cvmx_lmcx_timing_params1_s     cn63xx;
6495         struct cvmx_lmcx_timing_params1_cn63xxp1
6496         {
6497 #if __BYTE_ORDER == __BIG_ENDIAN
6498         uint64_t reserved_46_63               : 18;
6499         uint64_t txpdll                       : 5;  /**< Indicates tXPDLL constraints.
6500                                                          Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
6501                                                          where tXPDLL is from the DDR3 spec, and tCYC(ns)
6502                                                          is the DDR clock frequency (not data rate).
6503                                                          TYP=max(10nCK, 24ns) */
6504         uint64_t tfaw                         : 5;  /**< Indicates tFAW constraints.
6505                                                          Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
6506                                                          where tFAW is from the DDR3 spec, and tCYC(ns)
6507                                                          is the DDR clock frequency (not data rate).
6508                                                          TYP=30-40ns */
6509         uint64_t twldqsen                     : 4;  /**< Indicates tWLDQSEN constraints.
6510                                                          Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
6511                                                          where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
6512                                                          is the DDR clock frequency (not data rate).
6513                                                          TYP=max(25nCK) */
6514         uint64_t twlmrd                       : 4;  /**< Indicates tWLMRD constraints.
6515                                                          Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
6516                                                          where tWLMRD is from the DDR3 spec, and tCYC(ns)
6517                                                          is the DDR clock frequency (not data rate).
6518                                                          TYP=max(40nCK) */
6519         uint64_t txp                          : 3;  /**< Indicates tXP constraints.
6520                                                          Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
6521                                                          where tXP is from the DDR3 spec, and tCYC(ns)
6522                                                          is the DDR clock frequency (not data rate).
6523                                                          TYP=max(3nCK, 7.5ns) */
6524         uint64_t trrd                         : 3;  /**< Indicates tRRD constraints.
6525                                                          Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
6526                                                          where tRRD is from the DDR3 spec, and tCYC(ns)
6527                                                          is the DDR clock frequency (not data rate).
6528                                                          TYP=max(4nCK, 10ns)
6529                                                             - 000: RESERVED
6530                                                             - 001: 3 tCYC
6531                                                             - ...
6532                                                             - 110: 8 tCYC
6533                                                             - 111: 9 tCYC */
6534         uint64_t trfc                         : 5;  /**< Indicates tRFC constraints.
6535                                                          Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
6536                                                          where tRFC is from the DDR3 spec, and tCYC(ns)
6537                                                          is the DDR clock frequency (not data rate).
6538                                                          TYP=90-350ns
6539                                                               - 00000: RESERVED
6540                                                               - 00001: 8 tCYC
6541                                                               - 00010: 16 tCYC
6542                                                               - 00011: 24 tCYC
6543                                                               - 00100: 32 tCYC
6544                                                               - ...
6545                                                               - 11110: 240 tCYC
6546                                                               - 11111: 248 tCYC */
6547         uint64_t twtr                         : 4;  /**< Indicates tWTR constraints.
6548                                                          Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
6549                                                          where tWTR is from the DDR3 spec, and tCYC(ns)
6550                                                          is the DDR clock frequency (not data rate).
6551                                                          TYP=max(4nCK, 7.5ns)
6552                                                              - 0000: RESERVED
6553                                                              - 0001: 2
6554                                                              - ...
6555                                                              - 0111: 8
6556                                                              - 1000-1111: RESERVED */
6557         uint64_t trcd                         : 4;  /**< Indicates tRCD constraints.
6558                                                          Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
6559                                                          where tRCD is from the DDR3 spec, and tCYC(ns)
6560                                                          is the DDR clock frequency (not data rate).
6561                                                          TYP=10-15ns
6562                                                              - 0000: RESERVED
6563                                                              - 0001: 2 (2 is the smallest value allowed)
6564                                                              - 0002: 2
6565                                                              - ...
6566                                                              - 1001: 9
6567                                                              - 1010-1111: RESERVED
6568                                                          In 2T mode, make this register TRCD-1, not going
6569                                                          below 2. */
6570         uint64_t tras                         : 5;  /**< Indicates tRAS constraints.
6571                                                          Set TRAS (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
6572                                                          where tRAS is from the DDR3 spec, and tCYC(ns)
6573                                                          is the DDR clock frequency (not data rate).
6574                                                          TYP=35ns-9*tREFI
6575                                                              - 00000: RESERVED
6576                                                              - 00001: 2 tCYC
6577                                                              - 00010: 3 tCYC
6578                                                              - ...
6579                                                              - 11111: 32 tCYC */
6580         uint64_t tmprr                        : 4;  /**< Indicates tMPRR constraints.
6581                                                          Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
6582                                                          where tMPRR is from the DDR3 spec, and tCYC(ns)
6583                                                          is the DDR clock frequency (not data rate).
6584                                                          TYP=1nCK */
6585 #else
6586         uint64_t tmprr                        : 4;
6587         uint64_t tras                         : 5;
6588         uint64_t trcd                         : 4;
6589         uint64_t twtr                         : 4;
6590         uint64_t trfc                         : 5;
6591         uint64_t trrd                         : 3;
6592         uint64_t txp                          : 3;
6593         uint64_t twlmrd                       : 4;
6594         uint64_t twldqsen                     : 4;
6595         uint64_t tfaw                         : 5;
6596         uint64_t txpdll                       : 5;
6597         uint64_t reserved_46_63               : 18;
6598 #endif
6599         } cn63xxp1;
6600 };
6601 typedef union cvmx_lmcx_timing_params1 cvmx_lmcx_timing_params1_t;
6602
6603 /**
6604  * cvmx_lmc#_tro_ctl
6605  *
6606  * LMC_TRO_CTL = LMC Temperature Ring Osc Control
6607  * This register is an assortment of various control fields needed to control the temperature ring oscillator
6608  *
6609  * Notes:
6610  * To bring up the temperature ring oscillator, write TRESET to 0, and follow by initializing RCLK_CNT to desired
6611  * value
6612  */
6613 union cvmx_lmcx_tro_ctl
6614 {
6615         uint64_t u64;
6616         struct cvmx_lmcx_tro_ctl_s
6617         {
6618 #if __BYTE_ORDER == __BIG_ENDIAN
6619         uint64_t reserved_33_63               : 31;
6620         uint64_t rclk_cnt                     : 32; /**< rclk counter (initialize to the desired value after clearing TRESET; see register notes) */
6621         uint64_t treset                       : 1;  /**< Reset ring oscillator; write 0 to bring up the temperature ring oscillator (see register notes) */
6622 #else  /* little-endian: same fields, mirrored bit order */
6623         uint64_t treset                       : 1;
6624         uint64_t rclk_cnt                     : 32;
6625         uint64_t reserved_33_63               : 31;
6626 #endif
6627         } s;
6628         struct cvmx_lmcx_tro_ctl_s            cn63xx;
6629         struct cvmx_lmcx_tro_ctl_s            cn63xxp1;
6630 };
6631 typedef union cvmx_lmcx_tro_ctl cvmx_lmcx_tro_ctl_t;
6632
6633 /**
6634  * cvmx_lmc#_tro_stat
6635  *
6636  * LMC_TRO_STAT = LMC Temperature Ring Osc Status
6637  * This register is an assortment of various control fields needed to control the temperature ring oscillator
6638  */
6639 union cvmx_lmcx_tro_stat
6640 {
6641         uint64_t u64;
6642         struct cvmx_lmcx_tro_stat_s
6643         {
6644 #if __BYTE_ORDER == __BIG_ENDIAN
6645         uint64_t reserved_32_63               : 32;
6646         uint64_t ring_cnt                     : 32; /**< ring counter (oscillation count from the temperature ring oscillator; presumably valid once the LMC*_TRO_CTL[RCLK_CNT] interval elapses — confirm in the HRM) */
6647 #else  /* little-endian: same fields, mirrored bit order */
6648         uint64_t ring_cnt                     : 32;
6649         uint64_t reserved_32_63               : 32;
6650 #endif
6651         } s;
6652         struct cvmx_lmcx_tro_stat_s           cn63xx;
6653         struct cvmx_lmcx_tro_stat_s           cn63xxp1;
6654 };
6655 typedef union cvmx_lmcx_tro_stat cvmx_lmcx_tro_stat_t;
6656
6657 /**
6658  * cvmx_lmc#_wlevel_ctl
6659  */
6660 union cvmx_lmcx_wlevel_ctl
6661 {
6662         uint64_t u64;
6663         struct cvmx_lmcx_wlevel_ctl_s
6664         {
6665 #if __BYTE_ORDER == __BIG_ENDIAN
6666         uint64_t reserved_22_63               : 42;
6667         uint64_t rtt_nom                      : 3;  /**< RTT_NOM
6668                                                          LMC writes a decoded value to MR1[Rtt_Nom] of the rank during
6669                                                          write leveling. Per JEDEC DDR3 specifications,
6670                                                          only values MR1[Rtt_Nom] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6)
6671                                                          are allowed during write leveling with output buffer enabled.
6672                                                          000 : LMC writes 001 (RZQ/4)   to MR1[Rtt_Nom]
6673                                                          001 : LMC writes 010 (RZQ/2)   to MR1[Rtt_Nom]
6674                                                          010 : LMC writes 011 (RZQ/6)   to MR1[Rtt_Nom]
6675                                                          011 : LMC writes 100 (RZQ/12)  to MR1[Rtt_Nom]
6676                                                          100 : LMC writes 101 (RZQ/8)   to MR1[Rtt_Nom]
6677                                                          101 : LMC writes 110 (Rsvd)    to MR1[Rtt_Nom]
6678                                                          110 : LMC writes 111 (Rsvd)    to MR1[Rtt_Nom]
6679                                                          111 : LMC writes 000 (Disabled) to MR1[Rtt_Nom] */
6680         uint64_t bitmask                      : 8;  /**< Mask to select bit lanes on which write-leveling
6681                                                          feedback is returned when OR_DIS is set to 1 */
6682         uint64_t or_dis                       : 1;  /**< Disable or'ing of bits in a byte lane when computing
6683                                                          the write-leveling bitmask */
6684         uint64_t sset                         : 1;  /**< Run write-leveling on the current setting only. */
6685         uint64_t lanemask                     : 9;  /**< One-hot mask to select byte lane to be leveled by
6686                                                          the write-leveling sequence
6687                                                          Used with x16 parts where the upper and lower byte
6688                                                          lanes need to be leveled independently */
6689 #else  /* little-endian: same fields, mirrored bit order */
6690         uint64_t lanemask                     : 9;
6691         uint64_t sset                         : 1;
6692         uint64_t or_dis                       : 1;
6693         uint64_t bitmask                      : 8;
6694         uint64_t rtt_nom                      : 3;
6695         uint64_t reserved_22_63               : 42;
6696 #endif
6697         } s;
6698         struct cvmx_lmcx_wlevel_ctl_s         cn63xx;
6699         struct cvmx_lmcx_wlevel_ctl_cn63xxp1  /* pass-1 silicon lacks RTT_NOM/BITMASK/OR_DIS fields */
6700         {
6701 #if __BYTE_ORDER == __BIG_ENDIAN
6702         uint64_t reserved_10_63               : 54;
6703         uint64_t sset                         : 1;  /**< Run write-leveling on the current setting only. */
6704         uint64_t lanemask                     : 9;  /**< One-hot mask to select byte lane to be leveled by
6705                                                          the write-leveling sequence
6706                                                          Used with x16 parts where the upper and lower byte
6707                                                          lanes need to be leveled independently */
6708 #else
6709         uint64_t lanemask                     : 9;
6710         uint64_t sset                         : 1;
6711         uint64_t reserved_10_63               : 54;
6712 #endif
6713         } cn63xxp1;
6714 };
6715 typedef union cvmx_lmcx_wlevel_ctl cvmx_lmcx_wlevel_ctl_t;
6716
6717 /**
6718  * cvmx_lmc#_wlevel_dbg
6719  *
6720  * Notes:
6721  * A given write of LMC*_WLEVEL_DBG returns the write-leveling pass/fail results for all possible
6722  * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW write-leveled.
6723  * LMC*_WLEVEL_DBG[BYTE] selects the particular byte.
6724  * To get these pass/fail results for another different rank, you must run the hardware write-leveling
6725  * again. For example, it is possible to get the BITMASK results for every byte of every rank
6726  * if you run write-leveling separately for each rank, probing LMC*_WLEVEL_DBG between each
6727  * write-leveling.
6728  */
6729 union cvmx_lmcx_wlevel_dbg
6730 {
6731         uint64_t u64;
6732         struct cvmx_lmcx_wlevel_dbg_s
6733         {
6734 #if __BYTE_ORDER == __BIG_ENDIAN
6735         uint64_t reserved_12_63               : 52;
6736         uint64_t bitmask                      : 8;  /**< Bitmask generated during deskew settings sweep
6737                                                          if LMCX_WLEVEL_CTL[SSET]=0
6738                                                            BITMASK[n]=0 means deskew setting n failed
6739                                                            BITMASK[n]=1 means deskew setting n passed
6740                                                            for 0 <= n <= 7
6741                                                            BITMASK contains the first 8 results of the total 16
6742                                                            collected by LMC during the write-leveling sequence
6743                                                          else if LMCX_WLEVEL_CTL[SSET]=1
6744                                                            BITMASK[0]=0 means curr deskew setting failed
6745                                                            BITMASK[0]=1 means curr deskew setting passed */
6746         uint64_t byte                         : 4;  /**< 0 <= BYTE <= 8; selects which byte lane's bitmask is
                                                             returned (byte 8 is the ECC lane, per LMC*_WLEVEL_RANK*[BYTE8]) */
6747 #else  /* little-endian: same fields, mirrored bit order */
6748         uint64_t byte                         : 4;
6749         uint64_t bitmask                      : 8;
6750         uint64_t reserved_12_63               : 52;
6751 #endif
6752         } s;
6753         struct cvmx_lmcx_wlevel_dbg_s         cn63xx;
6754         struct cvmx_lmcx_wlevel_dbg_s         cn63xxp1;
6755 };
6756 typedef union cvmx_lmcx_wlevel_dbg cvmx_lmcx_wlevel_dbg_t;
6757
6758 /**
6759  * cvmx_lmc#_wlevel_rank#
6760  *
6761  * Notes:
6762  * This is four CSRs per LMC, one per each rank.
6763  *
6764  * Deskew setting is measured in units of 1/8 CK, so the above BYTE* values can range over 4 CKs.
6765  *
6766  * Assuming LMC*_WLEVEL_CTL[SSET]=0, the BYTE*<2:0> values are not used during write-leveling, and
6767  * they are over-written by the hardware as part of the write-leveling sequence. (HW sets STATUS==3
6768  * after HW write-leveling completes for the rank). SW needs to set BYTE*<4:3> bits.
6769  *
6770  * Each CSR may also be written by SW, but not while a write-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
6771  *
6772  * SW initiates a HW write-leveling sequence by programming LMC*_WLEVEL_CTL and writing RANKMASK and INIT_START=1 with SEQUENCE=6 in LMC*_CONFIG.
6773  * LMC will then step through and accumulate write leveling results for 8 unique delay settings (twice), starting at a delay of
6774  * LMC*_WLEVEL_RANKn[BYTE*<4:3>]*8 CK increasing by 1/8 CK each setting. HW will then set LMC*_WLEVEL_RANKi[BYTE*<2:0>] to indicate the
6775  * first write leveling result of '1' that followed a result of '0' during the sequence by searching for a '1100' pattern in the generated
6776  * bitmask, except that LMC will always write LMC*_WLEVEL_RANKi[BYTE*<0>]=0. If HW is unable to find a match for a '1100' pattern, then HW will
6777  * set LMC*_WLEVEL_RANKi[BYTE*<2:0>] to 4.
6778  * See LMC*_WLEVEL_CTL.
6779  *
6780  * LMC*_WLEVEL_RANKi values for ranks i without attached DRAM should be set such that
6781  * they do not increase the range of possible BYTE values for any byte
6782  * lane. The easiest way to do this is to set
6783  *     LMC*_WLEVEL_RANKi = LMC*_WLEVEL_RANKj,
6784  * where j is some rank with attached DRAM whose LMC*_WLEVEL_RANKj is already fully initialized.
6785  */
6786 union cvmx_lmcx_wlevel_rankx
6787 {
6788         uint64_t u64;
6789         struct cvmx_lmcx_wlevel_rankx_s
6790         {
6791 #if __BYTE_ORDER == __BIG_ENDIAN
6792         uint64_t reserved_47_63               : 17;
6793         uint64_t status                       : 2;  /**< Indicates status of the write-leveling and where
6794                                                          the BYTE* programmings in <44:0> came from:
6795                                                          0 = BYTE* values are their reset value
6796                                                          1 = BYTE* values were set via a CSR write to this register
6797                                                          2 = write-leveling sequence currently in progress (BYTE* values are unpredictable)
6798                                                          3 = BYTE* values came from a complete write-leveling sequence, irrespective of
6799                                                              which lanes are masked via LMC*_WLEVEL_CTL[LANEMASK] */
6800         uint64_t byte8                        : 5;  /**< Deskew setting
6801                                                          Bit 0 of BYTE8 must be zero during normal operation.
6802                                                          When ECC DRAM is not present (i.e. when DRAM is not
6803                                                          attached to chip signals DDR_CBS_0_* and DDR_CB[7:0]),
6804                                                          SW should write BYTE8 with a value that does
6805                                                          not increase the range of possible BYTE* values. The
6806                                                          easiest way to do this is to set
6807                                                          LMC*_WLEVEL_RANK*[BYTE8] = LMC*_WLEVEL_RANK*[BYTE0]
6808                                                          when there is no ECC DRAM, using the final BYTE0 value. */
6809         uint64_t byte7                        : 5;  /**< Deskew setting
6810                                                          Bit 0 of BYTE7 must be zero during normal operation */
6811         uint64_t byte6                        : 5;  /**< Deskew setting
6812                                                          Bit 0 of BYTE6 must be zero during normal operation */
6813         uint64_t byte5                        : 5;  /**< Deskew setting
6814                                                          Bit 0 of BYTE5 must be zero during normal operation */
6815         uint64_t byte4                        : 5;  /**< Deskew setting
6816                                                          Bit 0 of BYTE4 must be zero during normal operation */
6817         uint64_t byte3                        : 5;  /**< Deskew setting
6818                                                          Bit 0 of BYTE3 must be zero during normal operation */
6819         uint64_t byte2                        : 5;  /**< Deskew setting
6820                                                          Bit 0 of BYTE2 must be zero during normal operation */
6821         uint64_t byte1                        : 5;  /**< Deskew setting
6822                                                          Bit 0 of BYTE1 must be zero during normal operation */
6823         uint64_t byte0                        : 5;  /**< Deskew setting
6824                                                          Bit 0 of BYTE0 must be zero during normal operation */
6825 #else  /* little-endian: same fields, mirrored bit order */
6826         uint64_t byte0                        : 5;
6827         uint64_t byte1                        : 5;
6828         uint64_t byte2                        : 5;
6829         uint64_t byte3                        : 5;
6830         uint64_t byte4                        : 5;
6831         uint64_t byte5                        : 5;
6832         uint64_t byte6                        : 5;
6833         uint64_t byte7                        : 5;
6834         uint64_t byte8                        : 5;
6835         uint64_t status                       : 2;
6836         uint64_t reserved_47_63               : 17;
6837 #endif
6838         } s;
6839         struct cvmx_lmcx_wlevel_rankx_s       cn63xx;
6840         struct cvmx_lmcx_wlevel_rankx_s       cn63xxp1;
6841 };
6842 typedef union cvmx_lmcx_wlevel_rankx cvmx_lmcx_wlevel_rankx_t;
6843
6844 /**
6845  * cvmx_lmc#_wodt_ctl0
6846  *
6847  * LMC_WODT_CTL0 = LMC Write OnDieTermination control
6848  * See the description in LMC_WODT_CTL1.
6849  *
6850  * Notes:
6851  * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write ODT mask.  See LMC_WODT_CTL1.
6852  *
6853  */
6854 union cvmx_lmcx_wodt_ctl0
6855 {
6856         uint64_t u64;
6857         struct cvmx_lmcx_wodt_ctl0_s
6858         {
6859 #if __BYTE_ORDER == __BIG_ENDIAN
6860         uint64_t reserved_0_63                : 64; /**< No common layout: field placement differs per chip model; use the model-specific views below */
6861 #else
6862         uint64_t reserved_0_63                : 64;
6863 #endif
6864         } s;
6865         struct cvmx_lmcx_wodt_ctl0_cn30xx     /* per-DIMM/rank WODT mask layout */
6866         {
6867 #if __BYTE_ORDER == __BIG_ENDIAN
6868         uint64_t reserved_32_63               : 32;
6869         uint64_t wodt_d1_r1                   : 8;  /**< Write ODT mask DIMM1, RANK1 */
6870         uint64_t wodt_d1_r0                   : 8;  /**< Write ODT mask DIMM1, RANK0 */
6871         uint64_t wodt_d0_r1                   : 8;  /**< Write ODT mask DIMM0, RANK1 */
6872         uint64_t wodt_d0_r0                   : 8;  /**< Write ODT mask DIMM0, RANK0 */
6873 #else  /* little-endian: same fields, mirrored bit order */
6874         uint64_t wodt_d0_r0                   : 8;
6875         uint64_t wodt_d0_r1                   : 8;
6876         uint64_t wodt_d1_r0                   : 8;
6877         uint64_t wodt_d1_r1                   : 8;
6878         uint64_t reserved_32_63               : 32;
6879 #endif
6880         } cn30xx;
6881         struct cvmx_lmcx_wodt_ctl0_cn30xx     cn31xx;
6882         struct cvmx_lmcx_wodt_ctl0_cn38xx     /* per-position hi/lo 64-bit-half WODT mask layout */
6883         {
6884 #if __BYTE_ORDER == __BIG_ENDIAN
6885         uint64_t reserved_32_63               : 32;
6886         uint64_t wodt_hi3                     : 4;  /**< Write ODT mask for position 3, data[127:64] */
6887         uint64_t wodt_hi2                     : 4;  /**< Write ODT mask for position 2, data[127:64] */
6888         uint64_t wodt_hi1                     : 4;  /**< Write ODT mask for position 1, data[127:64] */
6889         uint64_t wodt_hi0                     : 4;  /**< Write ODT mask for position 0, data[127:64] */
6890         uint64_t wodt_lo3                     : 4;  /**< Write ODT mask for position 3, data[ 63: 0] */
6891         uint64_t wodt_lo2                     : 4;  /**< Write ODT mask for position 2, data[ 63: 0] */
6892         uint64_t wodt_lo1                     : 4;  /**< Write ODT mask for position 1, data[ 63: 0] */
6893         uint64_t wodt_lo0                     : 4;  /**< Write ODT mask for position 0, data[ 63: 0] */
6894 #else  /* little-endian: same fields, mirrored bit order */
6895         uint64_t wodt_lo0                     : 4;
6896         uint64_t wodt_lo1                     : 4;
6897         uint64_t wodt_lo2                     : 4;
6898         uint64_t wodt_lo3                     : 4;
6899         uint64_t wodt_hi0                     : 4;
6900         uint64_t wodt_hi1                     : 4;
6901         uint64_t wodt_hi2                     : 4;
6902         uint64_t wodt_hi3                     : 4;
6903         uint64_t reserved_32_63               : 32;
6904 #endif
6905         } cn38xx;
6906         struct cvmx_lmcx_wodt_ctl0_cn38xx     cn38xxp2;
6907         struct cvmx_lmcx_wodt_ctl0_cn38xx     cn50xx;
6908         struct cvmx_lmcx_wodt_ctl0_cn30xx     cn52xx;
6909         struct cvmx_lmcx_wodt_ctl0_cn30xx     cn52xxp1;
6910         struct cvmx_lmcx_wodt_ctl0_cn30xx     cn56xx;
6911         struct cvmx_lmcx_wodt_ctl0_cn30xx     cn56xxp1;
6912         struct cvmx_lmcx_wodt_ctl0_cn38xx     cn58xx;
6913         struct cvmx_lmcx_wodt_ctl0_cn38xx     cn58xxp1;
6914 };
6915 typedef union cvmx_lmcx_wodt_ctl0 cvmx_lmcx_wodt_ctl0_t;
6916
6917 /**
6918  * cvmx_lmc#_wodt_ctl1
6919  *
6920  * LMC_WODT_CTL1 = LMC Write OnDieTermination control
6921  * System designers may desire to terminate DQ/DQS/DM lines for higher frequency DDR operations
6922  * (667MHz and faster), especially on a multi-rank system. DDR2 DQ/DM/DQS I/O's have built in
6923  * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
6924  * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts
6925  * in that DIMM. System designers may prefer different combinations of ODT ON's for read and write
6926  * into different ranks. Octeon supports full programmability by way of the mask register below.
6927  * Each Rank position has its own 8-bit programmable field.
6928  * When the controller does a write to that rank, it sets the 8 ODT pins to the MASK pins below.
6929  * For example, when doing a write into Rank0, a system designer may desire to terminate the lines
6930  * with the resistor on Dimm0/Rank1. The mask WODT_D0_R0 would then be [00000010].
6931  * If ODT feature is not desired, the DDR parts can be programmed to not look at these pins by
6932  * writing 0 in QS_DIC. Octeon drives the appropriate mask values on the ODT pins by default.
6933  * If this feature is not required, write 0 in this register.
6934  *
6935  * Notes:
6936  * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write ODT mask.
6937  * When a given RANK is selected, the WODT mask for that RANK is used.  The resulting WODT mask is
6938  * driven to the DIMMs in the following manner:
6939  *            BUNK_ENA=1     BUNK_ENA=0
6940  * Mask[7] -> DIMM3, RANK1    DIMM3
6941  * Mask[6] -> DIMM3, RANK0
6942  * Mask[5] -> DIMM2, RANK1    DIMM2
6943  * Mask[4] -> DIMM2, RANK0
6944  * Mask[3] -> DIMM1, RANK1    DIMM1
6945  * Mask[2] -> DIMM1, RANK0
6946  * Mask[1] -> DIMM0, RANK1    DIMM0
6947  * Mask[0] -> DIMM0, RANK0
6948  */
6949 union cvmx_lmcx_wodt_ctl1
6950 {
6951         uint64_t u64;
6952         struct cvmx_lmcx_wodt_ctl1_s
6953         {
6954 #if __BYTE_ORDER == __BIG_ENDIAN
6955         uint64_t reserved_32_63               : 32;
6956         uint64_t wodt_d3_r1                   : 8;  /**< Write ODT mask DIMM3, RANK1/DIMM3 in SingleRanked */
6957         uint64_t wodt_d3_r0                   : 8;  /**< Write ODT mask DIMM3, RANK0 */
6958         uint64_t wodt_d2_r1                   : 8;  /**< Write ODT mask DIMM2, RANK1/DIMM2 in SingleRanked */
6959         uint64_t wodt_d2_r0                   : 8;  /**< Write ODT mask DIMM2, RANK0 */
6960 #else  /* little-endian: same fields, mirrored bit order */
6961         uint64_t wodt_d2_r0                   : 8;
6962         uint64_t wodt_d2_r1                   : 8;
6963         uint64_t wodt_d3_r0                   : 8;
6964         uint64_t wodt_d3_r1                   : 8;
6965         uint64_t reserved_32_63               : 32;
6966 #endif
6967         } s;
6968         /* All listed models share the common layout above; this CSR is only
6969            defined for the DDR2-generation models below (DDR3-generation chips
6970            use LMC*_WODT_MASK instead). */
6971         struct cvmx_lmcx_wodt_ctl1_s          cn30xx;
6972         struct cvmx_lmcx_wodt_ctl1_s          cn31xx;
6973         struct cvmx_lmcx_wodt_ctl1_s          cn52xx;
6974         struct cvmx_lmcx_wodt_ctl1_s          cn52xxp1;
6975         struct cvmx_lmcx_wodt_ctl1_s          cn56xx;
6976         struct cvmx_lmcx_wodt_ctl1_s          cn56xxp1;
6977 };
6978 typedef union cvmx_lmcx_wodt_ctl1 cvmx_lmcx_wodt_ctl1_t;
6976
6977 /**
6978  * cvmx_lmc#_wodt_mask
6979  *
6980  * LMC_WODT_MASK = LMC Write OnDieTermination mask
6981  * System designers may desire to terminate DQ/DQS lines for higher frequency DDR operations
6982  * especially on a multi-rank system. DDR3 DQ/DQS I/O's have built in
6983  * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
6984  * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts
6985  * in that DIMM. System designers may prefer different combinations of ODT ON's for writes
6986  * into different ranks. Octeon supports full programmability by way of the mask register below.
6987  * Each Rank position has its own 8-bit programmable field.
6988  * When the controller does a write to that rank, it sets the 4 ODT pins to the MASK pins below.
6989  * For example, when doing a write into Rank0, a system designer may desire to terminate the lines
6990  * with the resistor on DIMM0/Rank1. The mask WODT_D0_R0 would then be [00000010].
6991  * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
6992  * required, write 0 in this register.
6993  *
6994  * Notes:
6995  * When a given RANK is selected, the WODT mask for that RANK is used.  The resulting WODT mask is
6996  * driven to the DIMMs in the following manner:
6997  *             RANK_ENA=1                    RANK_ENA=0
6998  * Mask[3] -> DIMM1_ODT_1                     MBZ
6999  * Mask[2] -> DIMM1_ODT_0                     DIMM1_ODT_0
7000  * Mask[1] -> DIMM0_ODT_1                     MBZ
7001  * Mask[0] -> DIMM0_ODT_0                     DIMM0_ODT_0
7002  *
7003  * LMC always writes entire cache blocks and always writes them via two consecutive
7004  * write CAS operations to the same rank+bank+row spaced exactly 4 CK's apart.
7005  * When a WODT mask bit is set, LMC asserts the OCTEON ODT output
7006  * pin(s) starting the same CK as the first write CAS operation. Then, OCTEON
7007  * normally continues to assert the ODT output pin(s) for 9+LMC*_CONTROL[WODT_BPRCH] more CK's
7008  * - for a total of 10+LMC*_CONTROL[WODT_BPRCH] CK's for the entire cache block write -
7009  * through the second write CAS operation of the cache block,
7010  * satisfying the 6 CK DDR3 ODTH8 requirements.
7011  * But it is possible for OCTEON to issue two cache block writes separated by as few as
7012  * WtW = 8 or 9 (10 if LMC*_CONTROL[WODT_BPRCH]=1) CK's. In that case, OCTEON asserts the ODT output pin(s)
7013  * for the WODT mask of the first cache block write for WtW CK's, then asserts
7014  * the ODT output pin(s) for the WODT mask of the second cache block write for 10+LMC*_CONTROL[WODT_BPRCH] CK's
7015  * (or less if a third cache block write follows within 8 or 9 (or 10) CK's of this second cache block write).
7016  * Note that it may be necessary to force LMC to space back-to-back cache block writes
7017  * to different ranks apart by at least 10+LMC*_CONTROL[WODT_BPRCH] CK's to prevent DDR3 ODTH8 violations.
7018  */
/*
 * Register image for LMC*_WODT_MASK: per-DIMM, per-rank write-ODT enable
 * masks (see the block comment above for the mask-bit -> ODT-pin mapping
 * and the ODT assertion timing rules).
 *
 * NOTE(review): this overlay relies on the implementation-defined layout of
 * uint64_t bitfields; the two #if branches mirror the declaration order so
 * that each named field lands on the same bits of .u64 on big- and
 * little-endian builds. Field names and order are part of the generated-API
 * contract — do not reorder or rename.
 */
union cvmx_lmcx_wodt_mask
{
        uint64_t u64;                         /* raw 64-bit CSR value */
        struct cvmx_lmcx_wodt_mask_s
        {
#if __BYTE_ORDER == __BIG_ENDIAN
        /* Big-endian: most-significant field first (bits 63:56 downward). */
        uint64_t wodt_d3_r1                   : 8;  /**< Write ODT mask DIMM3, RANK1/DIMM3 in SingleRanked
                                                         *UNUSED IN 63xx, and MBZ* */
        uint64_t wodt_d3_r0                   : 8;  /**< Write ODT mask DIMM3, RANK0
                                                         *UNUSED IN 63xx, and MBZ* */
        uint64_t wodt_d2_r1                   : 8;  /**< Write ODT mask DIMM2, RANK1/DIMM2 in SingleRanked
                                                         *UNUSED IN 63xx, and MBZ* */
        uint64_t wodt_d2_r0                   : 8;  /**< Write ODT mask DIMM2, RANK0
                                                         *UNUSED IN 63xx, and MBZ* */
        uint64_t wodt_d1_r1                   : 8;  /**< Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked
                                                         if (!RANK_ENA) then WODT_D1_R1[3:0] MBZ
                                                         *Upper 4 bits UNUSED IN 63xx, and MBZ* */
        uint64_t wodt_d1_r0                   : 8;  /**< Write ODT mask DIMM1, RANK0
                                                         if (!RANK_ENA) then WODT_D1_R0[3,1] MBZ
                                                         *Upper 4 bits UNUSED IN 63xx, and MBZ* */
        uint64_t wodt_d0_r1                   : 8;  /**< Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked
                                                         if (!RANK_ENA) then WODT_D0_R1[3:0] MBZ
                                                         *Upper 4 bits UNUSED IN 63xx, and MBZ* */
        uint64_t wodt_d0_r0                   : 8;  /**< Write ODT mask DIMM0, RANK0
                                                         if (!RANK_ENA) then WODT_D0_R0[3,1] MBZ
                                                         *Upper 4 bits UNUSED IN 63xx, and MBZ* */
#else
        /* Little-endian mirror: same fields, least-significant first, so
         * each field occupies the same bit positions within .u64. */
        uint64_t wodt_d0_r0                   : 8;  /**< Write ODT mask DIMM0, RANK0 (bits 7:0) */
        uint64_t wodt_d0_r1                   : 8;  /**< Write ODT mask DIMM0, RANK1 (bits 15:8) */
        uint64_t wodt_d1_r0                   : 8;  /**< Write ODT mask DIMM1, RANK0 (bits 23:16) */
        uint64_t wodt_d1_r1                   : 8;  /**< Write ODT mask DIMM1, RANK1 (bits 31:24) */
        uint64_t wodt_d2_r0                   : 8;  /**< Write ODT mask DIMM2, RANK0 (bits 39:32) */
        uint64_t wodt_d2_r1                   : 8;  /**< Write ODT mask DIMM2, RANK1 (bits 47:40) */
        uint64_t wodt_d3_r0                   : 8;  /**< Write ODT mask DIMM3, RANK0 (bits 55:48) */
        uint64_t wodt_d3_r1                   : 8;  /**< Write ODT mask DIMM3, RANK1 (bits 63:56) */
#endif
        } s;
        /* CN63XX pass 1 and pass 2 share the common field layout. */
        struct cvmx_lmcx_wodt_mask_s          cn63xx;
        struct cvmx_lmcx_wodt_mask_s          cn63xxp1;
};
typedef union cvmx_lmcx_wodt_mask cvmx_lmcx_wodt_mask_t;
7060
7061 #endif