1 /***********************license start***************
2 * Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
44 * Configuration and status register (CSR) type definitions for
47 * This file is auto generated. Do not edit.
52 #ifndef __CVMX_DFM_DEFS_H__
53 #define __CVMX_DFM_DEFS_H__
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CHAR_CTL CVMX_DFM_CHAR_CTL_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_CHAR_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_CHAR_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000220ull);
}
#else
#define CVMX_DFM_CHAR_CTL (CVMX_ADD_IO_SEG(0x00011800D4000220ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CHAR_MASK0 CVMX_DFM_CHAR_MASK0_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_CHAR_MASK0_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_CHAR_MASK0 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000228ull);
}
#else
#define CVMX_DFM_CHAR_MASK0 (CVMX_ADD_IO_SEG(0x00011800D4000228ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CHAR_MASK2 CVMX_DFM_CHAR_MASK2_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_CHAR_MASK2_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_CHAR_MASK2 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000238ull);
}
#else
#define CVMX_DFM_CHAR_MASK2 (CVMX_ADD_IO_SEG(0x00011800D4000238ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CHAR_MASK4 CVMX_DFM_CHAR_MASK4_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_CHAR_MASK4_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_CHAR_MASK4 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000318ull);
}
#else
#define CVMX_DFM_CHAR_MASK4 (CVMX_ADD_IO_SEG(0x00011800D4000318ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_COMP_CTL2 CVMX_DFM_COMP_CTL2_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_COMP_CTL2_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_COMP_CTL2 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001B8ull);
}
#else
#define CVMX_DFM_COMP_CTL2 (CVMX_ADD_IO_SEG(0x00011800D40001B8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CONFIG CVMX_DFM_CONFIG_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_CONFIG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_CONFIG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000188ull);
}
#else
#define CVMX_DFM_CONFIG (CVMX_ADD_IO_SEG(0x00011800D4000188ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_CONTROL CVMX_DFM_CONTROL_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_CONTROL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_CONTROL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000190ull);
}
#else
#define CVMX_DFM_CONTROL (CVMX_ADD_IO_SEG(0x00011800D4000190ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_DLL_CTL2 CVMX_DFM_DLL_CTL2_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_DLL_CTL2_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_DLL_CTL2 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001C8ull);
}
#else
#define CVMX_DFM_DLL_CTL2 (CVMX_ADD_IO_SEG(0x00011800D40001C8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_DLL_CTL3 CVMX_DFM_DLL_CTL3_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_DLL_CTL3_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_DLL_CTL3 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000218ull);
}
#else
#define CVMX_DFM_DLL_CTL3 (CVMX_ADD_IO_SEG(0x00011800D4000218ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FCLK_CNT CVMX_DFM_FCLK_CNT_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_FCLK_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_FCLK_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001E0ull);
}
#else
#define CVMX_DFM_FCLK_CNT (CVMX_ADD_IO_SEG(0x00011800D40001E0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FNT_BIST CVMX_DFM_FNT_BIST_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_FNT_BIST_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_FNT_BIST not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40007F8ull);
}
#else
#define CVMX_DFM_FNT_BIST (CVMX_ADD_IO_SEG(0x00011800D40007F8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FNT_CTL CVMX_DFM_FNT_CTL_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_FNT_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_FNT_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000400ull);
}
#else
#define CVMX_DFM_FNT_CTL (CVMX_ADD_IO_SEG(0x00011800D4000400ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FNT_IENA CVMX_DFM_FNT_IENA_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_FNT_IENA_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_FNT_IENA not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000410ull);
}
#else
#define CVMX_DFM_FNT_IENA (CVMX_ADD_IO_SEG(0x00011800D4000410ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FNT_SCLK CVMX_DFM_FNT_SCLK_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_FNT_SCLK_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_FNT_SCLK not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000418ull);
}
#else
#define CVMX_DFM_FNT_SCLK (CVMX_ADD_IO_SEG(0x00011800D4000418ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_FNT_STAT CVMX_DFM_FNT_STAT_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_FNT_STAT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_FNT_STAT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000408ull);
}
#else
#define CVMX_DFM_FNT_STAT (CVMX_ADD_IO_SEG(0x00011800D4000408ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_IFB_CNT CVMX_DFM_IFB_CNT_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_IFB_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_IFB_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001D0ull);
}
#else
#define CVMX_DFM_IFB_CNT (CVMX_ADD_IO_SEG(0x00011800D40001D0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_MODEREG_PARAMS0 CVMX_DFM_MODEREG_PARAMS0_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_MODEREG_PARAMS0_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_MODEREG_PARAMS0 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001A8ull);
}
#else
#define CVMX_DFM_MODEREG_PARAMS0 (CVMX_ADD_IO_SEG(0x00011800D40001A8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_MODEREG_PARAMS1 CVMX_DFM_MODEREG_PARAMS1_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_MODEREG_PARAMS1_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_MODEREG_PARAMS1 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000260ull);
}
#else
#define CVMX_DFM_MODEREG_PARAMS1 (CVMX_ADD_IO_SEG(0x00011800D4000260ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_OPS_CNT CVMX_DFM_OPS_CNT_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_OPS_CNT_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_OPS_CNT not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001D8ull);
}
#else
#define CVMX_DFM_OPS_CNT (CVMX_ADD_IO_SEG(0x00011800D40001D8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_PHY_CTL CVMX_DFM_PHY_CTL_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_PHY_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_PHY_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000210ull);
}
#else
#define CVMX_DFM_PHY_CTL (CVMX_ADD_IO_SEG(0x00011800D4000210ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_RESET_CTL CVMX_DFM_RESET_CTL_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_RESET_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_RESET_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000180ull);
}
#else
#define CVMX_DFM_RESET_CTL (CVMX_ADD_IO_SEG(0x00011800D4000180ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_RLEVEL_CTL CVMX_DFM_RLEVEL_CTL_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_RLEVEL_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_RLEVEL_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40002A0ull);
}
#else
#define CVMX_DFM_RLEVEL_CTL (CVMX_ADD_IO_SEG(0x00011800D40002A0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_RLEVEL_DBG CVMX_DFM_RLEVEL_DBG_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_RLEVEL_DBG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_RLEVEL_DBG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40002A8ull);
}
#else
#define CVMX_DFM_RLEVEL_DBG (CVMX_ADD_IO_SEG(0x00011800D40002A8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: warns when the rank index is invalid for the running chip model. */
static inline uint64_t CVMX_DFM_RLEVEL_RANKX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_DFM_RLEVEL_RANKX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800D4000280ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_DFM_RLEVEL_RANKX(offset) (CVMX_ADD_IO_SEG(0x00011800D4000280ull) + ((offset) & 1) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_RODT_MASK CVMX_DFM_RODT_MASK_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_RODT_MASK_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_RODT_MASK not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000268ull);
}
#else
#define CVMX_DFM_RODT_MASK (CVMX_ADD_IO_SEG(0x00011800D4000268ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_SLOT_CTL0 CVMX_DFM_SLOT_CTL0_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_SLOT_CTL0_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_SLOT_CTL0 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001F8ull);
}
#else
#define CVMX_DFM_SLOT_CTL0 (CVMX_ADD_IO_SEG(0x00011800D40001F8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_SLOT_CTL1 CVMX_DFM_SLOT_CTL1_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_SLOT_CTL1_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_SLOT_CTL1 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000200ull);
}
#else
#define CVMX_DFM_SLOT_CTL1 (CVMX_ADD_IO_SEG(0x00011800D4000200ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_TIMING_PARAMS0 CVMX_DFM_TIMING_PARAMS0_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_TIMING_PARAMS0_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_TIMING_PARAMS0 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000198ull);
}
#else
#define CVMX_DFM_TIMING_PARAMS0 (CVMX_ADD_IO_SEG(0x00011800D4000198ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_TIMING_PARAMS1 CVMX_DFM_TIMING_PARAMS1_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_TIMING_PARAMS1_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_TIMING_PARAMS1 not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001A0ull);
}
#else
#define CVMX_DFM_TIMING_PARAMS1 (CVMX_ADD_IO_SEG(0x00011800D40001A0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_WLEVEL_CTL CVMX_DFM_WLEVEL_CTL_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_WLEVEL_CTL_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_WLEVEL_CTL not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000300ull);
}
#else
#define CVMX_DFM_WLEVEL_CTL (CVMX_ADD_IO_SEG(0x00011800D4000300ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_WLEVEL_DBG CVMX_DFM_WLEVEL_DBG_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_WLEVEL_DBG_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_WLEVEL_DBG not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D4000308ull);
}
#else
#define CVMX_DFM_WLEVEL_DBG (CVMX_ADD_IO_SEG(0x00011800D4000308ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* Checked accessor: warns when the rank index is invalid for the running chip model. */
static inline uint64_t CVMX_DFM_WLEVEL_RANKX(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_DFM_WLEVEL_RANKX(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x00011800D40002B0ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_DFM_WLEVEL_RANKX(offset) (CVMX_ADD_IO_SEG(0x00011800D40002B0ull) + ((offset) & 1) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DFM_WODT_MASK CVMX_DFM_WODT_MASK_FUNC()
/* Checked accessor: warns when this CSR is not present on the running chip model. */
static inline uint64_t CVMX_DFM_WODT_MASK_FUNC(void)
{
	if (!(OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX)))
		cvmx_warn("CVMX_DFM_WODT_MASK not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011800D40001B0ull);
}
#else
#define CVMX_DFM_WODT_MASK (CVMX_ADD_IO_SEG(0x00011800D40001B0ull))
#endif
/**
 * cvmx_dfm_char_ctl
 *
 * DFM_CHAR_CTL = DFM Characterization Control
 * This register is an assortment of various control fields needed to charecterize the DDR3 interface
 *
 * Note: the DR bit applies on the DQ port.
 */
union cvmx_dfm_char_ctl {
	uint64_t u64;
	struct cvmx_dfm_char_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_44_63:20;
	uint64_t dr:1;	/**< Pattern at Data Rate (not Clock Rate) */
	uint64_t skew_on:1;	/**< Skew adjacent bits */
	uint64_t en:1;	/**< Enable characterization */
	uint64_t sel:1;	/**< Pattern select: 1 = Programmable pattern */
	uint64_t prog:8;	/**< Programmable pattern */
	uint64_t prbs:32;	/**< PRBS Polynomial */
#else
	uint64_t prbs:32;
	uint64_t prog:8;
	uint64_t sel:1;
	uint64_t en:1;
	uint64_t skew_on:1;
	uint64_t dr:1;
	uint64_t reserved_44_63:20;
#endif
	} s;
	/* CN63XX lacks the DR and SKEW_ON fields present in the CN66XX layout. */
	struct cvmx_dfm_char_ctl_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_42_63:22;
	uint64_t en:1;	/**< Enable characterization */
	uint64_t sel:1;	/**< Pattern select: 1 = Programmable pattern */
	uint64_t prog:8;	/**< Programmable pattern */
	uint64_t prbs:32;	/**< PRBS Polynomial */
#else
	uint64_t prbs:32;
	uint64_t prog:8;
	uint64_t sel:1;
	uint64_t en:1;
	uint64_t reserved_42_63:22;
#endif
	} cn63xx;
	struct cvmx_dfm_char_ctl_cn63xx cn63xxp1;
	struct cvmx_dfm_char_ctl_s cn66xx;
};
typedef union cvmx_dfm_char_ctl cvmx_dfm_char_ctl_t;
/**
 * cvmx_dfm_char_mask0
 *
 * DFM_CHAR_MASK0 = DFM Characterization Control Mask0
 * This register is an assortment of various control fields needed to charecterize the DDR3 interface
 */
union cvmx_dfm_char_mask0 {
	uint64_t u64;
	struct cvmx_dfm_char_mask0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63:48;
	uint64_t mask:16;	/**< Mask for DQ0[15:0] */
#else
	uint64_t mask:16;
	uint64_t reserved_16_63:48;
#endif
	} s;
	struct cvmx_dfm_char_mask0_s cn63xx;
	struct cvmx_dfm_char_mask0_s cn63xxp1;
	struct cvmx_dfm_char_mask0_s cn66xx;
};
typedef union cvmx_dfm_char_mask0 cvmx_dfm_char_mask0_t;
/**
 * cvmx_dfm_char_mask2
 *
 * DFM_CHAR_MASK2 = DFM Characterization Control Mask2
 * This register is an assortment of various control fields needed to charecterize the DDR3 interface
 */
union cvmx_dfm_char_mask2 {
	uint64_t u64;
	struct cvmx_dfm_char_mask2_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_16_63:48;
	uint64_t mask:16;	/**< Mask for DQ1[15:0] */
#else
	uint64_t mask:16;
	uint64_t reserved_16_63:48;
#endif
	} s;
	struct cvmx_dfm_char_mask2_s cn63xx;
	struct cvmx_dfm_char_mask2_s cn63xxp1;
	struct cvmx_dfm_char_mask2_s cn66xx;
};
typedef union cvmx_dfm_char_mask2 cvmx_dfm_char_mask2_t;
/**
 * cvmx_dfm_char_mask4
 *
 * DFM_CHAR_MASK4 = DFM Characterization Mask4
 * This register is an assortment of various control fields needed to charecterize the DDR3 interface
 */
union cvmx_dfm_char_mask4 {
	uint64_t u64;
	struct cvmx_dfm_char_mask4_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_33_63:31;
	uint64_t reset_n_mask:1;	/**< Mask for RESET_N */
	uint64_t a_mask:16;	/**< Mask for A[15:0] */
	uint64_t ba_mask:3;	/**< Mask for BA[2:0] */
	uint64_t we_n_mask:1;	/**< Mask for WE_N */
	uint64_t cas_n_mask:1;	/**< Mask for CAS_N */
	uint64_t ras_n_mask:1;	/**< Mask for RAS_N */
	uint64_t odt1_mask:2;	/**< Mask for ODT1. For DFM, ODT1 is reserved. */
	uint64_t odt0_mask:2;	/**< Mask for ODT0 */
	uint64_t cs1_n_mask:2;	/**< Mask for CS1_N. For DFM, CS1_N is reserved. */
	uint64_t cs0_n_mask:2;	/**< Mask for CS0_N */
	uint64_t cke_mask:2;	/**< Mask for CKE. For DFM, CKE_MASK[1] is reserved. */
#else
	uint64_t cke_mask:2;
	uint64_t cs0_n_mask:2;
	uint64_t cs1_n_mask:2;
	uint64_t odt0_mask:2;
	uint64_t odt1_mask:2;
	uint64_t ras_n_mask:1;
	uint64_t cas_n_mask:1;
	uint64_t we_n_mask:1;
	uint64_t ba_mask:3;
	uint64_t a_mask:16;
	uint64_t reset_n_mask:1;
	uint64_t reserved_33_63:31;
#endif
	} s;
	struct cvmx_dfm_char_mask4_s cn63xx;
	struct cvmx_dfm_char_mask4_s cn66xx;
};
typedef union cvmx_dfm_char_mask4 cvmx_dfm_char_mask4_t;
/**
 * cvmx_dfm_comp_ctl2
 *
 * DFM_COMP_CTL2 = DFM Compensation control2
 *
 */
union cvmx_dfm_comp_ctl2 {
	uint64_t u64;
	struct cvmx_dfm_comp_ctl2_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_34_63:30;
	uint64_t ddr__ptune:4;	/**< DDR pctl from compensation circuit (debug: P-pullup impedance) */
	uint64_t ddr__ntune:4;	/**< DDR nctl from compensation circuit (debug: N-pulldown impedance) */
	uint64_t m180:1;	/**< Cap impedance at 180 ohm (instead of 240 ohm) */
	uint64_t byp:1;	/**< Bypass mode: use compensation setting from PTUNE,NTUNE */
	uint64_t ptune:4;	/**< PCTL impedance control in bypass mode */
	uint64_t ntune:4;	/**< NCTL impedance control in bypass mode */
	uint64_t rodt_ctl:4;	/**< NCTL RODT impedance control bits (0110-1111 = Reserved) */
	uint64_t cmd_ctl:4;	/**< Drive strength control for CMD/A/RESET_N/CKE drivers
				     (0000,1000-1111 = Reserved) */
	uint64_t ck_ctl:4;	/**< Drive strength control for CK/CS_N/ODT drivers
				     (0000,1000-1111 = Reserved) */
	uint64_t dqx_ctl:4;	/**< Drive strength control for DQ/DQS drivers
				     (0000,1000-1111 = Reserved) */
#else
	uint64_t dqx_ctl:4;
	uint64_t ck_ctl:4;
	uint64_t cmd_ctl:4;
	uint64_t rodt_ctl:4;
	uint64_t ntune:4;
	uint64_t ptune:4;
	uint64_t byp:1;
	uint64_t m180:1;
	uint64_t ddr__ntune:4;
	uint64_t ddr__ptune:4;
	uint64_t reserved_34_63:30;
#endif
	} s;
	struct cvmx_dfm_comp_ctl2_s cn63xx;
	struct cvmx_dfm_comp_ctl2_s cn63xxp1;
	struct cvmx_dfm_comp_ctl2_s cn66xx;
};
typedef union cvmx_dfm_comp_ctl2 cvmx_dfm_comp_ctl2_t;
647 * DFM_CONFIG = DFM Memory Configuration Register
649 * This register controls certain parameters of Memory Configuration
652 * a. The self refresh entry sequence(s) power the DLL up/down (depending on DFM_MODEREG_PARAMS[DLL])
653 * when DFM_CONFIG[SREF_WITH_DLL] is set
654 * b. Prior to the self-refresh exit sequence, DFM_MODEREG_PARAMS should be re-programmed (if needed) to the
657 * DFM Bringup Sequence:
658 * 1. SW must ensure there are no pending DRAM transactions and that the DDR PLL and the DLL have been initialized.
659 * 2. Write DFM_COMP_CTL2, DFM_CONTROL, DFM_WODT_MASK, DFM_RODT_MASK, DFM_DUAL_MEMCFG, DFM_TIMING_PARAMS0, DFM_TIMING_PARAMS1,
660 * DFM_MODEREG_PARAMS0, DFM_MODEREG_PARAMS1, DFM_RESET_CTL (with DDR3RST=0), DFM_CONFIG (with INIT_START=0)
661 * with appropriate values, if necessary.
662 * 3. Wait 200us, then write DFM_RESET_CTL[DDR3RST] = 1.
663 * 4. Initialize all ranks at once by writing DFM_CONFIG[RANKMASK][n] = 1, DFM_CONFIG[INIT_STATUS][n] = 1, and DFM_CONFIG[INIT_START] = 1
664 * where n is a valid rank index for the specific board configuration.
665 * 5. for each rank n to be write-leveled [
666 * if auto write-leveling is desired [
667 * write DFM_CONFIG[RANKMASK][n] = 1, DFM_WLEVEL_CTL appropriately and DFM_CONFIG[INIT_START] = 1
668 * wait until DFM_WLEVEL_RANKn[STATUS] = 3
670 * write DFM_WLEVEL_RANKn with appropriate values
673 * 6. for each rank n to be read-leveled [
674 * if auto read-leveling is desired [
675 * write DFM_CONFIG[RANKMASK][n] = 1, DFM_RLEVEL_CTL appropriately and DFM_CONFIG[INIT_START] = 1
676 * wait until DFM_RLEVEL_RANKn[STATUS] = 3
678 * write DFM_RLEVEL_RANKn with appropriate values
682 union cvmx_dfm_config {
684 struct cvmx_dfm_config_s {
685 #ifdef __BIG_ENDIAN_BITFIELD
686 uint64_t reserved_59_63 : 5;
687 uint64_t early_unload_d1_r1 : 1; /**< Reserved */
688 uint64_t early_unload_d1_r0 : 1; /**< Reserved */
689 uint64_t early_unload_d0_r1 : 1; /**< When set, unload the PHY silo one cycle early for Rank 1
691 The recommended EARLY_UNLOAD_D0_R1 value can be calculated
692 after the final DFM_RLEVEL_RANK1[BYTE*] values are
693 selected (as part of read-leveling initialization).
694 Then, determine the largest read-leveling setting
695 for rank 1 (i.e. calculate maxset=MAX(DFM_RLEVEL_RANK1[BYTEi])
696 across all i), then set EARLY_UNLOAD_D0_R1
697 when the low two bits of this largest setting is not
698 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset<1:0>!=3)). */
699 uint64_t early_unload_d0_r0 : 1; /**< When set, unload the PHY silo one cycle early for Rank 0
701 The recommended EARLY_UNLOAD_D0_R0 value can be calculated
702 after the final DFM_RLEVEL_RANK0[BYTE*] values are
703 selected (as part of read-leveling initialization).
704 Then, determine the largest read-leveling setting
705 for rank 0 (i.e. calculate maxset=MAX(DFM_RLEVEL_RANK0[BYTEi])
706 across all i), then set EARLY_UNLOAD_D0_R0
707 when the low two bits of this largest setting is not
708 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset<1:0>!=3)). */
709 uint64_t init_status : 4; /**< Indicates status of initialization
710 INIT_STATUS[n] = 1 implies rank n has been initialized
711 SW must set necessary INIT_STATUS bits with the
712 same DFM_CONFIG write that initiates
713 power-up/init and self-refresh exit sequences
714 (if the required INIT_STATUS bits are not already
715 set before DFM initiates the sequence).
716 INIT_STATUS determines the chip-selects that assert
717 during refresh, ZQCS, and precharge power-down and
718 self-refresh entry/exit SEQUENCE's.
719 INIT_STATUS<3:2> must be zero. */
720 uint64_t mirrmask : 4; /**< Mask determining which ranks are address-mirrored.
721 MIRRMASK<n> = 1 means Rank n addresses are mirrored
723 A mirrored read/write has these differences:
724 - DDR_BA<1> is swapped with DDR_BA<0>
725 - DDR_A<8> is swapped with DDR_A<7>
726 - DDR_A<6> is swapped with DDR_A<5>
727 - DDR_A<4> is swapped with DDR_A<3>
728 MIRRMASK<3:2> must be zero.
729 When RANK_ENA=0, MIRRMASK<1> MBZ */
730 uint64_t rankmask : 4; /**< Mask to select rank to be leveled/initialized.
731 To write-level/read-level/initialize rank i, set RANKMASK<i>
732 RANK_ENA=1 RANK_ENA=0
733 RANKMASK<0> = CS0 CS0 and CS1
734 RANKMASK<1> = CS1 MBZ
735 For read/write leveling, each rank has to be leveled separately,
736 so RANKMASK should only have one bit set.
737 RANKMASK is not used during self-refresh entry/exit and
738 precharge power-down entry/exit instruction sequences.
739 RANKMASK<3:2> must be zero.
740 When RANK_ENA=0, RANKMASK<1> MBZ */
741 uint64_t rank_ena : 1; /**< RANK enable (for use with multiple ranks)
742 The RANK_ENA bit enables
743 the drive of the CS_N[1:0] and ODT_<1:0> pins differently based on the
744 (PBANK_LSB-1) address bit. */
745 uint64_t sref_with_dll : 1; /**< Self-refresh entry/exit write MR1 and MR2
746 When set, self-refresh entry and exit instruction sequences
747 write MR1 and MR2 (in all ranks). (The writes occur before
748 self-refresh entry, and after self-refresh exit.)
749 When clear, self-refresh entry and exit instruction sequences
750 do not write any registers in the DDR3 parts. */
751 uint64_t early_dqx : 1; /**< Send DQx signals one CK cycle earlier for the case when
752 the shortest DQx lines have a larger delay than the CK line */
753 uint64_t sequence : 3; /**< Instruction sequence that is run after a 0->1
754 transition on DFM_CONFIG[INIT_START]. Self-refresh entry and
755 precharge power-down entry and exit SEQUENCE's can also
756 be initiated automatically by hardware.
757 0=power-up/init (RANKMASK used, MR0, MR1, MR2, and MR3 written)
758 1=read-leveling (RANKMASK used, MR3 written)
759 2=self-refresh entry (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
760 3=self-refresh exit, (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
761 4=precharge power-down entry (all ranks participate)
762 5=precharge power-down exit (all ranks participate)
763 6=write-leveling (RANKMASK used, MR1 written)
765 Precharge power-down entry and exit SEQUENCE's may
766 be automatically generated by the HW when IDLEPOWER!=0.
767 Self-refresh entry SEQUENCE's may be automatically
768 generated by hardware upon a chip warm or soft reset
769 sequence when DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
770 DFM writes the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 CSR field values
771 to the Mode registers in the DRAM parts (MR0, MR1, MR2, and MR3) as part of some of these sequences.
772 Refer to the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 descriptions for more details.
773 The DFR_CKE pin gets activated as part of power-up/init,
774 self-refresh exit, and precharge power-down exit sequences.
775 The DFR_CKE pin gets de-activated as part of self-refresh entry,
776 precharge power-down entry, or DRESET assertion.
777 If there are two consecutive power-up/init's without
778 a DRESET assertion between them, DFM asserts DFR_CKE as part of
779 the first power-up/init, and continues to assert DFR_CKE
780 through the remainder of the first and the second power-up/init.
781 If DFR_CKE deactivation and reactivation is needed for
782 a second power-up/init, a DRESET assertion is required
783 between the first and the second. */
784 uint64_t ref_zqcs_int : 19; /**< Refresh & ZQCS interval represented in \#of 512 fclk
785 increments. A Refresh sequence is triggered when bits
786 [24:18] are equal to 0, and a ZQCS sequence is triggered
787 when [36:18] are equal to 0.
788 Program [24:18] to RND-DN(tREFI/clkPeriod/512)
789 Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
790 that this value should always be greater than 32, to account for
791 resistor calibration delays.
792 000_00000000_00000000: RESERVED
793 Max Refresh interval = 127 * 512 = 65024 fclks
794 Max ZQCS interval = (8*256*256-1) * 512 = 268434944 fclks ~ 335ms for a 1.25 ns clock
795 DFM_CONFIG[INIT_STATUS] determines which ranks receive
796 the REF / ZQCS. DFM does not send any refreshes / ZQCS's
797 when DFM_CONFIG[INIT_STATUS]=0. */
798 uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter,
799 and DFM_OPS_CNT, DFM_IFB_CNT, and DFM_FCLK_CNT
800 CSR's. SW should write this to a one, then re-write
801 it to a zero to cause the reset. */
802 uint64_t ecc_adr : 1; /**< Must be zero. */
803 uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after
804 having waited for 2^FORCEWRITE cycles. 0=disabled. */
805 uint64_t idlepower : 3; /**< Enter precharge power-down mode after the memory
806 controller has been idle for 2^(2+IDLEPOWER) cycles.
808 This field should only be programmed after initialization.
809 DFM_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
810 is disabled during the precharge power-down. */
811 uint64_t pbank_lsb : 4; /**< Physical bank address bit select
812 Encoding used to determine which memory address
813 bit position represents the rank(or bunk) bit used to enable 1(of 2)
814 ranks(via chip enables) supported by the DFM DDR3 interface.
815 Reverting to the explanation for ROW_LSB, PBANK_LSB would be ROW_LSB bit +
816 \#rowbits + \#rankbits.
818 - 0: rank = mem_adr[24]
819 - 1: rank = mem_adr[25]
820 - 2: rank = mem_adr[26]
821 - 3: rank = mem_adr[27]
822 - 4: rank = mem_adr[28]
823 - 5: rank = mem_adr[29]
824 - 6: rank = mem_adr[30]
825 - 7: rank = mem_adr[31]
827 DESIGN NOTE: The DFM DDR3 memory bus is 16b wide, therefore DOES NOT
828 support standard 64b/72b DDR3 DIMM modules. The board designer should
829 populate the DFM DDR3 interface using either TWO x8bit DDR3 devices
830 (or a single x16bit device if available) to fully populate the 16b
832 The DFM DDR3 memory controller supports either 1(or 2) rank(s) based
833 on how much total memory is desired for the DFA application. See
834 RANK_ENA CSR bit when enabling for dual-ranks.
836 1) When RANK_ENA=0, SW must properly configure the PBANK_LSB to
837 reference upper unused memory address bits.
838 2) When RANK_ENA=1 (dual ranks), SW must configure PBANK_LSB to
839 reference the upper most address bit based on the total size
841 For example, for a DFM DDR3 memory populated using Samsung's k4b1g0846c-f7
842 1Gb(256MB) (16M x 8 bit x 8 bank) DDR3 parts, the column address width = 10 and
843 the device row address width = 14b. The single x8bit device contains 128MB, and
844 requires TWO such parts to populate the DFM 16b DDR3 interface. This then yields
845 a total rank size = 256MB = 2^28.
846 For a single-rank configuration (RANK_ENA=0), SW would program PBANK_LSB>=3 to
847 select mem_adr[x] bits above the legal DFM address range for mem_adr[27:0]=256MB.
848 For a dual-rank configuration (RANK_ENA=1), SW would program PBANK_LSB=4 to select
849 rank=mem_adr[28] as the bit used to determine which 256MB rank (of 512MB total) to
850 access (via rank chip enables - see: DFM DDR3 CS0[1:0] pins for connection to
851 upper and lower rank). */
852 uint64_t row_lsb : 3; /**< Row Address bit select
853 Encoding used to determine which memory address
854 bit position represents the low order DDR ROW address.
855 The DFM memory address [31:4] which references octawords
856 needs to be translated to DRAM addresses (bnk,row,col,bunk)
858 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
859 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4
860 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
861 | ROW[m:n] | COL[13:3] | BA
862 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
864 BA[2:0]: mem_adr[6:4]
865 COL[13:0]: [mem_adr[17:7],3'd0]
866 NOTE: The extracted COL address is always 14b fixed size width,
867 and upper unused bits are ignored by the DRAM device.
868 ROW[15:0]: Extraction of ROW starting address bit is programmable,
869 and is dependent on the \#column bits supported by the DRAM device.
870 The actual starting bit of the ROW can actually span into the
871 high order bits of the COL[13:3] field described above.
873 --------------------------
880 6,7: [1'b0, mem_adr[31:17]] For current DDR3 Jedec spec - UNSUPPORTED
881 For example, for Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
882 DDR3 parts, the column address width = 10. Therefore,
883 BA[3:0] = mem_adr[6:4] / COL[9:0] = [mem_adr[13:7],3'd0], and
884 we would want the row starting address to be extracted from mem_adr[14].
885 Therefore, a ROW_LSB=3, will extract the row from mem_adr[29:14]. */
886 uint64_t ecc_ena : 1; /**< Must be zero. */
887 uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory sequence that is
888 selected by DFM_CONFIG[SEQUENCE]. This register is a
889 oneshot and clears itself each time it is set. */
891 uint64_t init_start : 1;
892 uint64_t ecc_ena : 1;
893 uint64_t row_lsb : 3;
894 uint64_t pbank_lsb : 4;
895 uint64_t idlepower : 3;
896 uint64_t forcewrite : 4;
897 uint64_t ecc_adr : 1;
899 uint64_t ref_zqcs_int : 19;
900 uint64_t sequence : 3;
901 uint64_t early_dqx : 1;
902 uint64_t sref_with_dll : 1;
903 uint64_t rank_ena : 1;
904 uint64_t rankmask : 4;
905 uint64_t mirrmask : 4;
906 uint64_t init_status : 4;
907 uint64_t early_unload_d0_r0 : 1;
908 uint64_t early_unload_d0_r1 : 1;
909 uint64_t early_unload_d1_r0 : 1;
910 uint64_t early_unload_d1_r1 : 1;
911 uint64_t reserved_59_63 : 5;
914 struct cvmx_dfm_config_s cn63xx;
915 struct cvmx_dfm_config_cn63xxp1 {
916 #ifdef __BIG_ENDIAN_BITFIELD
917 uint64_t reserved_55_63 : 9;
918 uint64_t init_status : 4; /**< Indicates status of initialization
919 INIT_STATUS[n] = 1 implies rank n has been initialized
920 SW must set necessary INIT_STATUS bits with the
921 same DFM_CONFIG write that initiates
922 power-up/init and self-refresh exit sequences
923 (if the required INIT_STATUS bits are not already
924 set before DFM initiates the sequence).
925 INIT_STATUS determines the chip-selects that assert
926 during refresh, ZQCS, and precharge power-down and
927 self-refresh entry/exit SEQUENCE's.
928 INIT_STATUS<3:2> must be zero. */
929 uint64_t mirrmask : 4; /**< Mask determining which ranks are address-mirrored.
930 MIRRMASK<n> = 1 means Rank n addresses are mirrored
932 A mirrored read/write has these differences:
933 - DDR_BA<1> is swapped with DDR_BA<0>
934 - DDR_A<8> is swapped with DDR_A<7>
935 - DDR_A<6> is swapped with DDR_A<5>
936 - DDR_A<4> is swapped with DDR_A<3>
937 MIRRMASK<3:2> must be zero.
938 When RANK_ENA=0, MIRRMASK<1> MBZ */
939 uint64_t rankmask : 4; /**< Mask to select rank to be leveled/initialized.
940 To write-level/read-level/initialize rank i, set RANKMASK<i>
941 RANK_ENA=1 RANK_ENA=0
942 RANKMASK<0> = CS0 CS0 and CS1
943 RANKMASK<1> = CS1 MBZ
944 For read/write leveling, each rank has to be leveled separately,
945 so RANKMASK should only have one bit set.
946 RANKMASK is not used during self-refresh entry/exit and
947 precharge power-down entry/exit instruction sequences.
948 RANKMASK<3:2> must be zero.
949 When RANK_ENA=0, RANKMASK<1> MBZ */
950 uint64_t rank_ena : 1; /**< RANK enable (for use with multiple ranks)
951 The RANK_ENA bit enables
952 the drive of the CS_N[1:0] and ODT_<1:0> pins differently based on the
953 (PBANK_LSB-1) address bit. */
954 uint64_t sref_with_dll : 1; /**< Self-refresh entry/exit write MR1 and MR2
955 When set, self-refresh entry and exit instruction sequences
956 write MR1 and MR2 (in all ranks). (The writes occur before
957 self-refresh entry, and after self-refresh exit.)
958 When clear, self-refresh entry and exit instruction sequences
959 do not write any registers in the DDR3 parts. */
960 uint64_t early_dqx : 1; /**< Send DQx signals one CK cycle earlier for the case when
961 the shortest DQx lines have a larger delay than the CK line */
962 uint64_t sequence : 3; /**< Instruction sequence that is run after a 0->1
963 transition on DFM_CONFIG[INIT_START]. Self-refresh entry and
964 precharge power-down entry and exit SEQUENCE's can also
965 be initiated automatically by hardware.
966 0=power-up/init (RANKMASK used, MR0, MR1, MR2, and MR3 written)
967 1=read-leveling (RANKMASK used, MR3 written)
968 2=self-refresh entry (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
969 3=self-refresh exit, (all ranks participate, MR1 and MR2 written if SREF_WITH_DLL=1)
970 4=precharge power-down entry (all ranks participate)
971 5=precharge power-down exit (all ranks participate)
972 6=write-leveling (RANKMASK used, MR1 written)
974 Precharge power-down entry and exit SEQUENCE's may
975 be automatically generated by the HW when IDLEPOWER!=0.
976 Self-refresh entry SEQUENCE's may be automatically
977 generated by hardware upon a chip warm or soft reset
978 sequence when DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT] are set.
979 DFM writes the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 CSR field values
980 to the Mode registers in the DRAM parts (MR0, MR1, MR2, and MR3) as part of some of these sequences.
981 Refer to the DFM_MODEREG_PARAMS0 and DFM_MODEREG_PARAMS1 descriptions for more details.
982 The DFR_CKE pin gets activated as part of power-up/init,
983 self-refresh exit, and precharge power-down exit sequences.
984 The DFR_CKE pin gets de-activated as part of self-refresh entry,
985 precharge power-down entry, or DRESET assertion.
986 If there are two consecutive power-up/init's without
987 a DRESET assertion between them, DFM asserts DFR_CKE as part of
988 the first power-up/init, and continues to assert DFR_CKE
989 through the remainder of the first and the second power-up/init.
990 If DFR_CKE deactivation and reactivation is needed for
991 a second power-up/init, a DRESET assertion is required
992 between the first and the second. */
993 uint64_t ref_zqcs_int : 19; /**< Refresh & ZQCS interval represented in \#of 512 fclk
994 increments. A Refresh sequence is triggered when bits
995 [24:18] are equal to 0, and a ZQCS sequence is triggered
996 when [36:18] are equal to 0.
997 Program [24:18] to RND-DN(tREFI/clkPeriod/512)
998 Program [36:25] to RND-DN(ZQCS_Interval/clkPeriod/(512*64)). Note
999 that this value should always be greater than 32, to account for
1000 resistor calibration delays.
1001 000_00000000_00000000: RESERVED
1002 Max Refresh interval = 127 * 512 = 65024 fclks
1003 Max ZQCS interval = (8*256*256-1) * 512 = 268434944 fclks ~ 335ms for a 1.25 ns clock
1004 DFM_CONFIG[INIT_STATUS] determines which ranks receive
1005 the REF / ZQCS. DFM does not send any refreshes / ZQCS's
1006 when DFM_CONFIG[INIT_STATUS]=0. */
1007 uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter,
1008 and DFM_OPS_CNT, DFM_IFB_CNT, and DFM_FCLK_CNT
1009 CSR's. SW should write this to a one, then re-write
1010 it to a zero to cause the reset. */
1011 uint64_t ecc_adr : 1; /**< Must be zero. */
1012 uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after
1013 having waited for 2^FORCEWRITE cycles. 0=disabled. */
1014 uint64_t idlepower : 3; /**< Enter precharge power-down mode after the memory
1015 controller has been idle for 2^(2+IDLEPOWER) cycles.
1017 This field should only be programmed after initialization.
1018 DFM_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL
1019 is disabled during the precharge power-down. */
1020 uint64_t pbank_lsb : 4; /**< Physical bank address bit select
1021 Encoding used to determine which memory address
1022 bit position represents the rank(or bunk) bit used to enable 1(of 2)
1023 ranks(via chip enables) supported by the DFM DDR3 interface.
1024 Reverting to the explanation for ROW_LSB, PBANK_LSB would be ROW_LSB bit +
1025 \#rowbits + \#rankbits.
1027 - 0: rank = mem_adr[24]
1028 - 1: rank = mem_adr[25]
1029 - 2: rank = mem_adr[26]
1030 - 3: rank = mem_adr[27]
1031 - 4: rank = mem_adr[28]
1032 - 5: rank = mem_adr[29]
1033 - 6: rank = mem_adr[30]
1034 - 7: rank = mem_adr[31]
1036 DESIGN NOTE: The DFM DDR3 memory bus is 16b wide, therefore DOES NOT
1037 support standard 64b/72b DDR3 DIMM modules. The board designer should
1038 populate the DFM DDR3 interface using either TWO x8bit DDR3 devices
1039 (or a single x16bit device if available) to fully populate the 16b
1041 The DFM DDR3 memory controller supports either 1(or 2) rank(s) based
1042 on how much total memory is desired for the DFA application. See
1043 RANK_ENA CSR bit when enabling for dual-ranks.
1045 1) When RANK_ENA=0, SW must properly configure the PBANK_LSB to
1046 reference upper unused memory address bits.
1047 2) When RANK_ENA=1 (dual ranks), SW must configure PBANK_LSB to
1048 reference the upper most address bit based on the total size
1050 For example, for a DFM DDR3 memory populated using Samsung's k4b1g0846c-f7
1051 1Gb(256MB) (16M x 8 bit x 8 bank) DDR3 parts, the column address width = 10 and
1052 the device row address width = 14b. The single x8bit device contains 128MB, and
1053 requires TWO such parts to populate the DFM 16b DDR3 interface. This then yields
1054 a total rank size = 256MB = 2^28.
1055 For a single-rank configuration (RANK_ENA=0), SW would program PBANK_LSB>=3 to
1056 select mem_adr[x] bits above the legal DFM address range for mem_adr[27:0]=256MB.
1057 For a dual-rank configuration (RANK_ENA=1), SW would program PBANK_LSB=4 to select
1058 rank=mem_adr[28] as the bit used to determine which 256MB rank (of 512MB total) to
1059 access (via rank chip enables - see: DFM DDR3 CS0[1:0] pins for connection to
1060 upper and lower rank). */
1061 uint64_t row_lsb : 3; /**< Row Address bit select
1062 Encoding used to determine which memory address
1063 bit position represents the low order DDR ROW address.
1064 The DFM memory address [31:4] which references octawords
1065 needs to be translated to DRAM addresses (bnk,row,col,bunk)
1067 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
1068 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4
1069 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1070 | ROW[m:n] | COL[13:3] | BA
1071 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1073 BA[2:0]: mem_adr[6:4]
1074 COL[13:0]: [mem_adr[17:7],3'd0]
1075 NOTE: The extracted COL address is always 14b fixed size width,
1076 and upper unused bits are ignored by the DRAM device.
1077 ROW[15:0]: Extraction of ROW starting address bit is programmable,
1078 and is dependent on the \#column bits supported by the DRAM device.
1079 The actual starting bit of the ROW can actually span into the
1080 high order bits of the COL[13:3] field described above.
1082 --------------------------
1089 6,7: [1'b0, mem_adr[31:17]] For current DDR3 Jedec spec - UNSUPPORTED
1090 For example, for Samsung's k4b1g0846c-f7 1Gb (16M x 8 bit x 8 bank)
1091 DDR3 parts, the column address width = 10. Therefore,
1092 BA[3:0] = mem_adr[6:4] / COL[9:0] = [mem_adr[13:7],3'd0], and
1093 we would want the row starting address to be extracted from mem_adr[14].
1094 Therefore, a ROW_LSB=3, will extract the row from mem_adr[29:14]. */
1095 uint64_t ecc_ena : 1; /**< Must be zero. */
1096 uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory sequence that is
1097 selected by DFM_CONFIG[SEQUENCE]. This register is a
1098 oneshot and clears itself each time it is set. */
1100 uint64_t init_start : 1;
1101 uint64_t ecc_ena : 1;
1102 uint64_t row_lsb : 3;
1103 uint64_t pbank_lsb : 4;
1104 uint64_t idlepower : 3;
1105 uint64_t forcewrite : 4;
1106 uint64_t ecc_adr : 1;
1108 uint64_t ref_zqcs_int : 19;
1109 uint64_t sequence : 3;
1110 uint64_t early_dqx : 1;
1111 uint64_t sref_with_dll : 1;
1112 uint64_t rank_ena : 1;
1113 uint64_t rankmask : 4;
1114 uint64_t mirrmask : 4;
1115 uint64_t init_status : 4;
1116 uint64_t reserved_55_63 : 9;
1119 struct cvmx_dfm_config_s cn66xx;
1121 typedef union cvmx_dfm_config cvmx_dfm_config_t;
/**
 * DFM_CONTROL = DFM Control
 *
 * An assortment of control fields needed by the memory controller.
 */
union cvmx_dfm_control {
	uint64_t u64;
	struct cvmx_dfm_control_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_24_63               : 40;
	uint64_t rodt_bprch                   : 1;  /**< Delay ODT turn-off one extra DCLK on RD. */
	uint64_t wodt_bprch                   : 1;  /**< Delay ODT turn-off one extra DCLK on WR. */
	uint64_t bprch                        : 2;  /**< Back porch: delay default DDR_DQ/DQS driver
	                                                 turn-on by an additional BPRCH fclks. */
	uint64_t ext_zqcs_dis                 : 1;  /**< Disable external auto-ZQCS calibration;
	                                                 when clear, DFM runs external ZQ cal. */
	uint64_t int_zqcs_dis                 : 1;  /**< Disable internal auto-ZQCS calibration.
	                                                 When re-enabled, ZQCS runs immediately, then
	                                                 every DFM_CONFIG[REF_ZQCS_INT] fclks. */
	uint64_t auto_fclkdis                 : 1;  /**< Auto-gate the internal clock when idle to
	                                                 save power (does not affect DDR3 PHY/pad
	                                                 clocks). */
	uint64_t xor_bank                     : 1;  /**< Must be zero. */
	uint64_t max_write_batch              : 4;  /**< Must be set to 8. */
	uint64_t nxm_write_en                 : 1;  /**< Must be zero. */
	uint64_t elev_prio_dis                : 1;  /**< Must be zero. */
	uint64_t inorder_wr                   : 1;  /**< Must be zero. */
	uint64_t inorder_rd                   : 1;  /**< Must be zero. */
	uint64_t throttle_wr                  : 1;  /**< Use at most one IFB for writes; must equal
	                                                 THROTTLE_RD. */
	uint64_t throttle_rd                  : 1;  /**< Use at most one IFB for reads; must equal
	                                                 THROTTLE_WR. */
	uint64_t fprch2                       : 2;  /**< Front porch: default DDR_DQ/DQS driver
	                                                 turn-off is FPRCH2 fclks earlier. */
	uint64_t pocas                        : 1;  /**< Enable DDR3 Posted CAS; set together with
	                                                 DFM_MODEREG_PARAMS[AL]. */
	uint64_t ddr2t                        : 1;  /**< DDR 2T mode: 2-cycle CMD/address window to
	                                                 relieve setup pressure on the heavily loaded
	                                                 address/command bus (see Micron tn_47_01). */
	uint64_t bwcnt                        : 1;  /**< Clear DFM_OPS_CNT, DFM_IFB_CNT and
	                                                 DFM_FCLK_CNT: write 1 then 0. */
	uint64_t rdimm_ena                    : 1;  /**< Must be zero. */
#else
	uint64_t rdimm_ena                    : 1;
	uint64_t bwcnt                        : 1;
	uint64_t ddr2t                        : 1;
	uint64_t pocas                        : 1;
	uint64_t fprch2                       : 2;
	uint64_t throttle_rd                  : 1;
	uint64_t throttle_wr                  : 1;
	uint64_t inorder_rd                   : 1;
	uint64_t inorder_wr                   : 1;
	uint64_t elev_prio_dis                : 1;
	uint64_t nxm_write_en                 : 1;
	uint64_t max_write_batch              : 4;
	uint64_t xor_bank                     : 1;
	uint64_t auto_fclkdis                 : 1;
	uint64_t int_zqcs_dis                 : 1;
	uint64_t ext_zqcs_dis                 : 1;
	uint64_t bprch                        : 2;
	uint64_t wodt_bprch                   : 1;
	uint64_t rodt_bprch                   : 1;
	uint64_t reserved_24_63               : 40;
#endif
	} s;
	struct cvmx_dfm_control_s cn63xx;
	/** CN63XX pass-1 layout: no RODT_BPRCH/WODT_BPRCH; bits <63:22> reserved. */
	struct cvmx_dfm_control_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_22_63               : 42;
	uint64_t bprch                        : 2;
	uint64_t ext_zqcs_dis                 : 1;
	uint64_t int_zqcs_dis                 : 1;
	uint64_t auto_fclkdis                 : 1;
	uint64_t xor_bank                     : 1;
	uint64_t max_write_batch              : 4;
	uint64_t nxm_write_en                 : 1;
	uint64_t elev_prio_dis                : 1;
	uint64_t inorder_wr                   : 1;
	uint64_t inorder_rd                   : 1;
	uint64_t throttle_wr                  : 1;
	uint64_t throttle_rd                  : 1;
	uint64_t fprch2                       : 2;
	uint64_t pocas                        : 1;
	uint64_t ddr2t                        : 1;
	uint64_t bwcnt                        : 1;
	uint64_t rdimm_ena                    : 1;
#else
	uint64_t rdimm_ena                    : 1;
	uint64_t bwcnt                        : 1;
	uint64_t ddr2t                        : 1;
	uint64_t pocas                        : 1;
	uint64_t fprch2                       : 2;
	uint64_t throttle_rd                  : 1;
	uint64_t throttle_wr                  : 1;
	uint64_t inorder_rd                   : 1;
	uint64_t inorder_wr                   : 1;
	uint64_t elev_prio_dis                : 1;
	uint64_t nxm_write_en                 : 1;
	uint64_t max_write_batch              : 4;
	uint64_t xor_bank                     : 1;
	uint64_t auto_fclkdis                 : 1;
	uint64_t int_zqcs_dis                 : 1;
	uint64_t ext_zqcs_dis                 : 1;
	uint64_t bprch                        : 2;
	uint64_t reserved_22_63               : 42;
#endif
	} cn63xxp1;
	struct cvmx_dfm_control_s cn66xx;
};
typedef union cvmx_dfm_control cvmx_dfm_control_t;
/**
 * DFM_DLL_CTL2 = DFM (Octeon) DLL control and FCLK reset
 *
 * DLL bringup (summary; see the HRM for the full numbered sequence):
 * with DRESET held and DFM_DLL_CTL2 otherwise zero, write DLL_BRINGUP=1;
 * roughly 10 fclks later write QUAD_DLL_ENA=1 (asserting DLL_BRINGUP first
 * puts the delay line in DLL mode); wait (e.g. an L2D_BST0 read), clear
 * DLL_BRINGUP; wait again, then clear DRESET.  QUAD_DLL_ENA, DLL_BRINGUP
 * and DRESET must not change afterwards without restarting the DFM and/or
 * DRESET initialization sequence.
 */
union cvmx_dfm_dll_ctl2 {
	uint64_t u64;
	struct cvmx_dfm_dll_ctl2_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_15_63               : 49;
	uint64_t dll_bringup                  : 1;  /**< DLL bringup. */
	uint64_t dreset                       : 1;  /**< Fclk domain reset; the fclk domain reset is
	                                                 (DRESET || ECLK_RESET). */
	uint64_t quad_dll_ena                 : 1;  /**< DLL enable. */
	uint64_t byp_sel                      : 4;  /**< Bypass select (1011-1111 reserved). */
	uint64_t byp_setting                  : 8;  /**< Bypass setting (e.g. DDR3-600: 10101100). */
#else
	uint64_t byp_setting                  : 8;
	uint64_t byp_sel                      : 4;
	uint64_t quad_dll_ena                 : 1;
	uint64_t dreset                       : 1;
	uint64_t dll_bringup                  : 1;
	uint64_t reserved_15_63               : 49;
#endif
	} s;
	struct cvmx_dfm_dll_ctl2_s cn63xx;
	struct cvmx_dfm_dll_ctl2_s cn63xxp1;
	struct cvmx_dfm_dll_ctl2_s cn66xx;
};
typedef union cvmx_dfm_dll_ctl2 cvmx_dfm_dll_ctl2_t;
/**
 * DFM_DLL_CTL3 = DFM DLL control and FCLK reset
 */
union cvmx_dfm_dll_ctl3 {
	uint64_t u64;
	struct cvmx_dfm_dll_ctl3_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_29_63               : 35;
	uint64_t dll_fast                     : 1;  /**< DLL lock status. */
	uint64_t dll90_setting                : 8;  /**< Encoded DLL setting; works in conjunction
	                                                 with DLL90_BYTE_SEL. */
	uint64_t fine_tune_mode               : 1;  /**< Fine tune mode. */
	uint64_t dll_mode                     : 1;  /**< DLL mode. */
	uint64_t dll90_byte_sel               : 4;  /**< Byte whose DLL settings are observed
	                                                 (0000, 1010-1111 reserved). */
	uint64_t offset_ena                   : 1;  /**< Offset enable. */
	uint64_t load_offset                  : 1;  /**< Load offset: writing 1 generates a one-cycle
	                                                 pulse to the PHY; self-clearing oneshot. */
	uint64_t mode_sel                     : 2;  /**< Mode select (11 = write & read). */
	uint64_t byte_sel                     : 4;  /**< Byte select (1011-1111 reserved). */
	uint64_t offset                       : 6;  /**< Write/read offset setting.  Bit [5]:
	                                                 0 = increment, 1 = decrement (not a 2's
	                                                 complement value). */
#else
	uint64_t offset                       : 6;
	uint64_t byte_sel                     : 4;
	uint64_t mode_sel                     : 2;
	uint64_t load_offset                  : 1;
	uint64_t offset_ena                   : 1;
	uint64_t dll90_byte_sel               : 4;
	uint64_t dll_mode                     : 1;
	uint64_t fine_tune_mode               : 1;
	uint64_t dll90_setting                : 8;
	uint64_t dll_fast                     : 1;
	uint64_t reserved_29_63               : 35;
#endif
	} s;
	struct cvmx_dfm_dll_ctl3_s cn63xx;
	struct cvmx_dfm_dll_ctl3_s cn63xxp1;
	struct cvmx_dfm_dll_ctl3_s cn66xx;
};
typedef union cvmx_dfm_dll_ctl3 cvmx_dfm_dll_ctl3_t;
/**
 * DFM_FCLK_CNT = Performance Counters
 */
union cvmx_dfm_fclk_cnt {
	uint64_t u64;
	struct cvmx_dfm_fclk_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t fclkcnt                      : 64; /**< Performance counter that counts fclks. */
#else
	uint64_t fclkcnt                      : 64;
#endif
	} s;
	struct cvmx_dfm_fclk_cnt_s cn63xx;
	struct cvmx_dfm_fclk_cnt_s cn63xxp1;
	struct cvmx_dfm_fclk_cnt_s cn66xx;
};
typedef union cvmx_dfm_fclk_cnt cvmx_dfm_fclk_cnt_t;
/**
 * DFM_FNT_BIST = DFM Front BIST Status
 *
 * BIST status for the DFM front section RAMs.  For each field:
 * 0 = GOOD (or BIST in progress/never run), 1 = failure.
 */
union cvmx_dfm_fnt_bist {
	uint64_t u64;
	struct cvmx_dfm_fnt_bist_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_5_63                : 59;
	uint64_t cab                          : 1;  /**< BIST result for CAB RAM. */
	uint64_t mrq                          : 1;  /**< BIST result for MRQ RAM. */
	uint64_t mff                          : 1;  /**< BIST result for MFF RAM. */
	uint64_t rpb                          : 1;  /**< BIST result for RPB RAM. */
	uint64_t mwb                          : 1;  /**< BIST result for MWB RAM. */
#else
	uint64_t mwb                          : 1;
	uint64_t rpb                          : 1;
	uint64_t mff                          : 1;
	uint64_t mrq                          : 1;
	uint64_t cab                          : 1;
	uint64_t reserved_5_63                : 59;
#endif
	} s;
	struct cvmx_dfm_fnt_bist_s cn63xx;
	/** CN63XX pass-1 layout: no CAB field; bits <63:4> reserved. */
	struct cvmx_dfm_fnt_bist_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_4_63                : 60;
	uint64_t mrq                          : 1;  /**< BIST result for MRQ RAM. */
	uint64_t mff                          : 1;  /**< BIST result for MFF RAM. */
	uint64_t rpb                          : 1;  /**< BIST result for RPB RAM. */
	uint64_t mwb                          : 1;  /**< BIST result for MWB RAM. */
#else
	uint64_t mwb                          : 1;
	uint64_t rpb                          : 1;
	uint64_t mff                          : 1;
	uint64_t mrq                          : 1;
	uint64_t reserved_4_63                : 60;
#endif
	} cn63xxp1;
	struct cvmx_dfm_fnt_bist_s cn66xx;
};
typedef union cvmx_dfm_fnt_bist cvmx_dfm_fnt_bist_t;
/**
 * DFM_FNT_CTL = DFM Front Control Register
 *
 * Control fields for the DFM front section of logic.
 */
union cvmx_dfm_fnt_ctl {
	uint64_t u64;
	struct cvmx_dfm_fnt_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_4_63                : 60;
	uint64_t sbe_ena                      : 1;  /**< If SBE_ENA=1 and RECC_ENA=1, single-bit
	                                                 errors detected/corrected during GWALK reads
	                                                 are reported via the RWORD0[REA]=ERR code at
	                                                 the end of the DFA instruction.  Does NOT
	                                                 affect DFM_FNT_STAT[SBE] reporting.  With
	                                                 SBE_ENA==0 an SBE never sets "err", but a
	                                                 DBE always does, regardless of SBE_ENA. */
	uint64_t wecc_ena                     : 1;  /**< If 1, HW auto-generates (overwrites) the
	                                                 10-bit OWECC codeword inband in
	                                                 OW-DATA[127:118] on memory writes sourced by
	                                                 DFA MLOAD instructions or NCB-Direct CSR
	                                                 writes.  If 0, SW must supply the codeword
	                                                 inband itself. */
	uint64_t recc_ena                     : 1;  /**< If 1, DFA memory reads (GWALK or NCB-Direct
	                                                 CSR reads) are protected by the inband
	                                                 10-bit OWECC SEC/DED codeword; see
	                                                 DFM_FNT_STAT[SBE,DBE,FADR,FSYN] for error
	                                                 capture.  NCB-Direct reads of the upper QW
	                                                 then return zeroes in the upper 10 bits; if
	                                                 0, the raw 64 bits from memory are returned
	                                                 (useful for ECC debug). */
	uint64_t dfr_ena                      : 1;  /**< DFM memory interface enable.  The interface
	                                                 powers up disabled (outputs/bidirs
	                                                 tristated); enable only after a stable DFM
	                                                 DDR3 clock is configured (see
	                                                 LMCx_DDR_PLL_CTL[DFM_PS_EN,DFM_DIV_RESET])
	                                                 and after DFM_DLL_CTL2[DRESET] has been
	                                                 cleared. */
#else
	uint64_t dfr_ena                      : 1;
	uint64_t recc_ena                     : 1;
	uint64_t wecc_ena                     : 1;
	uint64_t sbe_ena                      : 1;
	uint64_t reserved_4_63                : 60;
#endif
	} s;
	struct cvmx_dfm_fnt_ctl_s cn63xx;
	struct cvmx_dfm_fnt_ctl_s cn63xxp1;
	struct cvmx_dfm_fnt_ctl_s cn66xx;
};
typedef union cvmx_dfm_fnt_ctl cvmx_dfm_fnt_ctl_t;
/**
 * DFM_FNT_IENA = DFM Front Interrupt Enable Mask
 *
 * Error interrupt enables for the DFM front section of logic.
 */
union cvmx_dfm_fnt_iena {
	uint64_t u64;
	struct cvmx_dfm_fnt_iena_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_2_63                : 62;
	uint64_t dbe_intena                   : 1;  /**< OWECC double-error-detected (DED) interrupt
	                                                 enable: raise a processor interrupt on an
	                                                 uncorrectable double-bit OWECC error during
	                                                 a memory read. */
	uint64_t sbe_intena                   : 1;  /**< OWECC single-error-corrected (SEC) interrupt
	                                                 enable: raise a processor interrupt on a
	                                                 correctable single-bit OWECC error during a
	                                                 memory read. */
#else
	uint64_t sbe_intena                   : 1;
	uint64_t dbe_intena                   : 1;
	uint64_t reserved_2_63                : 62;
#endif
	} s;
	struct cvmx_dfm_fnt_iena_s cn63xx;
	struct cvmx_dfm_fnt_iena_s cn63xxp1;
	struct cvmx_dfm_fnt_iena_s cn66xx;
};
typedef union cvmx_dfm_fnt_iena cvmx_dfm_fnt_iena_t;
/**
 * DFM_FNT_SCLK = DFM Front SCLK Control Register
 *
 * NOTE: This register is in the USCLK domain and is used to enable the
 * conditional SCLK grid, as well as to start a software BIST sequence for
 * the DFM sub-block (the DFM has conditional clocks which prevent BIST
 * from running automatically under reset).
 */
union cvmx_dfm_fnt_sclk {
	uint64_t u64;
	struct cvmx_dfm_fnt_sclk_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_3_63                : 61;
	uint64_t clear_bist                   : 1;  /**< If CLEAR_BIST=1 when START_BIST goes 0->1,
	                                                 all previous BIST state is cleared.  Must be
	                                                 written to 1 in a separate CSR write before
	                                                 START_BIST, and must not change until the
	                                                 BIST operation completes. */
	uint64_t bist_start                   : 1;  /**< A 0->1 write runs BIST on the DFM sub-block.
	                                                 Write only after BOTH sclk and fclk have
	                                                 been enabled by software and are stable (see
	                                                 SCLKDIS and the DFM fclk/PLL/DLL init
	                                                 sequence). */
	uint64_t sclkdis                      : 1;  /**< When set, DFM sclks are disabled to conserve
	                                                 chip power when the DFM function is unused.
	                                                 Change only during power-on SW
	                                                 initialization. */
#else
	uint64_t sclkdis                      : 1;
	uint64_t bist_start                   : 1;
	uint64_t clear_bist                   : 1;
	uint64_t reserved_3_63                : 61;
#endif
	} s;
	struct cvmx_dfm_fnt_sclk_s cn63xx;
	struct cvmx_dfm_fnt_sclk_s cn63xxp1;
	struct cvmx_dfm_fnt_sclk_s cn66xx;
};
typedef union cvmx_dfm_fnt_sclk cvmx_dfm_fnt_sclk_t;
/**
 * DFM_FNT_STAT = DFM Front Status Register
 *
 * Error status for the DFM front section of logic.
 */
union cvmx_dfm_fnt_stat {
	uint64_t u64;
	struct cvmx_dfm_fnt_stat_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_42_63               : 22;
	uint64_t fsyn                         : 10; /**< Failing syndrome: when SBE_ERR=1, identifies
	                                                 the bit corrected by the OWECC check.
	                                                 Locked down while DBE_ERR/SBE_ERR are set
	                                                 (cleared via W1C); a later DBE_ERR
	                                                 re-captures FSYN since DBE has higher
	                                                 priority than SBE. */
	uint64_t fadr                         : 28; /**< Failing memory octaword address, captured
	                                                 when SBE_ERR or DBE_ERR sets.  Same lockdown
	                                                 and DBE-over-SBE priority rules as FSYN. */
	uint64_t reserved_2_3                 : 2;
	uint64_t dbe_err                      : 1;  /**< Double-bit (uncorrectable) error detected
	                                                 during a read; write 1 to clear. */
	uint64_t sbe_err                      : 1;  /**< Single-bit error detected (corrected) during
	                                                 a read; write 1 to clear. */
#else
	uint64_t sbe_err                      : 1;
	uint64_t dbe_err                      : 1;
	uint64_t reserved_2_3                 : 2;
	uint64_t fadr                         : 28;
	uint64_t fsyn                         : 10;
	uint64_t reserved_42_63               : 22;
#endif
	} s;
	struct cvmx_dfm_fnt_stat_s cn63xx;
	struct cvmx_dfm_fnt_stat_s cn63xxp1;
	struct cvmx_dfm_fnt_stat_s cn66xx;
};
typedef union cvmx_dfm_fnt_stat cvmx_dfm_fnt_stat_t;
1716 * DFM_IFB_CNT = Performance Counters
1719 union cvmx_dfm_ifb_cnt {
1721 struct cvmx_dfm_ifb_cnt_s {
1722 #ifdef __BIG_ENDIAN_BITFIELD
1723 uint64_t ifbcnt : 64; /**< Performance Counter
1724 64-bit counter that increments every
1725 cycle there is something in the in-flight buffer.
1726 Before using, clear counter via DFM_CONTROL.BWCNT. */
1728 uint64_t ifbcnt : 64;
1731 struct cvmx_dfm_ifb_cnt_s cn63xx;
1732 struct cvmx_dfm_ifb_cnt_s cn63xxp1;
1733 struct cvmx_dfm_ifb_cnt_s cn66xx;
1735 typedef union cvmx_dfm_ifb_cnt cvmx_dfm_ifb_cnt_t;
1738 * cvmx_dfm_modereg_params0
1741 * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
1744 union cvmx_dfm_modereg_params0 {
1746 struct cvmx_dfm_modereg_params0_s {
1747 #ifdef __BIG_ENDIAN_BITFIELD
1748 uint64_t reserved_25_63 : 39;
1749 uint64_t ppd : 1; /**< DLL Control for precharge powerdown
1750 0 = Slow exit (DLL off)
1751 1 = Fast exit (DLL on)
1752 DFM writes this value to MR0[PPD] in the selected DDR3 parts
1753 during power-up/init instruction sequencing.
1754 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1755 This value must equal the MR0[PPD] value in all the DDR3
1756 parts attached to all ranks during normal operation. */
1757 uint64_t wrp : 3; /**< Write recovery for auto precharge
1758 Should be programmed to be equal to or greater than
1759 RNDUP[tWR(ns)/tCYC(ns)]
1768 DFM writes this value to MR0[WR] in the selected DDR3 parts
1769 during power-up/init instruction sequencing.
1770 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1771 This value must equal the MR0[WR] value in all the DDR3
1772 parts attached to all ranks during normal operation. */
1773 uint64_t dllr : 1; /**< DLL Reset
1774 DFM writes this value to MR0[DLL] in the selected DDR3 parts
1775 during power-up/init instruction sequencing.
1776 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1777 The MR0[DLL] value must be 0 in all the DDR3
1778 parts attached to all ranks during normal operation. */
1779 uint64_t tm : 1; /**< Test Mode
1780 DFM writes this value to MR0[TM] in the selected DDR3 parts
1781 during power-up/init instruction sequencing.
1782 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1783 The MR0[TM] value must be 0 in all the DDR3
1784 parts attached to all ranks during normal operation. */
1785 uint64_t rbt : 1; /**< Read Burst Type
1786 1 = interleaved (fixed)
1787 DFM writes this value to MR0[RBT] in the selected DDR3 parts
1788 during power-up/init instruction sequencing.
1789 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1790 The MR0[RBT] value must be 1 in all the DDR3
1791 parts attached to all ranks during normal operation. */
1792 uint64_t cl : 4; /**< CAS Latency
1805 0000, 1011, 1101, 1111 = Reserved
1806 DFM writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts
1807 during power-up/init instruction sequencing.
1808 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1809 This value must equal the MR0[CAS Latency / CL] value in all the DDR3
1810 parts attached to all ranks during normal operation. */
1811 uint64_t bl : 2; /**< Burst Length
1813 DFM writes this value to MR0[BL] in the selected DDR3 parts
1814 during power-up/init instruction sequencing.
1815 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1816 The MR0[BL] value must be 0 in all the DDR3
1817 parts attached to all ranks during normal operation. */
1818 uint64_t qoff : 1; /**< Qoff Enable
1820 DFM writes this value to MR1[Qoff] in the selected DDR3 parts
1821 during power-up/init and write-leveling instruction sequencing.
1822 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1823 this value to MR1[Qoff] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1824 entry and exit instruction sequences.
1825 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1826 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
1827 The MR1[Qoff] value must be 0 in all the DDR3
1828 parts attached to all ranks during normal operation. */
1829 uint64_t tdqs : 1; /**< TDQS Enable
1831 DFM writes this value to MR1[TDQS] in the selected DDR3 parts
1832 during power-up/init and write-leveling instruction sequencing.
1833 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1834 this value to MR1[TDQS] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1835 entry and exit instruction sequences.
1836 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1837 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1838 uint64_t wlev : 1; /**< Write Leveling Enable
1840 DFM writes MR1[Level]=0 in the selected DDR3 parts
1841 during power-up/init and write-leveling instruction sequencing.
1842 (DFM also writes MR1[Level]=1 at the beginning of a
1843 write-leveling instruction sequence. Write-leveling can only be initiated via the
1844 write-leveling instruction sequence.)
1845 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1846 MR1[Level]=0 in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1847 entry and exit instruction sequences.
1848 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1849 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1850 uint64_t al : 2; /**< Additive Latency
1855 DFM writes this value to MR1[AL] in the selected DDR3 parts
1856 during power-up/init and write-leveling instruction sequencing.
1857 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1858 this value to MR1[AL] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1859 entry and exit instruction sequences.
1860 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1861 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
1862 This value must equal the MR1[AL] value in all the DDR3
1863 parts attached to all ranks during normal operation.
1864 See also DFM_CONTROL[POCAS]. */
1865 uint64_t dll : 1; /**< DLL Enable
1868 DFM writes this value to MR1[DLL] in the selected DDR3 parts
1869 during power-up/init and write-leveling instruction sequencing.
1870 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1871 this value to MR1[DLL] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1872 entry and exit instruction sequences.
1873 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1874 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
1875 This value must equal the MR1[DLL] value in all the DDR3
1876 parts attached to all ranks during normal operation.
1877 In dll-off mode, CL/CWL must be programmed
1878 equal to 6/6, respectively, as per the DDR3 specifications. */
1879 uint64_t mpr : 1; /**< MPR
1880 DFM writes this value to MR3[MPR] in the selected DDR3 parts
1881 during power-up/init and read-leveling instruction sequencing.
1882 (DFM also writes MR3[MPR]=1 at the beginning of a
1883 read-leveling instruction sequence. Read-leveling can only be initiated via the
1884 read-leveling instruction sequence.)
1885 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1886 The MR3[MPR] value must be 0 in all the DDR3
1887 parts attached to all ranks during normal operation. */
1888 uint64_t mprloc : 2; /**< MPR Location
1889 DFM writes this value to MR3[MPRLoc] in the selected DDR3 parts
1890 during power-up/init and read-leveling instruction sequencing.
1891 (DFM also writes MR3[MPRLoc]=0 at the beginning of the
1892 read-leveling instruction sequence.)
1893 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK].
1894 The MR3[MPRLoc] value must be 0 in all the DDR3
1895 parts attached to all ranks during normal operation. */
1896 uint64_t cwl : 3; /**< CAS Write Latency
1905 DFM writes this value to MR2[CWL] in the selected DDR3 parts
1906 during power-up/init instruction sequencing.
1907 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1908 this value to MR2[CWL] in all DRAM parts in DFM_CONFIG[INIT_STATUS] ranks during self-refresh
1909 entry and exit instruction sequences.
1910 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1911 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT].
1912 This value must equal the MR2[CWL] value in all the DDR3
1913 parts attached to all ranks during normal operation. */
1916 uint64_t mprloc : 2;
1930 uint64_t reserved_25_63 : 39;
1933 struct cvmx_dfm_modereg_params0_s cn63xx;
1934 struct cvmx_dfm_modereg_params0_s cn63xxp1;
1935 struct cvmx_dfm_modereg_params0_s cn66xx;
1937 typedef union cvmx_dfm_modereg_params0 cvmx_dfm_modereg_params0_t;
1940 * cvmx_dfm_modereg_params1
1943 * These parameters are written into the DDR3 MR0, MR1, MR2 and MR3 registers.
1946 union cvmx_dfm_modereg_params1 {
1948 struct cvmx_dfm_modereg_params1_s {
1949 #ifdef __BIG_ENDIAN_BITFIELD
1950 uint64_t reserved_48_63 : 16;
1951 uint64_t rtt_nom_11 : 3; /**< Must be zero */
1952 uint64_t dic_11 : 2; /**< Must be zero */
1953 uint64_t rtt_wr_11 : 2; /**< Must be zero */
1954 uint64_t srt_11 : 1; /**< Must be zero */
1955 uint64_t asr_11 : 1; /**< Must be zero */
1956 uint64_t pasr_11 : 3; /**< Must be zero */
1957 uint64_t rtt_nom_10 : 3; /**< Must be zero */
1958 uint64_t dic_10 : 2; /**< Must be zero */
1959 uint64_t rtt_wr_10 : 2; /**< Must be zero */
1960 uint64_t srt_10 : 1; /**< Must be zero */
1961 uint64_t asr_10 : 1; /**< Must be zero */
1962 uint64_t pasr_10 : 3; /**< Must be zero */
1963 uint64_t rtt_nom_01 : 3; /**< RTT_NOM Rank 1
1964 DFM writes this value to MR1[Rtt_Nom] in the rank 1 (i.e. CS1) DDR3 parts
1965 when selected during power-up/init instruction sequencing.
1966 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1967 this value to MR1[Rtt_Nom] in all DRAM parts in rank 1 during self-refresh
1968 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
1969 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1970 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1971 uint64_t dic_01 : 2; /**< Output Driver Impedance Control Rank 1
1972 DFM writes this value to MR1[D.I.C.] in the rank 1 (i.e. CS1) DDR3 parts
1973 when selected during power-up/init and write-leveling instruction sequencing.
1974 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1975 this value to MR1[D.I.C.] in all DRAM parts in rank 1 during self-refresh
1976 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
1977 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1978 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1979 uint64_t rtt_wr_01 : 2; /**< RTT_WR Rank 1
1980 DFM writes this value to MR2[Rtt_WR] in the rank 1 (i.e. CS1) DDR3 parts
1981 when selected during power-up/init instruction sequencing.
1982 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1983 this value to MR2[Rtt_WR] in all DRAM parts in rank 1 during self-refresh
1984 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
1985 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1986 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1987 uint64_t srt_01 : 1; /**< Self-refresh temperature range Rank 1
1988 DFM writes this value to MR2[SRT] in the rank 1 (i.e. CS1) DDR3 parts
1989 when selected during power-up/init instruction sequencing.
1990 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1991 this value to MR2[SRT] in all DRAM parts in rank 1 during self-refresh
1992 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
1993 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
1994 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
1995 uint64_t asr_01 : 1; /**< Auto self-refresh Rank 1
1996 DFM writes this value to MR2[ASR] in the rank 1 (i.e. CS1) DDR3 parts
1997 when selected during power-up/init instruction sequencing.
1998 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
1999 this value to MR2[ASR] in all DRAM parts in rank 1 during self-refresh
2000 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
2001 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2002 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2003 uint64_t pasr_01 : 3; /**< Partial array self-refresh Rank 1
2004 DFM writes this value to MR2[PASR] in the rank 1 (i.e. CS1) DDR3 parts
2005 when selected during power-up/init instruction sequencing.
2006 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2007 this value to MR2[PASR] in all DRAM parts in rank 1 during self-refresh
2008 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<1>]=1).
2009 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2010 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2011 uint64_t rtt_nom_00 : 3; /**< RTT_NOM Rank 0
2012 DFM writes this value to MR1[Rtt_Nom] in the rank 0 (i.e. CS0) DDR3 parts
2013 when selected during power-up/init instruction sequencing.
2014 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2015 this value to MR1[Rtt_Nom] in all DRAM parts in rank 0 during self-refresh
2016 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2017 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2018 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2019 uint64_t dic_00 : 2; /**< Output Driver Impedance Control Rank 0
2020 DFM writes this value to MR1[D.I.C.] in the rank 0 (i.e. CS0) DDR3 parts
2021 when selected during power-up/init and write-leveling instruction sequencing.
2022 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2023 this value to MR1[D.I.C.] in all DRAM parts in rank 0 during self-refresh
2024 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2025 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2026 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2027 uint64_t rtt_wr_00 : 2; /**< RTT_WR Rank 0
2028 DFM writes this value to MR2[Rtt_WR] in the rank 0 (i.e. CS0) DDR3 parts
2029 when selected during power-up/init instruction sequencing.
2030 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2031 this value to MR2[Rtt_WR] in all DRAM parts in rank 0 during self-refresh
2032 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2033 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2034 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2035 uint64_t srt_00 : 1; /**< Self-refresh temperature range Rank 0
2036 DFM writes this value to MR2[SRT] in the rank 0 (i.e. CS0) DDR3 parts
2037 when selected during power-up/init instruction sequencing.
2038 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2039 this value to MR2[SRT] in all DRAM parts in rank 0 during self-refresh
2040 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2041 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2042 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2043 uint64_t asr_00 : 1; /**< Auto self-refresh Rank 0
2044 DFM writes this value to MR2[ASR] in the rank 0 (i.e. CS0) DDR3 parts
2045 when selected during power-up/init instruction sequencing.
2046 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2047 this value to MR2[ASR] in all DRAM parts in rank 0 during self-refresh
2048 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2049 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2050 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2051 uint64_t pasr_00 : 3; /**< Partial array self-refresh Rank 0
2052 DFM writes this value to MR2[PASR] in the rank 0 (i.e. CS0) DDR3 parts
2053 when selected during power-up/init instruction sequencing.
2054 If DFM_CONFIG[SREF_WITH_DLL] is set, DFM also writes
2055 this value to MR2[PASR] in all DRAM parts in rank 0 during self-refresh
2056 entry and exit instruction sequences (when DFM_CONFIG[INIT_STATUS<0>]=1).
2057 See DFM_CONFIG[SEQUENCE,INIT_START,RANKMASK] and
2058 DFM_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
2060 uint64_t pasr_00 : 3;
2061 uint64_t asr_00 : 1;
2062 uint64_t srt_00 : 1;
2063 uint64_t rtt_wr_00 : 2;
2064 uint64_t dic_00 : 2;
2065 uint64_t rtt_nom_00 : 3;
2066 uint64_t pasr_01 : 3;
2067 uint64_t asr_01 : 1;
2068 uint64_t srt_01 : 1;
2069 uint64_t rtt_wr_01 : 2;
2070 uint64_t dic_01 : 2;
2071 uint64_t rtt_nom_01 : 3;
2072 uint64_t pasr_10 : 3;
2073 uint64_t asr_10 : 1;
2074 uint64_t srt_10 : 1;
2075 uint64_t rtt_wr_10 : 2;
2076 uint64_t dic_10 : 2;
2077 uint64_t rtt_nom_10 : 3;
2078 uint64_t pasr_11 : 3;
2079 uint64_t asr_11 : 1;
2080 uint64_t srt_11 : 1;
2081 uint64_t rtt_wr_11 : 2;
2082 uint64_t dic_11 : 2;
2083 uint64_t rtt_nom_11 : 3;
2084 uint64_t reserved_48_63 : 16;
2087 struct cvmx_dfm_modereg_params1_s cn63xx;
2088 struct cvmx_dfm_modereg_params1_s cn63xxp1;
2089 struct cvmx_dfm_modereg_params1_s cn66xx;
2091 typedef union cvmx_dfm_modereg_params1 cvmx_dfm_modereg_params1_t;
2096 * DFM_OPS_CNT = Performance Counters
2099 union cvmx_dfm_ops_cnt {
2101 struct cvmx_dfm_ops_cnt_s {
2102 #ifdef __BIG_ENDIAN_BITFIELD
2103 uint64_t opscnt : 64; /**< Performance Counter
2104 64-bit counter that increments when the DDR3 data bus
2105 is being used. Before using, clear counter via
2107 DRAM bus utilization = DFM_OPS_CNT/DFM_FCLK_CNT */
2109 uint64_t opscnt : 64;
2112 struct cvmx_dfm_ops_cnt_s cn63xx;
2113 struct cvmx_dfm_ops_cnt_s cn63xxp1;
2114 struct cvmx_dfm_ops_cnt_s cn66xx;
2116 typedef union cvmx_dfm_ops_cnt cvmx_dfm_ops_cnt_t;
2121 * DFM_PHY_CTL = DFM PHY Control
2124 union cvmx_dfm_phy_ctl {
2126 struct cvmx_dfm_phy_ctl_s {
2127 #ifdef __BIG_ENDIAN_BITFIELD
2128 uint64_t reserved_15_63 : 49;
2129 uint64_t rx_always_on : 1; /**< Disable dynamic DDR3 IO Rx power gating */
2130 uint64_t lv_mode : 1; /**< Low Voltage Mode (1.35V) */
2131 uint64_t ck_tune1 : 1; /**< Clock Tune
2134 uint64_t ck_dlyout1 : 4; /**< Clock delay out setting
2137 uint64_t ck_tune0 : 1; /**< Clock Tune */
2138 uint64_t ck_dlyout0 : 4; /**< Clock delay out setting */
2139 uint64_t loopback : 1; /**< Loopback enable */
2140 uint64_t loopback_pos : 1; /**< Loopback pos mode */
2141 uint64_t ts_stagger : 1; /**< TS Staggermode
2142 This mode configures output drivers with 2-stage drive
2143 strength to avoid undershoot issues on the bus when strong
2144 drivers are suddenly turned on. When this mode is asserted,
2145 Octeon will configure output drivers to be weak drivers
2146 (60 ohm output impedance) at the first FCLK cycle, and
2147 change drivers to the designated drive strengths specified
2148 in DFM_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
2149 at the following cycle */
2151 uint64_t ts_stagger : 1;
2152 uint64_t loopback_pos : 1;
2153 uint64_t loopback : 1;
2154 uint64_t ck_dlyout0 : 4;
2155 uint64_t ck_tune0 : 1;
2156 uint64_t ck_dlyout1 : 4;
2157 uint64_t ck_tune1 : 1;
2158 uint64_t lv_mode : 1;
2159 uint64_t rx_always_on : 1;
2160 uint64_t reserved_15_63 : 49;
2163 struct cvmx_dfm_phy_ctl_s cn63xx;
2164 struct cvmx_dfm_phy_ctl_cn63xxp1 {
2165 #ifdef __BIG_ENDIAN_BITFIELD
2166 uint64_t reserved_14_63 : 50;
2167 uint64_t lv_mode : 1; /**< Low Voltage Mode (1.35V) */
2168 uint64_t ck_tune1 : 1; /**< Clock Tune
2171 uint64_t ck_dlyout1 : 4; /**< Clock delay out setting
2174 uint64_t ck_tune0 : 1; /**< Clock Tune */
2175 uint64_t ck_dlyout0 : 4; /**< Clock delay out setting */
2176 uint64_t loopback : 1; /**< Loopback enable */
2177 uint64_t loopback_pos : 1; /**< Loopback pos mode */
2178 uint64_t ts_stagger : 1; /**< TS Staggermode
2179 This mode configures output drivers with 2-stage drive
2180 strength to avoid undershoot issues on the bus when strong
2181 drivers are suddenly turned on. When this mode is asserted,
2182 Octeon will configure output drivers to be weak drivers
2183 (60 ohm output impedance) at the first FCLK cycle, and
2184 change drivers to the designated drive strengths specified
2185 in DFM_COMP_CTL2 [CMD_CTL/CK_CTL/DQX_CTL] starting
2186 at the following cycle */
2188 uint64_t ts_stagger : 1;
2189 uint64_t loopback_pos : 1;
2190 uint64_t loopback : 1;
2191 uint64_t ck_dlyout0 : 4;
2192 uint64_t ck_tune0 : 1;
2193 uint64_t ck_dlyout1 : 4;
2194 uint64_t ck_tune1 : 1;
2195 uint64_t lv_mode : 1;
2196 uint64_t reserved_14_63 : 50;
2199 struct cvmx_dfm_phy_ctl_s cn66xx;
2201 typedef union cvmx_dfm_phy_ctl cvmx_dfm_phy_ctl_t;
2204 * cvmx_dfm_reset_ctl
2206 * Specify the RSL base addresses for the block
2210 * DDR3RST - DDR3 DRAM parts have a new RESET#
2211 * pin that wasn't present in DDR2 parts. The
2212 * DDR3RST CSR field controls the assertion of
2213 * the new 6xxx pin that attaches to RESET#.
2214 * When DDR3RST is set, 6xxx asserts RESET#.
2215 * When DDR3RST is clear, 6xxx de-asserts
2218 * DDR3RST is set on a cold reset. Warm and
2219 * soft chip resets do not affect the DDR3RST
2220 * value. Outside of cold reset, only software
2221 * CSR writes change the DDR3RST value.
2223 union cvmx_dfm_reset_ctl {
2225 struct cvmx_dfm_reset_ctl_s {
2226 #ifdef __BIG_ENDIAN_BITFIELD
2227 uint64_t reserved_4_63 : 60;
2228 uint64_t ddr3psv : 1; /**< Must be zero */
2229 uint64_t ddr3psoft : 1; /**< Must be zero */
2230 uint64_t ddr3pwarm : 1; /**< Must be zero */
2231 uint64_t ddr3rst : 1; /**< Memory Reset
2233 1 = Reset de-asserted */
2235 uint64_t ddr3rst : 1;
2236 uint64_t ddr3pwarm : 1;
2237 uint64_t ddr3psoft : 1;
2238 uint64_t ddr3psv : 1;
2239 uint64_t reserved_4_63 : 60;
2242 struct cvmx_dfm_reset_ctl_s cn63xx;
2243 struct cvmx_dfm_reset_ctl_s cn63xxp1;
2244 struct cvmx_dfm_reset_ctl_s cn66xx;
2246 typedef union cvmx_dfm_reset_ctl cvmx_dfm_reset_ctl_t;
2249 * cvmx_dfm_rlevel_ctl
2251 union cvmx_dfm_rlevel_ctl {
2253 struct cvmx_dfm_rlevel_ctl_s {
2254 #ifdef __BIG_ENDIAN_BITFIELD
2255 uint64_t reserved_22_63 : 42;
2256 uint64_t delay_unload_3 : 1; /**< When set, unload the PHY silo one cycle later
2257 during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 3
2258 DELAY_UNLOAD_3 should normally be set, particularly at higher speeds. */
2259 uint64_t delay_unload_2 : 1; /**< When set, unload the PHY silo one cycle later
2260 during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 2
2261 DELAY_UNLOAD_2 should normally not be set. */
2262 uint64_t delay_unload_1 : 1; /**< When set, unload the PHY silo one cycle later
2263 during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 1
2264 DELAY_UNLOAD_1 should normally not be set. */
2265 uint64_t delay_unload_0 : 1; /**< When set, unload the PHY silo one cycle later
2266 during read-leveling if DFM_RLEVEL_RANKi[BYTE*<1:0>] = 0
2267 DELAY_UNLOAD_0 should normally not be set. */
2268 uint64_t bitmask : 8; /**< Mask to select bit lanes on which read-leveling
2269 feedback is returned when OR_DIS is set to 1 */
2270 uint64_t or_dis : 1; /**< Disable or'ing of bits in a byte lane when computing
2271 the read-leveling bitmask
2272 OR_DIS should normally not be set. */
2273 uint64_t offset_en : 1; /**< Use DFM_RLEVEL_CTL[OFFSET] to calibrate read
2274 level dskew settings */
2275 uint64_t offset : 4; /**< Pick final_setting-offset (if set) for the read level
2276 deskew setting instead of the middle of the largest
2277 contiguous sequence of 1's in the bitmask */
2278 uint64_t byte : 4; /**< 0 <= BYTE <= 1
2279 Byte index for which bitmask results are saved
2280 in DFM_RLEVEL_DBG */
2283 uint64_t offset : 4;
2284 uint64_t offset_en : 1;
2285 uint64_t or_dis : 1;
2286 uint64_t bitmask : 8;
2287 uint64_t delay_unload_0 : 1;
2288 uint64_t delay_unload_1 : 1;
2289 uint64_t delay_unload_2 : 1;
2290 uint64_t delay_unload_3 : 1;
2291 uint64_t reserved_22_63 : 42;
2294 struct cvmx_dfm_rlevel_ctl_s cn63xx;
2295 struct cvmx_dfm_rlevel_ctl_cn63xxp1 {
2296 #ifdef __BIG_ENDIAN_BITFIELD
2297 uint64_t reserved_9_63 : 55;
2298 uint64_t offset_en : 1; /**< Use DFM_RLEVEL_CTL[OFFSET] to calibrate read
2299 level dskew settings */
2300 uint64_t offset : 4; /**< Pick final_setting-offset (if set) for the read level
2301 deskew setting instead of the middle of the largest
2302 contiguous sequence of 1's in the bitmask */
2303 uint64_t byte : 4; /**< 0 <= BYTE <= 1
2304 Byte index for which bitmask results are saved
2305 in DFM_RLEVEL_DBG */
2308 uint64_t offset : 4;
2309 uint64_t offset_en : 1;
2310 uint64_t reserved_9_63 : 55;
2313 struct cvmx_dfm_rlevel_ctl_s cn66xx;
2315 typedef union cvmx_dfm_rlevel_ctl cvmx_dfm_rlevel_ctl_t;
2318 * cvmx_dfm_rlevel_dbg
2321 * A given read of DFM_RLEVEL_DBG returns the read-leveling pass/fail results for all possible
2322 * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled.
2323 * DFM_RLEVEL_CTL[BYTE] selects the particular byte.
2324 * To get these pass/fail results for another different rank, you must run the hardware read-leveling
2325 * again. For example, it is possible to get the BITMASK results for every byte of every rank
2326 * if you run read-leveling separately for each rank, probing DFM_RLEVEL_DBG between each
2329 union cvmx_dfm_rlevel_dbg {
2331 struct cvmx_dfm_rlevel_dbg_s {
2332 #ifdef __BIG_ENDIAN_BITFIELD
2333 uint64_t bitmask : 64; /**< Bitmask generated during deskew settings sweep
2334 BITMASK[n]=0 means deskew setting n failed
2335 BITMASK[n]=1 means deskew setting n passed
2338 uint64_t bitmask : 64;
2341 struct cvmx_dfm_rlevel_dbg_s cn63xx;
2342 struct cvmx_dfm_rlevel_dbg_s cn63xxp1;
2343 struct cvmx_dfm_rlevel_dbg_s cn66xx;
2345 typedef union cvmx_dfm_rlevel_dbg cvmx_dfm_rlevel_dbg_t;
2348 * cvmx_dfm_rlevel_rank#
2351 * This is TWO CSRs per DFM, one per each rank.
2353 * Deskew setting is measured in units of 1/4 FCLK, so the above BYTE* values can range over 16 FCLKs.
2355 * Each CSR is written by HW during a read-leveling sequence for the rank. (HW sets STATUS==3 after HW read-leveling completes for the rank.)
2356 * If HW is unable to find a match per DFM_RLEVEL_CTL[OFFSET_EN] and DFM_RLEVEL_CTL[OFFSET], then HW will set DFM_RLEVEL_RANKn[BYTE*<5:0>]
2359 * Each CSR may also be written by SW, but not while a read-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
2361 * SW initiates a HW read-leveling sequence by programming DFM_RLEVEL_CTL and writing INIT_START=1 with SEQUENCE=1 in DFM_CONFIG.
2362 * See DFM_RLEVEL_CTL.
2364 union cvmx_dfm_rlevel_rankx {
2366 struct cvmx_dfm_rlevel_rankx_s {
2367 #ifdef __BIG_ENDIAN_BITFIELD
2368 uint64_t reserved_56_63 : 8;
2369 uint64_t status : 2; /**< Indicates status of the read-levelling and where
2370 the BYTE* programmings in <35:0> came from:
2371 0 = BYTE* values are their reset value
2372 1 = BYTE* values were set via a CSR write to this register
2373 2 = read-leveling sequence currently in progress (BYTE* values are unpredictable)
2374 3 = BYTE* values came from a complete read-leveling sequence */
2375 uint64_t reserved_12_53 : 42;
2376 uint64_t byte1 : 6; /**< Deskew setting */
2377 uint64_t byte0 : 6; /**< Deskew setting */
2381 uint64_t reserved_12_53 : 42;
2382 uint64_t status : 2;
2383 uint64_t reserved_56_63 : 8;
2386 struct cvmx_dfm_rlevel_rankx_s cn63xx;
2387 struct cvmx_dfm_rlevel_rankx_s cn63xxp1;
2388 struct cvmx_dfm_rlevel_rankx_s cn66xx;
2390 typedef union cvmx_dfm_rlevel_rankx cvmx_dfm_rlevel_rankx_t;
2393 * cvmx_dfm_rodt_mask
2395 * DFM_RODT_MASK = DFM Read OnDieTermination mask
2396 * System designers may desire to terminate DQ/DQS/DM lines for higher frequency DDR operations
2397 * especially on a multi-rank system. DDR3 DQ/DM/DQS I/O's have built in
2398 * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
2399 * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts
2400 * in that rank. System designers may prefer different combinations of ODT ON's for reads
2401 * into different ranks. Octeon supports full programmability by way of the mask register below.
2402 * Each Rank position has its own 8-bit programmable field.
2403 * When the controller does a read to that rank, it sets the 4 ODT pins to the MASK pins below.
2404 * For example, when doing a read into Rank0, a system designer may desire to terminate the lines
2405 * with the resistor on Dimm0/Rank1. The mask RODT_D0_R0 would then be [00000010].
2406 * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
2407 * required, write 0 in this register. Note that, as per the DDR3 specifications, the ODT pin
2408 * for the rank that is being read should always be 0.
2411 * - Notice that when there is only one rank, all valid fields must be zero. This is because there is no
2412 * "other" rank to terminate lines for. Read ODT is meant for multirank systems.
2413 * - For a two rank system and a read op to rank0: use RODT_D0_R0<1> to terminate lines on rank1.
2414 * - For a two rank system and a read op to rank1: use RODT_D0_R1<0> to terminate lines on rank0.
2415 * - Therefore, when a given RANK is selected, the RODT mask for that RANK is used.
2417 * DFM always reads 128-bit words independently via one read CAS operation per word.
2418 * When a RODT mask bit is set, DFM asserts the OCTEON ODT output
2419 * pin(s) starting (CL - CWL) CK's after the read CAS operation. Then, OCTEON
2420 * normally continues to assert the ODT output pin(s) for 5+DFM_CONTROL[RODT_BPRCH] more CK's
2421 * - for a total of 6+DFM_CONTROL[RODT_BPRCH] CK's for the entire 128-bit read -
2422 * satisfying the 6 CK DDR3 ODTH8 requirements.
2424 * But it is possible for OCTEON to issue two 128-bit reads separated by as few as
2425 * RtR = 4 or 5 (6 if DFM_CONTROL[RODT_BPRCH]=1) CK's. In that case, OCTEON asserts the ODT output pin(s)
2426 * for the RODT mask of the first 128-bit read for RtR CK's, then asserts
2427 * the ODT output pin(s) for the RODT mask of the second 128-bit read for 6+DFM_CONTROL[RODT_BPRCH] CK's
2428 * (or less if a third 128-bit read follows within 4 or 5 (or 6) CK's of this second 128-bit read).
2429 * Note that it may be necessary to force DFM to space back-to-back 128-bit reads
2430 * to different ranks apart by at least 6+DFM_CONTROL[RODT_BPRCH] CK's to prevent DDR3 ODTH8 violations.
2432 union cvmx_dfm_rodt_mask {
2434 struct cvmx_dfm_rodt_mask_s {
2435 #ifdef __BIG_ENDIAN_BITFIELD
2436 uint64_t rodt_d3_r1 : 8; /**< Must be zero. */
2437 uint64_t rodt_d3_r0 : 8; /**< Must be zero. */
2438 uint64_t rodt_d2_r1 : 8; /**< Must be zero. */
2439 uint64_t rodt_d2_r0 : 8; /**< Must be zero. */
2440 uint64_t rodt_d1_r1 : 8; /**< Must be zero. */
2441 uint64_t rodt_d1_r0 : 8; /**< Must be zero. */
2442 uint64_t rodt_d0_r1 : 8; /**< Read ODT mask RANK1
2443 RODT_D0_R1<7:1> must be zero in all cases.
2444 RODT_D0_R1<0> must also be zero if RANK_ENA is not set. */
2445 uint64_t rodt_d0_r0 : 8; /**< Read ODT mask RANK0
2446 RODT_D0_R0<7:2,0> must be zero in all cases.
2447 RODT_D0_R0<1> must also be zero if RANK_ENA is not set. */
2449 uint64_t rodt_d0_r0 : 8;
2450 uint64_t rodt_d0_r1 : 8;
2451 uint64_t rodt_d1_r0 : 8;
2452 uint64_t rodt_d1_r1 : 8;
2453 uint64_t rodt_d2_r0 : 8;
2454 uint64_t rodt_d2_r1 : 8;
2455 uint64_t rodt_d3_r0 : 8;
2456 uint64_t rodt_d3_r1 : 8;
2459 struct cvmx_dfm_rodt_mask_s cn63xx;
2460 struct cvmx_dfm_rodt_mask_s cn63xxp1;
2461 struct cvmx_dfm_rodt_mask_s cn66xx;
2463 typedef union cvmx_dfm_rodt_mask cvmx_dfm_rodt_mask_t;
/**
 * cvmx_dfm_slot_ctl0
 *
 * DFM_SLOT_CTL0 = DFM Slot Control0
 * This register is an assortment of various control fields needed by the memory controller.
 *
 * HW will update this register if SW has not previously written to it and when any of
 * DFM_RLEVEL_RANKn, DFM_WLEVEL_RANKn, DFM_CONTROL and DFM_MODEREG_PARAMS0 change. Ideally,
 * this register should only be read after DFM has been initialized and DFM_RLEVEL_RANKn,
 * DFM_WLEVEL_RANKn have been updated.
 *
 * R2W_INIT has 1 extra CK cycle built in for odt settling/channel turnaround time.
 */
union cvmx_dfm_slot_ctl0 {
	uint64_t u64;
	struct cvmx_dfm_slot_ctl0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_24_63 : 40;
	uint64_t w2w_init : 6; /**< Write-to-write spacing control
                                         for back to back accesses to the same rank and dimm */
	uint64_t w2r_init : 6; /**< Write-to-read spacing control
                                         for back to back accesses to the same rank and dimm */
	uint64_t r2w_init : 6; /**< Read-to-write spacing control
                                         for back to back accesses to the same rank and dimm */
	uint64_t r2r_init : 6; /**< Read-to-read spacing control
                                         for back to back accesses to the same rank and dimm */
#else
	uint64_t r2r_init : 6;
	uint64_t r2w_init : 6;
	uint64_t w2r_init : 6;
	uint64_t w2w_init : 6;
	uint64_t reserved_24_63 : 40;
#endif
	} s;
	struct cvmx_dfm_slot_ctl0_s cn63xx;
	struct cvmx_dfm_slot_ctl0_s cn63xxp1;
	struct cvmx_dfm_slot_ctl0_s cn66xx;
};
typedef union cvmx_dfm_slot_ctl0 cvmx_dfm_slot_ctl0_t;
/**
 * cvmx_dfm_slot_ctl1
 *
 * DFM_SLOT_CTL1 = DFM Slot Control1
 * This register is an assortment of various control fields needed by the memory controller.
 *
 * HW will update this register if SW has not previously written to it and when any of
 * DFM_RLEVEL_RANKn, DFM_WLEVEL_RANKn, DFM_CONTROL and DFM_MODEREG_PARAMS0 change. Ideally,
 * this register should only be read after DFM has been initialized and DFM_RLEVEL_RANKn,
 * DFM_WLEVEL_RANKn have been updated.
 *
 * R2W_XRANK_INIT, W2R_XRANK_INIT have 1 extra CK cycle built in for odt settling/channel
 * turnaround time.
 */
union cvmx_dfm_slot_ctl1 {
	uint64_t u64;
	struct cvmx_dfm_slot_ctl1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_24_63 : 40;
	uint64_t w2w_xrank_init : 6; /**< Write-to-write spacing control
                                         for back to back accesses across ranks of the same dimm */
	uint64_t w2r_xrank_init : 6; /**< Write-to-read spacing control
                                         for back to back accesses across ranks of the same dimm */
	uint64_t r2w_xrank_init : 6; /**< Read-to-write spacing control
                                         for back to back accesses across ranks of the same dimm */
	uint64_t r2r_xrank_init : 6; /**< Read-to-read spacing control
                                         for back to back accesses across ranks of the same dimm */
#else
	uint64_t r2r_xrank_init : 6;
	uint64_t r2w_xrank_init : 6;
	uint64_t w2r_xrank_init : 6;
	uint64_t w2w_xrank_init : 6;
	uint64_t reserved_24_63 : 40;
#endif
	} s;
	struct cvmx_dfm_slot_ctl1_s cn63xx;
	struct cvmx_dfm_slot_ctl1_s cn63xxp1;
	struct cvmx_dfm_slot_ctl1_s cn66xx;
};
typedef union cvmx_dfm_slot_ctl1 cvmx_dfm_slot_ctl1_t;
/**
 * cvmx_dfm_timing_params0
 */
union cvmx_dfm_timing_params0 {
	uint64_t u64;
	struct cvmx_dfm_timing_params0_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_47_63 : 17;
	uint64_t trp_ext : 1; /**< Indicates tRP constraints.
                                         Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                                         + (RNDUP[tRTP(ns)/tCYC(ns)])-4)-1,
                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP tRTP=max(4nCK, 7.5ns) */
	uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
                                         Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
                                         where tCKSRE is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(5nCK, 10ns) */
	uint64_t trp : 4; /**< Indicates tRP constraints.
                                         Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                                         + (RNDUP[tRTP(ns)/tCYC(ns)])-4)-1,
                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP tRTP=max(4nCK, 7.5ns) */
	uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
                                         Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
                                         where tZQINIT is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=2 (equivalent to 512) */
	uint64_t tdllk : 4; /**< Indicates tDLLk constraints.
                                         Set TDLLK (CSR field) = RNDUP[tDLLk(ns)/(256*tCYC(ns))],
                                         where tDLLk is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=2 (equivalent to 512)
                                         This parameter is used in self-refresh exit
                                         and assumed to be greater than tRFC */
	uint64_t tmod : 4; /**< Indicates tMOD constraints.
                                         Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
                                         where tMOD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(12nCK, 15ns) */
	uint64_t tmrd : 4; /**< Indicates tMRD constraints.
                                         Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
                                         where tMRD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t txpr : 4; /**< Indicates tXPR constraints.
                                         Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
                                         where tXPR is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(5nCK, tRFC+10ns) */
	uint64_t tcke : 4; /**< Indicates tCKE constraints.
                                         Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
                                         where tCKE is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
	uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
                                         Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
                                         where tZQCS is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=4 (equivalent to 64) */
	uint64_t tckeon : 10; /**< Reserved. Should be written to zero. */
#else
	uint64_t tckeon : 10;
	uint64_t tzqcs : 4;
	uint64_t tcke : 4;
	uint64_t txpr : 4;
	uint64_t tmrd : 4;
	uint64_t tmod : 4;
	uint64_t tdllk : 4;
	uint64_t tzqinit : 4;
	uint64_t trp : 4;
	uint64_t tcksre : 4;
	uint64_t trp_ext : 1;
	uint64_t reserved_47_63 : 17;
#endif
	} s;
	struct cvmx_dfm_timing_params0_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_47_63 : 17;
	uint64_t trp_ext : 1; /**< Indicates tRP constraints.
                                         Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                                         + (RNDUP[tRTP(ns)/tCYC(ns)])-4)-1,
                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP tRTP=max(4nCK, 7.5ns) */
	uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
                                         Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
                                         where tCKSRE is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(5nCK, 10ns) */
	uint64_t trp : 4; /**< Indicates tRP constraints.
                                         Set [TRP_EXT[0:0], TRP[3:0]] (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                                         + (RNDUP[tRTP(ns)/tCYC(ns)])-4)-1,
                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP tRTP=max(4nCK, 7.5ns) */
	uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
                                         Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
                                         where tZQINIT is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=2 (equivalent to 512) */
	uint64_t tdllk : 4; /**< Indicates tDLLk constraints.
                                         Set TDLLK (CSR field) = RNDUP[tDLLk(ns)/(256*tCYC(ns))],
                                         where tDLLk is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=2 (equivalent to 512)
                                         This parameter is used in self-refresh exit
                                         and assumed to be greater than tRFC */
	uint64_t tmod : 4; /**< Indicates tMOD constraints.
                                         Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
                                         where tMOD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(12nCK, 15ns) */
	uint64_t tmrd : 4; /**< Indicates tMRD constraints.
                                         Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
                                         where tMRD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t txpr : 4; /**< Indicates tXPR constraints.
                                         Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
                                         where tXPR is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(5nCK, tRFC+10ns) */
	uint64_t tcke : 4; /**< Indicates tCKE constraints.
                                         Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
                                         where tCKE is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
	uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
                                         Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
                                         where tZQCS is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=4 (equivalent to 64) */
	uint64_t reserved_0_9 : 10;
#else
	uint64_t reserved_0_9 : 10;
	uint64_t tzqcs : 4;
	uint64_t tcke : 4;
	uint64_t txpr : 4;
	uint64_t tmrd : 4;
	uint64_t tmod : 4;
	uint64_t tdllk : 4;
	uint64_t tzqinit : 4;
	uint64_t trp : 4;
	uint64_t tcksre : 4;
	uint64_t trp_ext : 1;
	uint64_t reserved_47_63 : 17;
#endif
	} cn63xx;
	struct cvmx_dfm_timing_params0_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_46_63 : 18;
	uint64_t tcksre : 4; /**< Indicates tCKSRE constraints.
                                         Set TCKSRE (CSR field) = RNDUP[tCKSRE(ns)/tCYC(ns)]-1,
                                         where tCKSRE is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(5nCK, 10ns) */
	uint64_t trp : 4; /**< Indicates tRP constraints.
                                         Set TRP (CSR field) = RNDUP[tRP(ns)/tCYC(ns)]
                                         + (RNDUP[tRTP(ns)/tCYC(ns)])-4)-1,
                                         where tRP, tRTP are from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP tRTP=max(4nCK, 7.5ns) */
	uint64_t tzqinit : 4; /**< Indicates tZQINIT constraints.
                                         Set TZQINIT (CSR field) = RNDUP[tZQINIT(ns)/(256*tCYC(ns))],
                                         where tZQINIT is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=2 (equivalent to 512) */
	uint64_t tdllk : 4; /**< Indicates tDLLk constraints.
                                         Set TDLLK (CSR field) = RNDUP[tDLLk(ns)/(256*tCYC(ns))],
                                         where tDLLk is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=2 (equivalent to 512)
                                         This parameter is used in self-refresh exit
                                         and assumed to be greater than tRFC */
	uint64_t tmod : 4; /**< Indicates tMOD constraints.
                                         Set TMOD (CSR field) = RNDUP[tMOD(ns)/tCYC(ns)]-1,
                                         where tMOD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(12nCK, 15ns) */
	uint64_t tmrd : 4; /**< Indicates tMRD constraints.
                                         Set TMRD (CSR field) = RNDUP[tMRD(ns)/tCYC(ns)]-1,
                                         where tMRD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t txpr : 4; /**< Indicates tXPR constraints.
                                         Set TXPR (CSR field) = RNDUP[tXPR(ns)/(16*tCYC(ns))],
                                         where tXPR is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(5nCK, tRFC+10ns) */
	uint64_t tcke : 4; /**< Indicates tCKE constraints.
                                         Set TCKE (CSR field) = RNDUP[tCKE(ns)/tCYC(ns)]-1,
                                         where tCKE is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(3nCK, 7.5/5.625/5.625/5ns) */
	uint64_t tzqcs : 4; /**< Indicates tZQCS constraints.
                                         Set TZQCS (CSR field) = RNDUP[tZQCS(ns)/(16*tCYC(ns))],
                                         where tZQCS is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=4 (equivalent to 64) */
	uint64_t tckeon : 10; /**< Reserved. Should be written to zero. */
#else
	uint64_t tckeon : 10;
	uint64_t tzqcs : 4;
	uint64_t tcke : 4;
	uint64_t txpr : 4;
	uint64_t tmrd : 4;
	uint64_t tmod : 4;
	uint64_t tdllk : 4;
	uint64_t tzqinit : 4;
	uint64_t trp : 4;
	uint64_t tcksre : 4;
	uint64_t reserved_46_63 : 18;
#endif
	} cn63xxp1;
	struct cvmx_dfm_timing_params0_cn63xx cn66xx;
};
typedef union cvmx_dfm_timing_params0 cvmx_dfm_timing_params0_t;
/**
 * cvmx_dfm_timing_params1
 */
union cvmx_dfm_timing_params1 {
	uint64_t u64;
	struct cvmx_dfm_timing_params1_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_47_63 : 17;
	uint64_t tras_ext : 1; /**< Indicates tRAS constraints.
                                         Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
                                         where tRAS is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         - 111111: 64 tCYC */
	uint64_t txpdll : 5; /**< Indicates tXPDLL constraints.
                                         Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
                                         where tXPDLL is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(10nCK, 24ns) */
	uint64_t tfaw : 5; /**< Indicates tFAW constraints.
                                         Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
                                         where tFAW is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t twldqsen : 4; /**< Indicates tWLDQSEN constraints.
                                         Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
                                         where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t twlmrd : 4; /**< Indicates tWLMRD constraints.
                                         Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
                                         where tWLMRD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t txp : 3; /**< Indicates tXP constraints.
                                         Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
                                         where tXP is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(3nCK, 7.5ns) */
	uint64_t trrd : 3; /**< Indicates tRRD constraints.
                                         Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
                                         where tRRD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t trfc : 5; /**< Indicates tRFC constraints.
                                         Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
                                         where tRFC is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         - 11111: 248 tCYC */
	uint64_t twtr : 4; /**< Indicates tWTR constraints.
                                         Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
                                         where tWTR is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(4nCK, 7.5ns)
                                         - 1000-1111: RESERVED */
	uint64_t trcd : 4; /**< Indicates tRCD constraints.
                                         Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
                                         where tRCD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         - 0001: 2 (2 is the smallest value allowed)
                                         - 1010-1111: RESERVED
                                         In 2T mode, make this register TRCD-1, not going
                                         below 2. */
	uint64_t tras : 5; /**< Indicates tRAS constraints.
                                         Set [TRAS_EXT[0:0], TRAS[4:0]] (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
                                         where tRAS is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         - 111111: 64 tCYC */
	uint64_t tmprr : 4; /**< Indicates tMPRR constraints.
                                         Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
                                         where tMPRR is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
#else
	uint64_t tmprr : 4;
	uint64_t tras : 5;
	uint64_t trcd : 4;
	uint64_t twtr : 4;
	uint64_t trfc : 5;
	uint64_t trrd : 3;
	uint64_t txp : 3;
	uint64_t twlmrd : 4;
	uint64_t twldqsen : 4;
	uint64_t tfaw : 5;
	uint64_t txpdll : 5;
	uint64_t tras_ext : 1;
	uint64_t reserved_47_63 : 17;
#endif
	} s;
	struct cvmx_dfm_timing_params1_s cn63xx;
	struct cvmx_dfm_timing_params1_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_46_63 : 18;
	uint64_t txpdll : 5; /**< Indicates tXPDLL constraints.
                                         Set TXPDLL (CSR field) = RNDUP[tXPDLL(ns)/tCYC(ns)]-1,
                                         where tXPDLL is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(10nCK, 24ns) */
	uint64_t tfaw : 5; /**< Indicates tFAW constraints.
                                         Set TFAW (CSR field) = RNDUP[tFAW(ns)/(4*tCYC(ns))],
                                         where tFAW is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t twldqsen : 4; /**< Indicates tWLDQSEN constraints.
                                         Set TWLDQSEN (CSR field) = RNDUP[tWLDQSEN(ns)/(4*tCYC(ns))],
                                         where tWLDQSEN is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t twlmrd : 4; /**< Indicates tWLMRD constraints.
                                         Set TWLMRD (CSR field) = RNDUP[tWLMRD(ns)/(4*tCYC(ns))],
                                         where tWLMRD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t txp : 3; /**< Indicates tXP constraints.
                                         Set TXP (CSR field) = RNDUP[tXP(ns)/tCYC(ns)]-1,
                                         where tXP is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(3nCK, 7.5ns) */
	uint64_t trrd : 3; /**< Indicates tRRD constraints.
                                         Set TRRD (CSR field) = RNDUP[tRRD(ns)/tCYC(ns)]-2,
                                         where tRRD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t trfc : 5; /**< Indicates tRFC constraints.
                                         Set TRFC (CSR field) = RNDUP[tRFC(ns)/(8*tCYC(ns))],
                                         where tRFC is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         - 11111: 248 tCYC */
	uint64_t twtr : 4; /**< Indicates tWTR constraints.
                                         Set TWTR (CSR field) = RNDUP[tWTR(ns)/tCYC(ns)]-1,
                                         where tWTR is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         TYP=max(4nCK, 7.5ns)
                                         - 1000-1111: RESERVED */
	uint64_t trcd : 4; /**< Indicates tRCD constraints.
                                         Set TRCD (CSR field) = RNDUP[tRCD(ns)/tCYC(ns)],
                                         where tRCD is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate).
                                         - 0001: 2 (2 is the smallest value allowed)
                                         - 1010-1111: RESERVED
                                         In 2T mode, make this register TRCD-1, not going
                                         below 2. */
	uint64_t tras : 5; /**< Indicates tRAS constraints.
                                         Set TRAS (CSR field) = RNDUP[tRAS(ns)/tCYC(ns)]-1,
                                         where tRAS is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
	uint64_t tmprr : 4; /**< Indicates tMPRR constraints.
                                         Set TMPRR (CSR field) = RNDUP[tMPRR(ns)/tCYC(ns)]-1,
                                         where tMPRR is from the DDR3 spec, and tCYC(ns)
                                         is the DDR clock frequency (not data rate). */
#else
	uint64_t tmprr : 4;
	uint64_t tras : 5;
	uint64_t trcd : 4;
	uint64_t twtr : 4;
	uint64_t trfc : 5;
	uint64_t trrd : 3;
	uint64_t txp : 3;
	uint64_t twlmrd : 4;
	uint64_t twldqsen : 4;
	uint64_t tfaw : 5;
	uint64_t txpdll : 5;
	uint64_t reserved_46_63 : 18;
#endif
	} cn63xxp1;
	struct cvmx_dfm_timing_params1_s cn66xx;
};
typedef union cvmx_dfm_timing_params1 cvmx_dfm_timing_params1_t;
/**
 * cvmx_dfm_wlevel_ctl
 */
union cvmx_dfm_wlevel_ctl {
	uint64_t u64;
	struct cvmx_dfm_wlevel_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_22_63 : 42;
	uint64_t rtt_nom : 3; /**< RTT_NOM
                                         DFM writes a decoded value to MR1[Rtt_Nom] of the rank during
                                         write leveling. Per JEDEC DDR3 specifications,
                                         only values MR1[Rtt_Nom] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6)
                                         are allowed during write leveling with output buffer enabled.
                                         000 : DFM writes 001 (RZQ/4) to MR1[Rtt_Nom]
                                         001 : DFM writes 010 (RZQ/2) to MR1[Rtt_Nom]
                                         010 : DFM writes 011 (RZQ/6) to MR1[Rtt_Nom]
                                         011 : DFM writes 100 (RZQ/12) to MR1[Rtt_Nom]
                                         100 : DFM writes 101 (RZQ/8) to MR1[Rtt_Nom]
                                         101 : DFM writes 110 (Rsvd) to MR1[Rtt_Nom]
                                         110 : DFM writes 111 (Rsvd) to MR1[Rtt_Nom]
                                         111 : DFM writes 000 (Disabled) to MR1[Rtt_Nom] */
	uint64_t bitmask : 8; /**< Mask to select bit lanes on which write-leveling
                                         feedback is returned when OR_DIS is set to 1 */
	uint64_t or_dis : 1; /**< Disable or'ing of bits in a byte lane when computing
                                         the write-leveling bitmask */
	uint64_t sset : 1; /**< Run write-leveling on the current setting only. */
	uint64_t lanemask : 9; /**< One-hot mask to select byte lane to be leveled by
                                         the write-leveling sequence
                                         Used with x16 parts where the upper and lower byte
                                         lanes need to be leveled independently
                                         LANEMASK<8:2> must be zero. */
#else
	uint64_t lanemask : 9;
	uint64_t sset : 1;
	uint64_t or_dis : 1;
	uint64_t bitmask : 8;
	uint64_t rtt_nom : 3;
	uint64_t reserved_22_63 : 42;
#endif
	} s;
	struct cvmx_dfm_wlevel_ctl_s cn63xx;
	struct cvmx_dfm_wlevel_ctl_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_10_63 : 54;
	uint64_t sset : 1; /**< Run write-leveling on the current setting only. */
	uint64_t lanemask : 9; /**< One-hot mask to select byte lane to be leveled by
                                         the write-leveling sequence
                                         Used with x16 parts where the upper and lower byte
                                         lanes need to be leveled independently
                                         LANEMASK<8:2> must be zero. */
#else
	uint64_t lanemask : 9;
	uint64_t sset : 1;
	uint64_t reserved_10_63 : 54;
#endif
	} cn63xxp1;
	struct cvmx_dfm_wlevel_ctl_s cn66xx;
};
typedef union cvmx_dfm_wlevel_ctl cvmx_dfm_wlevel_ctl_t;
/**
 * cvmx_dfm_wlevel_dbg
 *
 * A given write of DFM_WLEVEL_DBG returns the write-leveling pass/fail results for all possible
 * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW write-leveled.
 * DFM_WLEVEL_DBG[BYTE] selects the particular byte.
 * To get these pass/fail results for another different rank, you must run the hardware write-leveling
 * again. For example, it is possible to get the BITMASK results for every byte of every rank
 * if you run write-leveling separately for each rank, probing DFM_WLEVEL_DBG between each
 * write-leveling.
 */
union cvmx_dfm_wlevel_dbg {
	uint64_t u64;
	struct cvmx_dfm_wlevel_dbg_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_12_63 : 52;
	uint64_t bitmask : 8; /**< Bitmask generated during deskew settings sweep
                                         if DFM_WLEVEL_CTL[SSET]=0
                                           BITMASK[n]=0 means deskew setting n failed
                                           BITMASK[n]=1 means deskew setting n passed
                                           BITMASK contains the first 8 results of the total 16
                                           collected by DFM during the write-leveling sequence
                                         else if DFM_WLEVEL_CTL[SSET]=1
                                           BITMASK[0]=0 means curr deskew setting failed
                                           BITMASK[0]=1 means curr deskew setting passed */
	uint64_t byte : 4; /**< 0 <= BYTE <= 8 */
#else
	uint64_t byte : 4;
	uint64_t bitmask : 8;
	uint64_t reserved_12_63 : 52;
#endif
	} s;
	struct cvmx_dfm_wlevel_dbg_s cn63xx;
	struct cvmx_dfm_wlevel_dbg_s cn63xxp1;
	struct cvmx_dfm_wlevel_dbg_s cn66xx;
};
typedef union cvmx_dfm_wlevel_dbg cvmx_dfm_wlevel_dbg_t;
/**
 * cvmx_dfm_wlevel_rank#
 *
 * This is TWO CSRs per DFM, one per each rank. (front bunk/back bunk)
 *
 * Deskew setting is measured in units of 1/8 FCLK, so the above BYTE* values can range over 4 FCLKs.
 *
 * Assuming DFM_WLEVEL_CTL[SSET]=0, the BYTE*<2:0> values are not used during write-leveling, and
 * they are over-written by the hardware as part of the write-leveling sequence. (HW sets STATUS==3
 * after HW write-leveling completes for the rank). SW needs to set BYTE*<4:3> bits.
 *
 * Each CSR may also be written by SW, but not while a write-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.)
 *
 * SW initiates a HW write-leveling sequence by programming DFM_WLEVEL_CTL and writing RANKMASK and INIT_START=1 with SEQUENCE=6 in DFM_CONFIG.
 * DFM will then step through and accumulate write leveling results for 8 unique delay settings (twice), starting at a delay of
 * DFM_WLEVEL_RANKn[BYTE*<4:3>]*8 CK increasing by 1/8 CK each setting. HW will then set DFM_WLEVEL_RANKn[BYTE*<2:0>] to indicate the
 * first write leveling result of '1' that followed a result of '0' during the sequence by searching for a '1100' pattern in the generated
 * bitmask, except that DFM will always write DFM_WLEVEL_RANKn[BYTE*<0>]=0. If HW is unable to find a match for a '1100' pattern, then HW will
 * set DFM_WLEVEL_RANKn[BYTE*<2:0>] to 4.
 * See DFM_WLEVEL_CTL.
 */
union cvmx_dfm_wlevel_rankx {
	uint64_t u64;
	struct cvmx_dfm_wlevel_rankx_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_47_63 : 17;
	uint64_t status : 2; /**< Indicates status of the write-leveling and where
                                         the BYTE* programmings in <44:0> came from:
                                         0 = BYTE* values are their reset value
                                         1 = BYTE* values were set via a CSR write to this register
                                         2 = write-leveling sequence currently in progress (BYTE* values are unpredictable)
                                         3 = BYTE* values came from a complete write-leveling sequence, irrespective of
                                             which lanes are masked via DFM_WLEVEL_CTL[LANEMASK] */
	uint64_t reserved_10_44 : 35;
	uint64_t byte1 : 5; /**< Deskew setting
                                         Bit 0 of BYTE1 must be zero during normal operation */
	uint64_t byte0 : 5; /**< Deskew setting
                                         Bit 0 of BYTE0 must be zero during normal operation */
#else
	uint64_t byte0 : 5;
	uint64_t byte1 : 5;
	uint64_t reserved_10_44 : 35;
	uint64_t status : 2;
	uint64_t reserved_47_63 : 17;
#endif
	} s;
	struct cvmx_dfm_wlevel_rankx_s cn63xx;
	struct cvmx_dfm_wlevel_rankx_s cn63xxp1;
	struct cvmx_dfm_wlevel_rankx_s cn66xx;
};
typedef union cvmx_dfm_wlevel_rankx cvmx_dfm_wlevel_rankx_t;
/**
 * cvmx_dfm_wodt_mask
 *
 * DFM_WODT_MASK = DFM Write OnDieTermination mask
 * System designers may desire to terminate DQ/DQS/DM lines for higher frequency DDR operations
 * especially on a multi-rank system. DDR3 DQ/DM/DQS I/O's have built in
 * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF
 * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts
 * in that rank. System designers may prefer different combinations of ODT ON's for writes
 * into different ranks. Octeon supports full programmability by way of the mask register below.
 * Each Rank position has its own 8-bit programmable field.
 * When the controller does a write to that rank, it sets the 4 ODT pins to the MASK pins below.
 * For eg., When doing a write into Rank0, a system designer may desire to terminate the lines
 * with the resistor on Dimm0/Rank1. The mask WODT_D0_R0 would then be [00000010].
 * Octeon drives the appropriate mask values on the ODT pins by default. If this feature is not
 * required, write 0 in this register.
 *
 * Notes:
 * - DFM_WODT_MASK functions a little differently than DFM_RODT_MASK. While, in DFM_RODT_MASK, the other
 *   rank(s) are ODT-ed, in DFM_WODT_MASK, the rank in which the write CAS is issued can be ODT-ed as well.
 * - For a two rank system and a write op to rank0: use RODT_D0_R0<1:0> to terminate lines on rank1 and/or rank0.
 * - For a two rank system and a write op to rank1: use RODT_D0_R1<1:0> to terminate lines on rank1 and/or rank0.
 * - When a given RANK is selected, the WODT mask for that RANK is used.
 *
 * DFM always writes 128-bit words independently via one write CAS operation per word.
 * When a WODT mask bit is set, DFM asserts the OCTEON ODT output pin(s) starting the same cycle
 * as the write CAS operation. Then, OCTEON normally continues to assert the ODT output pin(s) for five
 * more cycles - for a total of 6 cycles for the entire word write - satisfying the 6 cycle DDR3
 * ODTH8 requirements. But it is possible for DFM to issue two word writes separated by as few
 * as WtW = 4 or 5 cycles. In that case, DFM asserts the ODT output pin(s) for the WODT mask of the
 * first word write for WtW cycles, then asserts the ODT output pin(s) for the WODT mask of the
 * second write for 6 cycles (or less if a third word write follows within 4 or 5
 * cycles of this second word write). Note that it may be necessary to force DFM to space back-to-back
 * word writes to different ranks apart by at least 6 cycles to prevent DDR3 ODTH8 violations.
 */
union cvmx_dfm_wodt_mask {
	uint64_t u64;
	struct cvmx_dfm_wodt_mask_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t wodt_d3_r1 : 8; /**< Not used by DFM. */
	uint64_t wodt_d3_r0 : 8; /**< Not used by DFM. */
	uint64_t wodt_d2_r1 : 8; /**< Not used by DFM. */
	uint64_t wodt_d2_r0 : 8; /**< Not used by DFM. */
	uint64_t wodt_d1_r1 : 8; /**< Not used by DFM. */
	uint64_t wodt_d1_r0 : 8; /**< Not used by DFM. */
	uint64_t wodt_d0_r1 : 8; /**< Write ODT mask RANK1
                                         WODT_D0_R1<7:2> not used by DFM.
                                         WODT_D0_R1<1:0> is also not used by DFM when RANK_ENA is not set. */
	uint64_t wodt_d0_r0 : 8; /**< Write ODT mask RANK0
                                         WODT_D0_R0<7:2> not used by DFM. */
#else
	uint64_t wodt_d0_r0 : 8;
	uint64_t wodt_d0_r1 : 8;
	uint64_t wodt_d1_r0 : 8;
	uint64_t wodt_d1_r1 : 8;
	uint64_t wodt_d2_r0 : 8;
	uint64_t wodt_d2_r1 : 8;
	uint64_t wodt_d3_r0 : 8;
	uint64_t wodt_d3_r1 : 8;
#endif
	} s;
	struct cvmx_dfm_wodt_mask_s cn63xx;
	struct cvmx_dfm_wodt_mask_s cn63xxp1;
	struct cvmx_dfm_wodt_mask_s cn66xx;
};
typedef union cvmx_dfm_wodt_mask cvmx_dfm_wodt_mask_t;