1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
45 * Interface to the NAND flash controller.
46 * See cvmx-nand.h for usage documentation and notes.
48 * <hr>$Revision: 35726 $<hr>
51 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
52 #include <linux/module.h>
54 #include <asm/octeon/cvmx.h>
55 #include <asm/octeon/cvmx-clock.h>
56 #include <asm/octeon/cvmx-nand.h>
57 #include <asm/octeon/cvmx-ndf-defs.h>
58 #include <asm/octeon/cvmx-swap.h>
59 #include <asm/octeon/cvmx-bootmem.h>
62 #include "cvmx-nand.h"
63 #include "cvmx-swap.h"
64 #include "cvmx-bootmem.h"
#if defined(__U_BOOT__) && defined(CONFIG_HW_WATCHDOG)
# include <watchdog.h>
# define WATCHDOG_RESET()

/* Command bytes driven onto the NAND bus during CLE (command latch) cycles */
#define NAND_COMMAND_READ_ID            0x90
#define NAND_COMMAND_READ_PARAM_PAGE    0xec
#define NAND_COMMAND_RESET              0xff
#define NAND_COMMAND_STATUS             0x70
#define NAND_COMMAND_READ               0x00
#define NAND_COMMAND_READ_FIN           0x30
#define NAND_COMMAND_ERASE              0x60
#define NAND_COMMAND_ERASE_FIN          0xd0
#define NAND_COMMAND_PROGRAM            0x80
#define NAND_COMMAND_PROGRAM_FIN        0x10

/* Per-operation timeout budgets, in microseconds */
#define NAND_TIMEOUT_USECS_READ         100000
#define NAND_TIMEOUT_USECS_WRITE        1000000
#define NAND_TIMEOUT_USECS_BLOCK_ERASE  1000000

/* Integer division that rounds up instead of truncating */
#define CVMX_NAND_ROUNDUP(_Dividend, _Divisor) (((_Dividend)+((_Divisor)-1))/(_Divisor))
({ typeof (X) __x = (X); \
   typeof (Y) __y = (Y); \
   (__x < __y) ? __x : __y; })
({ typeof (X) __x = (X); \
   typeof (Y) __y = (Y); \
   (__x > __y) ? __x : __y; })
/* Structure to store the parameters that we care about that
** describe the ONFI speed modes. This is used to configure
** the flash timing to match what is reported in the
** parameter page of the ONFI flash chip. */
} onfi_speed_mode_desc_t;

/* Timing values for ONFI speed modes 0-6 (faster modes have smaller
** values; modes 4-6 additionally require EDO data-output timing). */
static const onfi_speed_mode_desc_t onfi_speed_modes[] =
    {50,30,100,20,50}, /* Mode 0 */
    {25,15, 45,10,25}, /* Mode 1 */
    {17,15, 35,10,15}, /* Mode 2 */
    {15,10, 30, 5,10}, /* Mode 3 */
    {12,10, 25, 5,10}, /* Mode 4, requires EDO timings */
    {10, 7, 20, 5,10}, /* Mode 5, requires EDO timings */
    {10,10, 25, 5,12}, /* Mode 6, requires EDO timings */
128 CVMX_NAND_STATE_16BIT = 1<<0,
129 } cvmx_nand_state_flags_t;
132 * Structure used to store data about the NAND devices hooked
148 cvmx_nand_state_flags_t flags;
152 * Array indexed by bootbus chip select with information
153 * about NAND devices.
#if defined(__U_BOOT__)
/* For u-boot nand boot we need to play some tricks to be able
** to use this early in boot. We put them in a special section that is merged
** with the text segment. (Using the text segment directly results in an assembler warning.)
/*#define USE_DATA_IN_TEXT*/

#ifdef USE_DATA_IN_TEXT
/* Early-boot variant: all driver state is placed in .data_in_text */
static uint8_t cvmx_nand_buffer[CVMX_NAND_MAX_PAGE_AND_OOB_SIZE] __attribute__((aligned(8))) __attribute__ ((section (".data_in_text")));
static cvmx_nand_state_t cvmx_nand_state[8] __attribute__ ((section (".data_in_text")));
static cvmx_nand_state_t cvmx_nand_default __attribute__ ((section (".data_in_text")));
static cvmx_nand_initialize_flags_t cvmx_nand_flags __attribute__ ((section (".data_in_text")));
static int debug_indent __attribute__ ((section (".data_in_text")));
/* Normal variant: per-chip-select state; the page buffer is allocated from
** bootmem during cvmx_nand_initialize() */
static CVMX_SHARED cvmx_nand_state_t cvmx_nand_state[8];
static CVMX_SHARED cvmx_nand_state_t cvmx_nand_default;
static CVMX_SHARED cvmx_nand_initialize_flags_t cvmx_nand_flags;
static CVMX_SHARED uint8_t *cvmx_nand_buffer = NULL;
static int debug_indent = 0;
/* Human-readable names for NDF command-queue opcodes, indexed by op_code;
** used by the debug logging in cvmx_nand_submit() */
static CVMX_SHARED const char *cvmx_nand_opcode_labels[] =
    "Chip Enable / Disable",    /* 3 */
    "6 - Unknown",              /* 6 */
    "7 - Unknown",              /* 7 */
    "Wait Status",              /* 11 */
    "12 - Unknown",             /* 12 */
    "13 - Unknown",             /* 13 */
    "14 - Unknown",             /* 14 */
    "Bus Aquire / Release"      /* 15 */
#define ULL unsigned long long

/* This macro logs out whenever a function is called if debugging is on.
** Note: it also increments debug_indent so nested calls indent further. */
#define CVMX_NAND_LOG_CALLED() \
    if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
        cvmx_dprintf("%*s%s: called\n", 2*debug_indent++, "", __FUNCTION__);

/* This macro logs out each function parameter if debugging is on */
#define CVMX_NAND_LOG_PARAM(format, param) \
    if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
        cvmx_dprintf("%*s%s: param %s = " format "\n", 2*debug_indent, "", __FUNCTION__, #param, param);

/* This macro logs out when a function returns a value */
#define CVMX_NAND_RETURN(v) \
    if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
        cvmx_dprintf("%*s%s: returned %s(%d)\n", 2*--debug_indent, "", __FUNCTION__, #v, r); \

/* This macro logs out when a function doesn't return a value */
#define CVMX_NAND_RETURN_NOTHING() \
    if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
        cvmx_dprintf("%*s%s: returned\n", 2*--debug_indent, "", __FUNCTION__); \
/* Compute the CRC for the ONFI parameter page. Adapted from sample code
** in the specification.
** Bit-serial CRC-16 over the first 254 bytes of the copy; bytes 254-255
** hold the stored CRC that the caller compares against. */
static uint16_t __onfi_parameter_crc_compute(uint8_t *data)
    const int order = 16;                           // Order of the CRC-16
    unsigned long i, j, c, bit;
    unsigned long crc = 0x4F4E;             // Initialize the shift register with 0x4F4E ("ON" in ASCII, per ONFI)
    unsigned long crcmask = ((((unsigned long)1<<(order-1))-1)<<1)|1;   // 0xFFFF: mask result to 16 bits
    unsigned long crchighbit = (unsigned long)1<<(order-1);             // 0x8000: MSB of the shift register
    for (i = 0; i < 254; i++)
        c = (unsigned long)data[i];
        for (j = 0x80; j; j >>= 1) {    /* process each byte MSB first */
            bit = crc & crchighbit;
259 * Validate the ONFI parameter page and return a pointer to
262 * @param param_page Pointer to the raw NAND data returned after a parameter page read. It will
263 * contain at least 4 copies of the parameter structure.
265 * @return Pointer to a validated paramter page, or NULL if one couldn't be found.
267 static cvmx_nand_onfi_param_page_t *__cvmx_nand_onfi_process(cvmx_nand_onfi_param_page_t param_page[4])
271 for (index=0; index<4; index++)
273 uint16_t crc = __onfi_parameter_crc_compute((void *)¶m_page[index]);
274 if (crc == cvmx_le16_to_cpu(param_page[index].crc))
276 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
277 cvmx_dprintf("%s: Paramter page %d is corrupt. (Expected CRC: 0x%04x, computed: 0x%04x)\n",
278 __FUNCTION__, index, cvmx_le16_to_cpu(param_page[index].crc), crc);
283 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
284 cvmx_dprintf("%s: All parameter pages fail CRC check. Checking to see if any look sane.\n", __FUNCTION__);
286 if (!memcmp(param_page, param_page + 1, 256))
288 /* First and second copies match, now check some values */
289 if (param_page[0].pages_per_block != 0 && param_page[0].pages_per_block != 0xFFFFFFFF
290 && param_page[0].page_data_bytes != 0 && param_page[0].page_data_bytes != 0xFFFFFFFF
291 && param_page[0].page_spare_bytes != 0 && param_page[0].page_spare_bytes != 0xFFFF
292 && param_page[0].blocks_per_lun != 0 && param_page[0].blocks_per_lun != 0xFFFFFFFF
293 && param_page[0].timing_mode != 0 && param_page[0].timing_mode != 0xFFFF)
295 /* Looks like we have enough values to use */
296 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
297 cvmx_dprintf("%s: Page 0 looks sane, using even though CRC fails.\n", __FUNCTION__);
305 cvmx_dprintf("%s: WARNING: ONFI part but no valid ONFI parameter pages found.\n", __FUNCTION__);
309 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
311 cvmx_dprintf("%*sONFI Information (from copy %d in param page)\n", 2*debug_indent, "", index);
313 cvmx_dprintf("%*sonfi = %c%c%c%c\n", 2*debug_indent, "", param_page[index].onfi[0], param_page[index].onfi[1],
314 param_page[index].onfi[2], param_page[index].onfi[3]);
315 cvmx_dprintf("%*srevision_number = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].revision_number));
316 cvmx_dprintf("%*sfeatures = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].features));
317 cvmx_dprintf("%*soptional_commands = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].optional_commands));
319 cvmx_dprintf("%*smanufacturer = %12.12s\n", 2*debug_indent, "", param_page[index].manufacturer);
320 cvmx_dprintf("%*smodel = %20.20s\n", 2*debug_indent, "", param_page[index].model);
321 cvmx_dprintf("%*sjedec_id = 0x%x\n", 2*debug_indent, "", param_page[index].jedec_id);
322 cvmx_dprintf("%*sdate_code = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].date_code));
324 cvmx_dprintf("%*spage_data_bytes = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].page_data_bytes));
325 cvmx_dprintf("%*spage_spare_bytes = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].page_spare_bytes));
326 cvmx_dprintf("%*spartial_page_data_bytes = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].partial_page_data_bytes));
327 cvmx_dprintf("%*spartial_page_spare_bytes = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].partial_page_spare_bytes));
328 cvmx_dprintf("%*spages_per_block = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].pages_per_block));
329 cvmx_dprintf("%*sblocks_per_lun = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].blocks_per_lun));
330 cvmx_dprintf("%*snumber_lun = %u\n", 2*debug_indent, "", param_page[index].number_lun);
331 cvmx_dprintf("%*saddress_cycles = 0x%x\n", 2*debug_indent, "", param_page[index].address_cycles);
332 cvmx_dprintf("%*sbits_per_cell = %u\n", 2*debug_indent, "", param_page[index].bits_per_cell);
333 cvmx_dprintf("%*sbad_block_per_lun = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].bad_block_per_lun));
334 cvmx_dprintf("%*sblock_endurance = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].block_endurance));
335 cvmx_dprintf("%*sgood_blocks = %u\n", 2*debug_indent, "", param_page[index].good_blocks);
336 cvmx_dprintf("%*sgood_block_endurance = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].good_block_endurance));
337 cvmx_dprintf("%*sprograms_per_page = %u\n", 2*debug_indent, "", param_page[index].programs_per_page);
338 cvmx_dprintf("%*spartial_program_attrib = 0x%x\n", 2*debug_indent, "", param_page[index].partial_program_attrib);
339 cvmx_dprintf("%*sbits_ecc = %u\n", 2*debug_indent, "", param_page[index].bits_ecc);
340 cvmx_dprintf("%*sinterleaved_address_bits = 0x%x\n", 2*debug_indent, "", param_page[index].interleaved_address_bits);
341 cvmx_dprintf("%*sinterleaved_attrib = 0x%x\n", 2*debug_indent, "", param_page[index].interleaved_attrib);
343 cvmx_dprintf("%*spin_capacitance = %u\n", 2*debug_indent, "", param_page[index].pin_capacitance);
344 cvmx_dprintf("%*stiming_mode = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].timing_mode));
345 cvmx_dprintf("%*scache_timing_mode = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].cache_timing_mode));
346 cvmx_dprintf("%*st_prog = %d us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_prog));
347 cvmx_dprintf("%*st_bers = %u us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_bers));
348 cvmx_dprintf("%*st_r = %u us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_r));
349 cvmx_dprintf("%*st_ccs = %u ns\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_ccs));
350 cvmx_dprintf("%*svendor_revision = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].vendor_revision));
351 //uint8_t vendor_specific[88]; /**< Byte 166-253: Vendor specific */
352 cvmx_dprintf("%*scrc = 0x%x\n", 2*debug_indent, "", param_page[index].crc);
355 return param_page + index;
/* Translate an ONFI timing mode (0-6) into the driver's 8-entry tim_par
** array. clocks_us is the number of controller clocks per microsecond;
** each result is rounded up to whole clock cycles. */
void __set_onfi_timing_mode(int *tim_par, int clocks_us, int mode)
    const onfi_speed_mode_desc_t *mp = &onfi_speed_modes[mode]; /* use shorter name to fill in timing array */
        cvmx_dprintf("%s: invalid ONFI timing mode: %d\n", __FUNCTION__, mode);

    /* Adjust the read/write pulse duty cycle to make it more even. The cycle time
    ** requirement is longer than the sum of the high low times, so we extend both the high
    ** and low times to meet the cycle time requirement.
    pulse_adjust = ((mp->twc - mp->twh - mp->twp)/2 + 1) * clocks_us;

    /* Add a small margin to all timings. */
    margin = 2 * clocks_us;
    /* Update timing parameters based on supported mode */
    tim_par[1] = CVMX_NAND_ROUNDUP(mp->twp * clocks_us + margin + pulse_adjust, 1000); /* Twp, WE# pulse width */
    tim_par[2] = CVMX_NAND_ROUNDUP(max(mp->twh, mp->twc - mp->twp) * clocks_us + margin + pulse_adjust, 1000); /* Twh, WE# pulse width high */
    tim_par[3] = CVMX_NAND_ROUNDUP(mp->tclh * clocks_us + margin, 1000); /* Tclh, CLE hold time */
    tim_par[4] = CVMX_NAND_ROUNDUP(mp->tals * clocks_us + margin, 1000); /* Tals, ALE setup time */
    tim_par[5] = tim_par[3]; /* Talh, ALE hold time */
    tim_par[6] = tim_par[1]; /* Trp, RE# pulse width*/
    tim_par[7] = tim_par[2]; /* Treh, RE# high hold time */
/* Internal helper function to set chip configuration to use default values.
** The defaults come from cvmx_nand_default (set by the application before
** initialization); a zero page_size there means "no defaults configured". */
static void __set_chip_defaults(int chip, int clocks_us)
    if (!cvmx_nand_default.page_size)
    cvmx_nand_state[chip].page_size = cvmx_nand_default.page_size;  /* NAND page size in bytes */
    cvmx_nand_state[chip].oob_size = cvmx_nand_default.oob_size;  /* NAND OOB (spare) size in bytes (per page) */
    cvmx_nand_state[chip].pages_per_block = cvmx_nand_default.pages_per_block;
    cvmx_nand_state[chip].blocks = cvmx_nand_default.blocks;
    cvmx_nand_state[chip].onfi_timing = cvmx_nand_default.onfi_timing;
    __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
    if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
        cvmx_dprintf("%s: Using default NAND parameters.\n", __FUNCTION__);
        cvmx_dprintf("%s: Defaults: page size: %d, OOB size: %d, pages per block %d, blocks: %d, timing mode: %d\n",
            __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, cvmx_nand_state[chip].pages_per_block,
            cvmx_nand_state[chip].blocks, cvmx_nand_state[chip].onfi_timing);
/* Do the proper wait for the ready/busy signal. First wait
** for busy to be valid, then wait for busy to de-assert.
static int __wait_for_busy_done(int chip)

    CVMX_NAND_LOG_CALLED();
    CVMX_NAND_LOG_PARAM("%d", chip);

    memset(&cmd, 0, sizeof(cmd));

    /* Wait for RB to be valid (tWB).
    ** Use 5 * tWC as proxy. In some modes this is
    ** much longer than required, but does not affect performance
    ** since we will wait much longer for busy to de-assert.
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
    cmd.wait.r_b=1; /* Now wait for busy to be de-asserted */
    if (cvmx_nand_submit(cmd))
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
445 * Called to initialize the NAND controller for use. Note that
446 * you must be running out of L2 or memory and not NAND before
447 * calling this function.
448 * When probing for NAND chips, this function attempts to autoconfigure based on the NAND parts detected.
449 * It currently supports autodetection for ONFI parts (with valid parameter pages), and some Samsung NAND
450 * parts (decoding ID bits.) If autoconfiguration fails, the defaults set with __set_chip_defaults()
451 * prior to calling cvmx_nand_initialize() are used.
452 * If defaults are set and the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is provided, the defaults are used
453 * for all chips in the active_chips mask.
455 * @param flags Optional initialization flags
456 * If the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is passed, chips are not probed,
457 * and the default parameters (if set with cvmx_nand_set_defaults) are used for all chips
458 * in the active_chips mask.
459 * @param active_chips
460 * Each bit in this parameter represents a chip select that might
461 * contain NAND flash. Any chip select present in this bitmask may
462 * be connected to NAND. It is normally safe to pass 0xff here and
463 * let the API probe all 8 chip selects.
465 * @return Zero on success, a negative cvmx_nand_status error code on failure
cvmx_nand_status_t cvmx_nand_initialize(cvmx_nand_initialize_flags_t flags, int active_chips)
    union cvmx_ndf_misc ndf_misc;
    uint8_t nand_id_buffer[16];

    /* The NAND controller is not present on every Octeon model */
    if (!octeon_has_feature(OCTEON_FEATURE_NAND))
        CVMX_NAND_RETURN(CVMX_NAND_NO_DEVICE);

    cvmx_nand_flags = flags;
    CVMX_NAND_LOG_CALLED();
    CVMX_NAND_LOG_PARAM("0x%x", flags);

    memset(&cvmx_nand_state, 0, sizeof(cvmx_nand_state));

#ifndef USE_DATA_IN_TEXT
    /* cvmx_nand_buffer is statically allocated in the TEXT_IN_DATA case */
    if (!cvmx_nand_buffer)
        cvmx_nand_buffer = cvmx_bootmem_alloc_named_flags(CVMX_NAND_MAX_PAGE_AND_OOB_SIZE, 128, "__nand_buffer", CVMX_BOOTMEM_FLAG_END_ALLOC);
    /* Allocation failed: the named block may already exist (allocated by
    ** another core/application), so try to attach to it instead */
    if (!cvmx_nand_buffer) {
        const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block("__nand_buffer");
        cvmx_nand_buffer = cvmx_phys_to_ptr(block_desc->base_addr);

    if (!cvmx_nand_buffer)
        CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);

    /* Disable boot mode and reset the fifo */
    ndf_misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
    ndf_misc.s.rd_cmd = 0;
    ndf_misc.s.bt_dma = 0;
    ndf_misc.s.bt_dis = 1;
    ndf_misc.s.ex_dis = 0;
    ndf_misc.s.rst_ff = 1;
    cvmx_write_csr(CVMX_NDF_MISC, ndf_misc.u64);
    cvmx_read_csr(CVMX_NDF_MISC);   /* read back so the write is known to have posted */

    /* Bring the fifo out of reset */
    ndf_misc.s.rst_ff = 0;
    cvmx_write_csr(CVMX_NDF_MISC, ndf_misc.u64);
    cvmx_read_csr(CVMX_NDF_MISC);

    /* Clear the ECC counter */
    //cvmx_write_csr(CVMX_NDF_ECC_CNT, cvmx_read_csr(CVMX_NDF_ECC_CNT));

    /* Clear the interrupt state */
    cvmx_write_csr(CVMX_NDF_INT, cvmx_read_csr(CVMX_NDF_INT));
    cvmx_write_csr(CVMX_NDF_INT_EN, 0);
    cvmx_write_csr(CVMX_MIO_NDF_DMA_INT, cvmx_read_csr(CVMX_MIO_NDF_DMA_INT));
    cvmx_write_csr(CVMX_MIO_NDF_DMA_INT_EN, 0);

    /* The simulator crashes if you access non-existent devices. Assume
       only chip select 1 is connected to NAND */
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)

    /* Figure out how many clocks are in one microsecond, rounding up */
    clocks_us = CVMX_NAND_ROUNDUP(cvmx_clock_get_rate(CVMX_CLOCK_SCLK), 1000000);

    /* If the CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE flag is set, then
    ** use the supplied default values to configure the chips in the
    ** active_chips mask */
    if (cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE)
        if (cvmx_nand_default.page_size)
            for (chip=start_chip; chip<stop_chip; chip++)
                /* Skip chip selects that the caller didn't supply in the active chip bits */
                if (((1<<chip) & active_chips) == 0)
                __set_chip_defaults(chip, clocks_us);
        CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);

    /* Probe and see what NAND flash we can find */
    for (chip=start_chip; chip<stop_chip; chip++)
        union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
        cvmx_nand_onfi_param_page_t *onfi_param_page;

        /* Skip chip selects that the caller didn't supply in the active chip bits */
        if (((1<<chip) & active_chips) == 0)

        mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(chip));
        /* Enabled regions can't be connected to NAND flash */
        if (mio_boot_reg_cfg.s.en)

        /* Start out with some sane, but slow, defaults */
        cvmx_nand_state[chip].page_size = 0;
        cvmx_nand_state[chip].oob_size = 64;
        cvmx_nand_state[chip].pages_per_block = 64;
        cvmx_nand_state[chip].blocks = 100;

        /* Set timing mode to ONFI mode 0 for initial accesses */
        __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, 0);

        /* Put the index of which timing parameter to use. The indexes are into the tim_par
        ** which match the indexes of the 8 timing parameters that the hardware supports.
        ** Index 0 is not software controlled, and is fixed by hardware. */
        cvmx_nand_state[chip].clen[0] = 0; /* Command doesn't need to be held before WE */
        cvmx_nand_state[chip].clen[1] = 1; /* Twp, WE# pulse width */
        cvmx_nand_state[chip].clen[2] = 3; /* Tclh, CLE hold time */
        cvmx_nand_state[chip].clen[3] = 1;

        cvmx_nand_state[chip].alen[0] = 4; /* Tals, ALE setup time */
        cvmx_nand_state[chip].alen[1] = 1; /* Twp, WE# pulse width */
        cvmx_nand_state[chip].alen[2] = 2; /* Twh, WE# pulse width high */
        cvmx_nand_state[chip].alen[3] = 5; /* Talh, ALE hold time */

        cvmx_nand_state[chip].rdn[0] = 0;
        cvmx_nand_state[chip].rdn[1] = 6; /* Trp, RE# pulse width*/
        cvmx_nand_state[chip].rdn[2] = 7; /* Treh, RE# high hold time */
        cvmx_nand_state[chip].rdn[3] = 0;

        cvmx_nand_state[chip].wrn[0] = 1; /* Twp, WE# pulse width */
        cvmx_nand_state[chip].wrn[1] = 2; /* Twh, WE# pulse width high */

        /* Probe and see if we get an answer. Read more than required, as in
        ** 16 bit mode only every other byte is valid.
        ** Here we probe twice, once in 8 bit mode, and once in 16 bit mode to autodetect
        for (width_16 = 0; width_16 <= 1 && probe_failed; width_16++)
                cvmx_nand_state[chip].flags |= CVMX_NAND_STATE_16BIT;
            memset(cvmx_nand_buffer, 0xff, 16);
            if (cvmx_nand_read_id(chip, 0x0, cvmx_ptr_to_phys(cvmx_nand_buffer), 16) < 16)
                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
                    cvmx_dprintf("%s: Failed to probe chip %d\n", __FUNCTION__, chip);
            /* All ones or all zeros means nothing answered the READ ID */
            if (*(uint32_t*)cvmx_nand_buffer == 0xffffffff || *(uint32_t*)cvmx_nand_buffer == 0x0)
                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
                    cvmx_dprintf("%s: Probe returned nothing for chip %d\n", __FUNCTION__, chip);

        /* Neither 8 nor 16 bit mode worked, so go on to next chip select */

        /* Save copy of ID for later use */
        memcpy(nand_id_buffer, cvmx_nand_buffer, sizeof(nand_id_buffer));

        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
            cvmx_dprintf("%s: NAND chip %d has ID 0x%08llx\n", __FUNCTION__, chip, (unsigned long long int)*(uint64_t*)cvmx_nand_buffer);

        /* Read more than required, as in 16 bit mode only every other byte is valid. */
        if (cvmx_nand_read_id(chip, 0x20, cvmx_ptr_to_phys(cvmx_nand_buffer), 8) < 8)
            if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
                cvmx_dprintf("%s: Failed to probe chip %d\n", __FUNCTION__, chip);

        if (((cvmx_nand_buffer[0] == 'O') && (cvmx_nand_buffer[1] == 'N') &&
             (cvmx_nand_buffer[2] == 'F') && (cvmx_nand_buffer[3] == 'I')))
            /* We have an ONFI part, so read the parameter page */
            cvmx_nand_read_param_page(chip, cvmx_ptr_to_phys(cvmx_nand_buffer), 2048);
            onfi_param_page = __cvmx_nand_onfi_process((cvmx_nand_onfi_param_page_t *)cvmx_nand_buffer);

                /* ONFI NAND parts are described by a parameter page. Here we extract the configuration values
                ** from the parameter page that we need to access the chip. */
                cvmx_nand_state[chip].page_size = cvmx_le32_to_cpu(onfi_param_page->page_data_bytes);
                cvmx_nand_state[chip].oob_size = cvmx_le16_to_cpu(onfi_param_page->page_spare_bytes);
                cvmx_nand_state[chip].pages_per_block = cvmx_le32_to_cpu(onfi_param_page->pages_per_block);
                cvmx_nand_state[chip].blocks = cvmx_le32_to_cpu(onfi_param_page->blocks_per_lun) * onfi_param_page->number_lun;

                if (cvmx_le16_to_cpu(onfi_param_page->timing_mode) <= 0x3f)
                    int mode_mask = cvmx_le16_to_cpu(onfi_param_page->timing_mode);
                    /* Bit i set in the mask means the part supports mode i */
                    for (i = 0; i < 6;i++)
                        if (mode_mask & (1 << i))
                    cvmx_nand_state[chip].onfi_timing = mode;
                    cvmx_dprintf("%s: Invalid timing mode (%d) in ONFI parameter page, ignoring\n", __FUNCTION__, cvmx_nand_state[chip].onfi_timing);
                    cvmx_nand_state[chip].onfi_timing = 0;

                if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
                    cvmx_dprintf("%s: Using ONFI timing mode: %d\n", __FUNCTION__, cvmx_nand_state[chip].onfi_timing);
                __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
                if (cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size > CVMX_NAND_MAX_PAGE_AND_OOB_SIZE)
                    cvmx_dprintf("%s: ERROR: Page size (%d) + OOB size (%d) is greater than max size (%d)\n",
                        __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, CVMX_NAND_MAX_PAGE_AND_OOB_SIZE);
                    return(CVMX_NAND_ERROR);
                /* We have completed setup for this ONFI chip, so go on to next chip. */

            /* Parameter page is not valid */
            if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
                cvmx_dprintf("%s: ONFI paramater page missing or invalid.\n", __FUNCTION__);

        /* We have a non-ONFI part. */
        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
            cvmx_dprintf("%s: Chip %d doesn't support ONFI.\n", __FUNCTION__, chip);

        if (nand_id_buffer[0] == 0xEC)
            /* We have a Samsung part, so decode part info from ID bytes */
            uint64_t nand_size_bits = (64*1024*1024ULL) << ((nand_id_buffer[4] & 0x70) >> 4); /* Plane size */
            cvmx_nand_state[chip].page_size = 1024 << (nand_id_buffer[3] & 0x3);   /* NAND page size in bytes */
            /* NAND OOB (spare) size in bytes (per page) */
            cvmx_nand_state[chip].oob_size = (cvmx_nand_state[chip].page_size / 512) * ((nand_id_buffer[3] & 4) ? 16 : 8);
            cvmx_nand_state[chip].pages_per_block = (0x10000 << ((nand_id_buffer[3] & 0x30) >> 4))/cvmx_nand_state[chip].page_size;

            nand_size_bits *= 1 << ((nand_id_buffer[4] & 0xc) >> 2);

            cvmx_nand_state[chip].oob_size = cvmx_nand_state[chip].page_size/64;
            if (nand_id_buffer[3] & 0x4)
                cvmx_nand_state[chip].oob_size *= 2;

            cvmx_nand_state[chip].blocks = nand_size_bits/(8ULL*cvmx_nand_state[chip].page_size*cvmx_nand_state[chip].pages_per_block);
            switch (nand_id_buffer[1]) {
                case 0xD3:  /* K9F8G08U0M */
                case 0xDC:  /* K9F4G08U0B */
                    cvmx_nand_state[chip].onfi_timing = 6;
                    cvmx_nand_state[chip].onfi_timing = 2;

            if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
                cvmx_dprintf("%s: Samsung NAND chip detected, using parameters decoded from ID bytes.\n", __FUNCTION__);
                cvmx_dprintf("%s: Defaults: page size: %d, OOB size: %d, pages per block %d, part size: %d MBytes, timing mode: %d\n",
                    __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, cvmx_nand_state[chip].pages_per_block,
                    (int)(nand_size_bits/(8*1024*1024)), cvmx_nand_state[chip].onfi_timing);

            __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
            if (cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size > CVMX_NAND_MAX_PAGE_AND_OOB_SIZE)
                cvmx_dprintf("%s: ERROR: Page size (%d) + OOB size (%d) is greater than max size (%d)\n",
                    __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, CVMX_NAND_MAX_PAGE_AND_OOB_SIZE);
                return(CVMX_NAND_ERROR);

            /* We have completed setup for this Samsung chip, so go on to next chip. */

        /* We were not able to automatically identify the NAND chip parameters. If default values were configured,
        if (cvmx_nand_default.page_size)
            __set_chip_defaults(chip, clocks_us);

        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
            cvmx_dprintf("%s: Unable to determine NAND parameters, and no defaults supplied.\n", __FUNCTION__);

    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
784 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
785 EXPORT_SYMBOL(cvmx_nand_initialize);
 * Call to shutdown the NAND controller after all transactions
 * are done. In most setups this will never be called.
 *
 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
cvmx_nand_status_t cvmx_nand_shutdown(void)
    CVMX_NAND_LOG_CALLED();
    /* Drop all probed chip state; a later cvmx_nand_initialize() re-probes */
    memset(&cvmx_nand_state, 0, sizeof(cvmx_nand_state));
    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
 * Returns a bitmask representing the chip selects that are
 * connected to NAND chips. This can be called after the
 * initialize to determine the actual number of NAND chips
 * found. Each bit in the response corresponds to a chip select.
 *
 * @return Zero if no NAND chips were found. Otherwise a bit is set for
 *         each chip select (1<<chip).
int cvmx_nand_get_active_chips(void)
    /* A chip select is active when probing recorded a nonzero page size */
    for (chip=0; chip<8; chip++)
        if (cvmx_nand_state[chip].page_size)
823 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
824 EXPORT_SYMBOL(cvmx_nand_get_active_chips);
 * Override the timing parameters for a NAND chip
 *
 * @param chip     Chip select to override
 * @param tim_mult Timing multiplier
 * @param tim_par  Array of 8 hardware timing parameters
 * @param clen     4 CLE timing selections (indexes into tim_par)
 * @param alen     4 ALE timing selections (indexes into tim_par)
 * @param rdn      4 RE# timing selections (indexes into tim_par)
 * @param wrn      2 WE# timing selections (indexes into tim_par)
 *
 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
cvmx_nand_status_t cvmx_nand_set_timing(int chip, int tim_mult, int tim_par[8], int clen[4], int alen[4], int rdn[4], int wrn[2])

    CVMX_NAND_LOG_CALLED();

    /* Reject chip selects that are out of range or were never probed */
    if ((chip < 0) || (chip > 7))
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
    if (!cvmx_nand_state[chip].page_size)
        CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);

    cvmx_nand_state[chip].tim_mult = tim_mult;
        cvmx_nand_state[chip].tim_par[i] = tim_par[i];
        cvmx_nand_state[chip].clen[i] = clen[i];
        cvmx_nand_state[chip].alen[i] = alen[i];
        cvmx_nand_state[chip].rdn[i] = rdn[i];
        cvmx_nand_state[chip].wrn[i] = wrn[i];

    CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
869 * Get the number of free bytes in the NAND command queue
871 * @return Number of bytes in queue
873 static inline int __cvmx_nand_get_free_cmd_bytes(void)
875 union cvmx_ndf_misc ndf_misc;
876 CVMX_NAND_LOG_CALLED();
877 ndf_misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
878 CVMX_NAND_RETURN((int)ndf_misc.s.fr_byt);
 * Submit a command to the NAND command queue. Generally this
 * will not be used directly. Instead most programs will use the other
 * higher level NAND functions.
 *
 * @param cmd Command to submit
 *
 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
cvmx_nand_status_t cvmx_nand_submit(cvmx_nand_cmd_t cmd)
    CVMX_NAND_LOG_CALLED();
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)cmd.u64[0]);
    CVMX_NAND_LOG_PARAM("0x%llx", (ULL)cmd.u64[1]);
    CVMX_NAND_LOG_PARAM("%s", cvmx_nand_opcode_labels[cmd.s.op_code]);
    switch (cmd.s.op_code)
        /* All these commands fit in one 64bit word */
        case 3:     /* Chip Enable/Disable */
        case 10:    /* Read EDO */
        case 15:    /* Bus Acquire/Release */
            /* Single-word commands need 8 free bytes in the fifo */
            if (__cvmx_nand_get_free_cmd_bytes() < 8)
                CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
            cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
            CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
        case 5: /* ALE commands take either one or two 64bit words */
            if (cmd.ale.adr_byte_num < 5)
                if (__cvmx_nand_get_free_cmd_bytes() < 8)
                    CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
                cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
                CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
            /* Five or more address bytes require both command words */
            if (__cvmx_nand_get_free_cmd_bytes() < 16)
                CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
            cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
            cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[0]);
            CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
        case 11: /* Wait status commands take two 64bit words */
            if (__cvmx_nand_get_free_cmd_bytes() < 16)
                CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
            cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
            cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[0]);
            CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
            CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
946 * Get the number of bits required to encode the column bits. This
947 * does not include padding to align on a byte boundary.
949 * @param chip NAND chip to get data for
951 * @return Number of column bits
953 static inline int __cvmx_nand_get_column_bits(int chip)
955 return cvmx_pop(cvmx_nand_state[chip].page_size - 1);
961 * Get the number of bits required to encode the row bits. This
962 * does not include padding to align on a byte boundary.
964 * @param chip NAND chip to get data for
966 * @return Number of row bits
968 static inline int __cvmx_nand_get_row_bits(int chip)
970 return cvmx_pop(cvmx_nand_state[chip].blocks-1) + cvmx_pop(cvmx_nand_state[chip].pages_per_block-1);
/**
 * Get the number of address cycles required for this NAND part.
 * This includes column bits, padding, page bits, and block bits.
 *
 * @param chip   NAND chip to get data for
 *
 * @return Number of address cycles on the bus
 */
static inline int __cvmx_nand_get_address_cycles(int chip)
{
    /* Column and row fields are each padded up to a whole number of bytes
       before being put on the bus, one byte per address cycle */
    int address_bits = ((__cvmx_nand_get_column_bits(chip) + 7) >> 3) << 3;
    address_bits += ((__cvmx_nand_get_row_bits(chip) + 7) >> 3) << 3;
    return (address_bits + 7) >> 3;
}
993 * Build the set of command common to most transactions
994 * @param chip NAND chip to program
995 * @param cmd_data NAND command for CLE cycle 1
996 * @param num_address_cycles
997 * Number of address cycles to put on the bus
998 * @param nand_address
999 * Data to be put on the bus. It is translated according to
1000 * the rules in the file information section.
1002 * @param cmd_data2 If non zero, adds a second CLE cycle used by a number of NAND
1005 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
/* NOTE(review): this capture of the file is missing lines (braces, blank
** lines, the "if (result)" guard that follows each cvmx_nand_submit(), the
** WAIT command field setup after the memset near original line 1053, and the
** "if (cmd_data2)" guard before the second CLE). Code below is kept
** byte-identical; restore the elided lines from the canonical cvmx-nand.c
** before building. */
1007 static inline cvmx_nand_status_t __cvmx_nand_build_pre_cmd(int chip, int cmd_data, int num_address_cycles, uint64_t nand_address, int cmd_data2)
1009 cvmx_nand_status_t result;
1010 cvmx_nand_cmd_t cmd;
1012 CVMX_NAND_LOG_CALLED();
1014 /* Send timing parameters */
1015 memset(&cmd, 0, sizeof(cmd));
1016 cmd.set_tm_par.one = 1;
1017 cmd.set_tm_par.tim_mult = cvmx_nand_state[chip].tim_mult;
1018 /* tim_par[0] unused */
1019 cmd.set_tm_par.tim_par1 = cvmx_nand_state[chip].tim_par[1];
1020 cmd.set_tm_par.tim_par2 = cvmx_nand_state[chip].tim_par[2];
1021 cmd.set_tm_par.tim_par3 = cvmx_nand_state[chip].tim_par[3];
1022 cmd.set_tm_par.tim_par4 = cvmx_nand_state[chip].tim_par[4];
1023 cmd.set_tm_par.tim_par5 = cvmx_nand_state[chip].tim_par[5];
1024 cmd.set_tm_par.tim_par6 = cvmx_nand_state[chip].tim_par[6];
1025 cmd.set_tm_par.tim_par7 = cvmx_nand_state[chip].tim_par[7];
1026 result = cvmx_nand_submit(cmd);
/* (elided guard: returns here only when the submit failed) */
1028 CVMX_NAND_RETURN(result);
1030 /* Send bus select */
1031 memset(&cmd, 0, sizeof(cmd));
1032 cmd.bus_acq.fifteen = 15;
1033 cmd.bus_acq.one = 1;
1034 result = cvmx_nand_submit(cmd);
1036 CVMX_NAND_RETURN(result);
1038 /* Send chip select */
1039 memset(&cmd, 0, sizeof(cmd));
1040 cmd.chip_en.chip = chip;
1041 cmd.chip_en.one = 1;
1042 cmd.chip_en.three = 3;
/* Bus width: 2 selects a 16-bit data path, 1 selects 8-bit */
1043 cmd.chip_en.width = (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT) ? 2 : 1;
1044 result = cvmx_nand_submit(cmd);
1046 CVMX_NAND_RETURN(result);
1048 /* Send wait, fixed time
1049 ** This meets chip enable to command latch enable timing.
1050 ** This is tCS - tCLS from the ONFI spec.
1051 ** Use tWP as a proxy, as this is adequate for
1052 ** all ONFI 1.0 timing modes. */
1053 memset(&cmd, 0, sizeof(cmd));
/* (elided: the cmd.wait field setup lives between these two lines) */
1056 if (cvmx_nand_submit(cmd))
1057 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
/* Send the first CLE (command latch) cycle */
1060 memset(&cmd, 0, sizeof(cmd));
1061 cmd.cle.cmd_data = cmd_data;
1062 cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
1063 cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
1064 cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
1066 result = cvmx_nand_submit(cmd);
1068 CVMX_NAND_RETURN(result);
/* Send the ALE (address latch) cycles, if any were requested */
1071 if (num_address_cycles)
1073 memset(&cmd, 0, sizeof(cmd));
1074 cmd.ale.adr_byte_num = num_address_cycles;
/* Short cycle counts pass the address through unmodified */
1075 if (num_address_cycles < __cvmx_nand_get_address_cycles(chip))
1077 cmd.ale.adr_bytes_l = nand_address;
1078 cmd.ale.adr_bytes_h = nand_address >> 32;
/* Otherwise split the address into column and row, padding the column
** out to a whole number of address bytes */
1082 int column_bits = __cvmx_nand_get_column_bits(chip);
1083 int column_shift = ((column_bits + 7) >> 3) << 3;
1084 int column = nand_address & (cvmx_nand_state[chip].page_size-1);
1085 int row = nand_address >> column_bits;
1086 cmd.ale.adr_bytes_l = column + (row << column_shift);
1087 cmd.ale.adr_bytes_h = row >> (32 - column_shift);
1089 cmd.ale.alen1 = cvmx_nand_state[chip].alen[0];
1090 cmd.ale.alen2 = cvmx_nand_state[chip].alen[1];
1091 cmd.ale.alen3 = cvmx_nand_state[chip].alen[2];
1092 cmd.ale.alen4 = cvmx_nand_state[chip].alen[3];
1094 result = cvmx_nand_submit(cmd);
1096 CVMX_NAND_RETURN(result);
/* Send the optional second CLE cycle (elided "if (cmd_data2)" guard) */
1102 memset(&cmd, 0, sizeof(cmd));
1103 cmd.cle.cmd_data = cmd_data2;
1104 cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
1105 cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
1106 cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
1108 result = cvmx_nand_submit(cmd);
1110 CVMX_NAND_RETURN(result);
1113 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1119 * Build the set of command common to most transactions
1120 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1122 static inline cvmx_nand_status_t __cvmx_nand_build_post_cmd(void)
1124 cvmx_nand_status_t result;
1125 cvmx_nand_cmd_t cmd;
1127 CVMX_NAND_LOG_CALLED();
1129 /* Send chip deselect */
1130 memset(&cmd, 0, sizeof(cmd));
1131 cmd.chip_dis.three = 3;
1132 result = cvmx_nand_submit(cmd);
1134 CVMX_NAND_RETURN(result);
1136 /* Send bus release */
1137 memset(&cmd, 0, sizeof(cmd));
1138 cmd.bus_rel.fifteen = 15;
1139 result = cvmx_nand_submit(cmd);
1141 CVMX_NAND_RETURN(result);
1143 /* Ring the doorbell */
1144 cvmx_write_csr(CVMX_NDF_DRBELL, 1);
1145 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1151 * Setup the NAND DMA engine for a transfer
1153 * @param chip Chip select for NAND flash
1154 * @param is_write Non zero if this is a write
1155 * @param buffer_address
1156 * Physical memory address to DMA to/from
1157 * @param buffer_length
1158 * Length of the DMA in bytes
1160 static inline void __cvmx_nand_setup_dma(int chip, int is_write, uint64_t buffer_address, int buffer_length)
1162 union cvmx_mio_ndf_dma_cfg ndf_dma_cfg;
1163 CVMX_NAND_LOG_CALLED();
1164 CVMX_NAND_LOG_PARAM("%d", chip);
1165 CVMX_NAND_LOG_PARAM("%d", is_write);
1166 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1167 CVMX_NAND_LOG_PARAM("%d", buffer_length);
1168 ndf_dma_cfg.u64 = 0;
1169 ndf_dma_cfg.s.en = 1;
1170 ndf_dma_cfg.s.rw = is_write; /* One means DMA reads from memory and writes to flash */
1171 ndf_dma_cfg.s.clr = 0;
/* SIZE is in 64bit words and is encoded minus one */
1172 ndf_dma_cfg.s.size = ((buffer_length + 7) >> 3) - 1;
1173 ndf_dma_cfg.s.adr = buffer_address;
/* NOTE(review): a line is elided here in this capture (original line 1174);
** likely a write barrier before enabling the DMA - confirm against the
** canonical cvmx-nand.c. */
1175 cvmx_write_csr(CVMX_MIO_NDF_DMA_CFG, ndf_dma_cfg.u64);
1176 CVMX_NAND_RETURN_NOTHING();
1181 * Dump a buffer out in hex for debug
1183 * @param buffer_address
1184 * Starting physical address
1185 * @param buffer_length
1186 * Number of bytes to display
1188 static void __cvmx_nand_hex_dump(uint64_t buffer_address, int buffer_length)
1190 uint8_t *buffer = cvmx_phys_to_ptr(buffer_address);
1192 while (offset < buffer_length)
1195 cvmx_dprintf("%*s%04x:", 2*debug_indent, "", offset);
1196 for (i=0; i<32; i++)
1200 if (offset+i < buffer_length)
1201 cvmx_dprintf("%02x", 0xff & buffer[offset+i]);
1212 * Perform a low level NAND read command
1214 * @param chip Chip to read from
1215 * @param nand_command1
1216 * First command cycle value
1217 * @param address_cycles
1218 * Number of address cycles after comand 1
1219 * @param nand_address
1220 * NAND address to use for address cycles
1221 * @param nand_command2
1222 * NAND command cycle 2 if not zero
1223 * @param buffer_address
1224 * Physical address to DMA into
1225 * @param buffer_length
1226 * Length of the transfer in bytes
1228 * @return Number of bytes transfered or a negative error code
/* NOTE(review): this capture is missing lines (braces, the "int bytes;"
** declaration, the condition for the third INVALID_PARAM check, the
** cmd.wait field setup, and the "else" arm selecting the normal READ
** opcode when EDO is not needed). Code below is kept byte-identical;
** restore the elided lines from the canonical cvmx-nand.c before
** building. */
1230 static inline int __cvmx_nand_low_level_read(int chip, int nand_command1, int address_cycles, uint64_t nand_address, int nand_command2, uint64_t buffer_address, int buffer_length)
1232 cvmx_nand_cmd_t cmd;
1233 union cvmx_mio_ndf_dma_cfg ndf_dma_cfg;
1236 CVMX_NAND_LOG_CALLED();
1237 CVMX_NAND_LOG_PARAM("%d", chip);
1238 CVMX_NAND_LOG_PARAM("0x%x", nand_command1);
1239 CVMX_NAND_LOG_PARAM("%d", address_cycles);
1240 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1241 CVMX_NAND_LOG_PARAM("0x%x", nand_command2);
1242 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1243 CVMX_NAND_LOG_PARAM("%d", buffer_length);
/* The DMA engine transfers 64bit words: buffer must be 8 byte aligned
** and the length a multiple of 8 */
1245 if ((chip < 0) || (chip > 7))
1246 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1247 if (!buffer_address)
1248 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1249 if (buffer_address & 7)
1250 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1251 if (buffer_length & 7)
1252 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
/* (elided condition for the next check - original line 1253) */
1254 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1256 /* Build the command and address cycles */
1257 if (__cvmx_nand_build_pre_cmd(chip, nand_command1, address_cycles, nand_address, nand_command2))
1258 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1260 /* Send WAIT. This waits for some time, then
1261 ** waits for busy to be de-asserted. */
1262 if (__wait_for_busy_done(chip))
1263 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1265 /* Wait for tRR after busy de-asserts.
1266 ** Use 2* tALS as proxy. This is overkill in
1267 ** the slow modes, but not bad in the faster ones. */
1268 memset(&cmd, 0, sizeof(cmd));
/* (elided: cmd.wait field setup; the same wait is submitted twice to
** approximate 2*tALS) */
1271 if (cvmx_nand_submit(cmd))
1272 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1273 if (cvmx_nand_submit(cmd))
1274 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
/* Queue the data read strobes */
1277 memset(&cmd, 0, sizeof(cmd));
1278 cmd.rd.data_bytes = buffer_length;
1279 if (cvmx_nand_state[chip].onfi_timing >= 4)
1280 cmd.rd.nine = 10; /* READ_EDO command is required for ONFI timing modes 4 and 5 */
1283 cmd.rd.rdn1 = cvmx_nand_state[chip].rdn[0];
1284 cmd.rd.rdn2 = cvmx_nand_state[chip].rdn[1];
1285 cmd.rd.rdn3 = cvmx_nand_state[chip].rdn[2];
1286 cmd.rd.rdn4 = cvmx_nand_state[chip].rdn[3];
1287 if (cvmx_nand_submit(cmd))
1288 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1290 __cvmx_nand_setup_dma(chip, 0, buffer_address, buffer_length);
1292 if (__cvmx_nand_build_post_cmd())
1293 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1295 /* Wait for the DMA to complete */
1296 if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_NDF_DMA_CFG, cvmx_mio_ndf_dma_cfg_t, en, ==, 0, NAND_TIMEOUT_USECS_READ))
1299 CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
1301 /* Return the number of bytes transfered */
1302 ndf_dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_NDF_DMA_CFG);
1303 bytes = ndf_dma_cfg.s.adr - buffer_address;
1305 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
1306 __cvmx_nand_hex_dump(buffer_address, bytes);
1308 CVMX_NAND_RETURN(bytes);
1313 * Read a page from NAND. If the buffer has room, the out of band
1314 * data will be included.
1316 * @param chip Chip select for NAND flash
1317 * @param nand_address
1318 * Location in NAND to read. See description in file comment
1319 * @param buffer_address
1320 * Physical address to store the result at
1321 * @param buffer_length
1322 * Number of bytes to read
1324 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1326 int cvmx_nand_page_read(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length)
1330 CVMX_NAND_LOG_CALLED();
1331 CVMX_NAND_LOG_PARAM("%d", chip);
1332 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1333 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1334 CVMX_NAND_LOG_PARAM("%d", buffer_length);
1336 if ((chip < 0) || (chip > 7))
1337 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1338 if (!cvmx_nand_state[chip].page_size)
1339 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1340 if (!buffer_address)
1341 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1342 if (buffer_address & 7)
1343 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1344 if (buffer_length & 7)
1345 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1347 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1349 /* For 16 bit mode, addresses within a page are word address, rather than byte addresses */
1350 if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
1351 nand_address = (nand_address & ~(cvmx_nand_state[chip].page_size - 1)) | ((nand_address & (cvmx_nand_state[chip].page_size - 1)) >> 1);
1353 bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ, __cvmx_nand_get_address_cycles(chip), nand_address, NAND_COMMAND_READ_FIN, buffer_address, buffer_length);
1354 CVMX_NAND_RETURN(bytes);
1356 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1357 EXPORT_SYMBOL(cvmx_nand_page_read);
1362 * Write a page to NAND. The buffer must contain the entire page
1363 * including the out of band data.
1365 * @param chip Chip select for NAND flash
1366 * @param nand_address
1367 * Location in NAND to write. See description in file comment
1368 * @param buffer_address
1369 * Physical address to read the data from
1371 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1373 cvmx_nand_status_t cvmx_nand_page_write(int chip, uint64_t nand_address, uint64_t buffer_address)
1375 cvmx_nand_cmd_t cmd;
1378 CVMX_NAND_LOG_CALLED();
1379 CVMX_NAND_LOG_PARAM("%d", chip);
1380 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1381 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1383 if ((chip < 0) || (chip > 7))
1384 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1385 if (!cvmx_nand_state[chip].page_size)
1386 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1387 if (!buffer_address)
1388 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1389 if (buffer_address & 7)
1390 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1392 /* For 16 bit mode, addresses within a page are word address, rather than byte addresses */
1393 if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
1394 nand_address = (nand_address & ~(cvmx_nand_state[chip].page_size - 1)) | ((nand_address & (cvmx_nand_state[chip].page_size - 1)) >> 1);
1396 buffer_length = cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size;
1398 /* The NAND DMA engine always does transfers in 8 byte blocks, so round the buffer size down
1399 ** to a multiple of 8, otherwise we will transfer too much data to the NAND chip.
1400 ** Note this prevents the last few bytes of the OOB being written. If these bytes
1401 ** need to be written, then this check needs to be removed, but this will result in
1402 ** extra write cycles beyond the end of the OOB. */
1403 buffer_length &= ~0x7;
1405 /* Build the command and address cycles */
1406 if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_PROGRAM, __cvmx_nand_get_address_cycles(chip), nand_address, 0))
1407 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1410 memset(&cmd, 0, sizeof(cmd));
1411 cmd.wr.data_bytes = buffer_length;
1413 cmd.wr.wrn1 = cvmx_nand_state[chip].wrn[0];
1414 cmd.wr.wrn2 = cvmx_nand_state[chip].wrn[1];
1415 if (cvmx_nand_submit(cmd))
1416 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1418 /* Send WRITE command */
1419 memset(&cmd, 0, sizeof(cmd));
1420 cmd.cle.cmd_data = NAND_COMMAND_PROGRAM_FIN;
1421 cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
1422 cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
1423 cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
1425 if (cvmx_nand_submit(cmd))
1426 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1428 __cvmx_nand_setup_dma(chip, 1, buffer_address, buffer_length);
1430 /* WAIT for R_B to signal program is complete */
1431 if (__wait_for_busy_done(chip))
1432 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1434 if (__cvmx_nand_build_post_cmd())
1435 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1437 /* Wait for the DMA to complete */
1439 if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_NDF_DMA_CFG, cvmx_mio_ndf_dma_cfg_t, en, ==, 0, NAND_TIMEOUT_USECS_WRITE))
1442 CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
1444 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1446 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1447 EXPORT_SYMBOL(cvmx_nand_page_write);
1452 * Erase a NAND block. A single block contains multiple pages.
1454 * @param chip Chip select for NAND flash
1455 * @param nand_address
1456 * Location in NAND to erase. See description in file comment
1458 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1460 cvmx_nand_status_t cvmx_nand_block_erase(int chip, uint64_t nand_address)
1462 CVMX_NAND_LOG_CALLED();
1463 CVMX_NAND_LOG_PARAM("%d", chip);
1464 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1466 if ((chip < 0) || (chip > 7))
1467 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1468 if (!cvmx_nand_state[chip].page_size)
1469 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1471 /* Build the command and address cycles */
1472 if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_ERASE,
1473 (__cvmx_nand_get_row_bits(chip)+7) >> 3,
1474 nand_address >> __cvmx_nand_get_column_bits(chip),
1475 NAND_COMMAND_ERASE_FIN))
1476 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1478 /* WAIT for R_B to signal erase is complete */
1479 if (__wait_for_busy_done(chip))
1480 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1482 if (__cvmx_nand_build_post_cmd())
1483 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1485 /* Wait for the command queue to be idle, which means the wait is done */
1487 if (CVMX_WAIT_FOR_FIELD64(CVMX_NDF_ST_REG, cvmx_ndf_st_reg_t, exe_idle, ==, 1, NAND_TIMEOUT_USECS_BLOCK_ERASE))
1490 CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
1493 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1495 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1496 EXPORT_SYMBOL(cvmx_nand_block_erase);
/* Some reads (read ID, read parameter page) only use the low 8 bits of the bus
** in 16 bit mode. We remove the unused bytes so that the data we present to the
** caller is as expected (same as 8 bit mode.)
*/
static void __cvmx_nand_fixup_16bit_id_reads(uint8_t *buf, int buffer_length)
{
    int i;
    /* Decimate data, taking only every other byte: the valid data is in
       the odd byte of each 16 bit bus cycle */
    for (i = 0; i < buffer_length/2; i++)
        buf[i] = buf[2*i + 1];
}
1513 * Read the NAND ID information
1515 * @param chip Chip select for NAND flash
1516 * @param nand_address
1517 * NAND address to read ID from. Usually this is either 0x0 or 0x20.
1518 * @param buffer_address
1519 * Physical address to store data in
1520 * @param buffer_length
1521 * Length of the buffer. Usually this is 4-8 bytes. For 16 bit mode, this must be twice
1522 * as large as the actual expected data.
1524 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1526 int cvmx_nand_read_id(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length)
1530 CVMX_NAND_LOG_CALLED();
1531 CVMX_NAND_LOG_PARAM("%d", chip);
1532 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1533 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1534 CVMX_NAND_LOG_PARAM("%d", buffer_length);
1536 if ((chip < 0) || (chip > 7))
1537 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1538 if (!buffer_address)
1539 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1540 if (buffer_address & 7)
1541 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1543 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1545 bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ_ID, 1, nand_address, 0, buffer_address, buffer_length);
1546 if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
1547 __cvmx_nand_fixup_16bit_id_reads(cvmx_phys_to_ptr(buffer_address), buffer_length);
1549 CVMX_NAND_RETURN(bytes);
1551 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1552 EXPORT_SYMBOL(cvmx_nand_read_id);
1557 * Read the NAND parameter page
1559 * @param chip Chip select for NAND flash
1560 * @param buffer_address
1561 * Physical address to store data in
1562 * @param buffer_length
1563 * Length of the buffer. Usually 1024 bytes for 8 bit, 2048 for 16 bit mode.
1565 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1567 int cvmx_nand_read_param_page(int chip, uint64_t buffer_address, int buffer_length)
1571 CVMX_NAND_LOG_CALLED();
1572 CVMX_NAND_LOG_PARAM("%d", chip);
1573 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1574 CVMX_NAND_LOG_PARAM("%d", buffer_length);
1576 if ((chip < 0) || (chip > 7))
1577 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1578 if (!buffer_address)
1579 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1580 if (buffer_address & 7)
1581 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1582 if (buffer_length & 7)
1583 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1585 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1587 bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ_PARAM_PAGE, 1, 0x0, 0, buffer_address, buffer_length);
1588 if (cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT)
1589 __cvmx_nand_fixup_16bit_id_reads(cvmx_phys_to_ptr(buffer_address), buffer_length);
1590 CVMX_NAND_RETURN(bytes);
1595 * Get the status of the NAND flash
1597 * @param chip Chip select for NAND flash
1599 * @return NAND status or a negative cvmx_nand_status_t error code on failure
1601 int cvmx_nand_get_status(int chip)
1604 int offset = !!(cvmx_nand_state[chip].flags & CVMX_NAND_STATE_16BIT); /* Normalize flag to 0/1 */
1606 CVMX_NAND_LOG_CALLED();
1607 CVMX_NAND_LOG_PARAM("%d", chip);
1609 if ((chip < 0) || (chip > 7))
1610 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1612 *((uint8_t*)cvmx_nand_buffer + offset) = 0xff;
1613 status = __cvmx_nand_low_level_read(chip, NAND_COMMAND_STATUS, 0, 0, 0, cvmx_ptr_to_phys(cvmx_nand_buffer), 8);
1615 status = *((uint8_t*)cvmx_nand_buffer + offset);
1617 CVMX_NAND_RETURN(status);
1619 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1620 EXPORT_SYMBOL(cvmx_nand_get_status);
1625 * Get the page size, excluding out of band data. This function
1626 * will return zero for chip selects not connected to NAND.
1628 * @param chip Chip select for NAND flash
1630 * @return Page size in bytes or a negative cvmx_nand_status_t error code on failure
1632 int cvmx_nand_get_page_size(int chip)
1634 CVMX_NAND_LOG_CALLED();
1635 CVMX_NAND_LOG_PARAM("%d", chip);
1637 if ((chip < 0) || (chip > 7))
1638 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1640 CVMX_NAND_RETURN(cvmx_nand_state[chip].page_size);
1647 * @param chip Chip select for NAND flash
1649 * @return OOB in bytes or a negative cvmx_nand_status_t error code on failure
1651 int cvmx_nand_get_oob_size(int chip)
1653 CVMX_NAND_LOG_CALLED();
1654 CVMX_NAND_LOG_PARAM("%d", chip);
1656 if ((chip < 0) || (chip > 7))
1657 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1659 CVMX_NAND_RETURN(cvmx_nand_state[chip].oob_size);
1664 * Get the number of pages per NAND block
1666 * @param chip Chip select for NAND flash
1668 * @return Number of pages in each block or a negative cvmx_nand_status_t error
1671 int cvmx_nand_get_pages_per_block(int chip)
1673 CVMX_NAND_LOG_CALLED();
1674 CVMX_NAND_LOG_PARAM("%d", chip);
1676 if ((chip < 0) || (chip > 7))
1677 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1679 CVMX_NAND_RETURN(cvmx_nand_state[chip].pages_per_block);
1684 * Get the number of blocks in the NAND flash
1686 * @param chip Chip select for NAND flash
1688 * @return Number of blocks or a negative cvmx_nand_status_t error code on failure
1690 int cvmx_nand_get_blocks(int chip)
1692 CVMX_NAND_LOG_CALLED();
1693 CVMX_NAND_LOG_PARAM("%d", chip);
1695 if ((chip < 0) || (chip > 7))
1696 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1698 CVMX_NAND_RETURN(cvmx_nand_state[chip].blocks);
1703 * Reset the NAND flash
1705 * @param chip Chip select for NAND flash
1707 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1709 cvmx_nand_status_t cvmx_nand_reset(int chip)
1711 CVMX_NAND_LOG_CALLED();
1712 CVMX_NAND_LOG_PARAM("%d", chip);
1714 if ((chip < 0) || (chip > 7))
1715 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1716 if (!cvmx_nand_state[chip].page_size)
1717 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1719 if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_RESET, 0, 0, 0))
1720 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1722 /* WAIT for R_B to signal reset is complete */
1723 if (__wait_for_busy_done(chip))
1724 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1726 if (__cvmx_nand_build_post_cmd())
1727 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1729 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1731 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
1732 EXPORT_SYMBOL(cvmx_nand_reset);
/**
 * This function computes the Octeon specific ECC data used by the NAND boot
 * feature.
 *
 * @param block  pointer to 256 bytes of data
 * @param eccp   pointer to where the ECC data will be stored; three bytes
 *               (PD0, PD1, PD2) are written (callers typically reserve 8)
 */
void cvmx_nand_compute_boot_ecc(unsigned char *block, unsigned char *eccp)
{
    unsigned char pd0, pd1, pd2;
    int i, j;

    pd0 = pd1 = pd2 = 0;

    /* Each parity bit covers a different interleaving of the 2048 data
       bits, Hamming style, so a single bit error can later be located */
    for (i = 0; i < 256; i++) /* PD0<0> */
        pd0 ^= (block[i] ^ (block[i] >> 2) ^ (block[i] >> 4) ^ (block[i] >> 6)) & 1;
    for (i = 0; i < 256; i++) /* PD0<1> */
        pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 4) ^ (block[i] >> 5)) & 1) << 1;
    for (i = 0; i < 256; i++) /* PD0<2> */
        pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^ (block[i] >> 3)) & 1) << 2;
    for (i = 0; i < 128; i++) /* PD0<3> */
        pd0 ^= ((block[2*i] ^ (block[2*i] >> 1) ^ (block[2*i] >> 2) ^
                 (block[2*i] >> 3) ^ (block[2*i] >> 4) ^ (block[2*i] >> 5) ^
                 (block[2*i] >> 6) ^ (block[2*i] >> 7)) & 1) << 3;
    for (i = 0; i < 64; i++) /* PD0<4> */
        for (j = 0; j < 2; j++)
            pd0 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
                     (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
                     (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 4;
    for (i = 0; i < 32; i++) /* PD0<5> */
        for (j = 0; j < 4; j++)
            pd0 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
                     (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
                     (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 5;
    for (i = 0; i < 16; i++) /* PD0<6> */
        for (j = 0; j < 8; j++)
            pd0 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
                     (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
                     (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 6;
    for (i = 0; i < 8; i++) /* PD0<7> */
        for (j = 0; j < 16; j++)
            pd0 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
                     (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
                     (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 7;
    for (i = 0; i < 4; i++) /* PD1<0> */
        for (j = 0; j < 32; j++)
            pd1 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
                     (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
                     (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 0;
    for (i = 0; i < 2; i++) /* PD1<1> */
        for (j = 0; j < 64; j++)
            pd1 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
                     (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
                     (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 1;
    for (i = 0; i < 128; i++) /* PD1<2> */
        pd1 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
                 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
                 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 2;
    /* PD1<3> and PD1<4> are not used and stay zero */
    for (i = 0; i < 256; i++) /* PD1<5> */
        pd1 ^= (((block[i] >> 1) ^ (block[i] >> 3) ^ (block[i] >> 5) ^ (block[i] >> 7)) & 1) << 5;
    for (i = 0; i < 256; i++) /* PD1<6> */
        pd1 ^= (((block[i] >> 2) ^ (block[i] >> 3) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 6;
    for (i = 0; i < 256; i++) /* PD1<7> */
        pd1 ^= (((block[i] >> 4) ^ (block[i] >> 5) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
    for (i = 0; i < 128; i++) /* PD2<0> */
        pd2 ^= ((block[2*i+1] ^ (block[2*i+1] >> 1) ^ (block[2*i+1] >> 2) ^
                 (block[2*i+1] >> 3) ^ (block[2*i+1] >> 4) ^ (block[2*i+1] >> 5) ^
                 (block[2*i+1] >> 6) ^ (block[2*i+1] >> 7)) & 1) << 0;
    for (i = 0; i < 64; i++) /* PD2<1> */
        for (j = 2; j < 4; j++)
            pd2 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
                     (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
                     (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 1;
    for (i = 0; i < 32; i++) /* PD2<2> */
        for (j = 4; j < 8; j++)
            pd2 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
                     (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
                     (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 2;
    for (i = 0; i < 16; i++) /* PD2<3> */
        for (j = 8; j < 16; j++)
            pd2 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
                     (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
                     (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 3;
    for (i = 0; i < 8; i++) /* PD2<4> */
        for (j = 16; j < 32; j++)
            pd2 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
                     (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
                     (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 4;
    for (i = 0; i < 4; i++) /* PD2<5> */
        for (j = 32; j < 64; j++)
            pd2 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
                     (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
                     (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 5;
    for (i = 0; i < 2; i++) /* PD2<6> */
        for (j = 64; j < 128; j++)
            pd2 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
                     (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
                     (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 6;
    for (i = 128; i < 256; i++) /* PD2<7> */
        pd2 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
                 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
                 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;

    eccp[0] = pd0;
    eccp[1] = pd1;
    eccp[2] = pd2;
}
/**
 * Check an Octeon boot ECC block, fixing errors if possible.
 *
 * A block is 256 data bytes followed by three parity bytes: block[256]
 * (PD0), block[257] (PD1) and block[258] (PD2).  The scheme is a modified
 * Hamming code: PD0<0:2> and PD1<5:7> are column parities (each XORs one
 * half of the bit positions 0-7 across all 256 bytes), while the remaining
 * bits are row parities over power-of-two groupings of the bytes.  The
 * parities are recomputed, XORed with the stored bytes to form a syndrome,
 * and the syndrome locates a single failing bit: the failing byte index is
 * xorpd2 and the failing bit within it is xorpd1<7:5>.
 *
 * @param block Pointer to the 259 byte block to check
 *
 * @return Zero if block has no errors, one if errors were corrected
 *         (or were confined to the parity bytes), two if the errors
 *         could not be corrected.
 */
int cvmx_nand_correct_boot_ecc(uint8_t *block)
{
    unsigned char pd0, pd1, pd2;            /* Recomputed parity bytes */
    int i, j;
    unsigned char xorpd0, xorpd1, xorpd2;   /* Syndrome: stored ^ recomputed */
    int xor_num;                            /* Number of syndrome bits set */
    unsigned short check;

#if defined(__mips) || defined(__mips__)
    /* Prefetch all three cache lines of the block; every loop below walks
       the same 256 bytes.  MIPS-only: "pref" does not assemble elsewhere. */
    asm volatile ("pref 0,0(%0);pref 0,128(%0);pref 0,256(%0)\n" :: "r" (block));
#endif

    pd0 = pd1 = pd2 = 0;

    /* Column parities over all 256 bytes (bit positions {0,2,4,6},
       {0,1,4,5} and {0,1,2,3} respectively) */
    for (i = 0; i < 256; i++) /* PD0<0> */
        pd0 ^= (block[i] ^ (block[i] >> 2) ^ (block[i] >> 4) ^ (block[i] >> 6)) & 1;
    for (i = 0; i < 256; i++) /* PD0<1> */
        pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 4) ^ (block[i] >> 5)) & 1) << 1;
    for (i = 0; i < 256; i++) /* PD0<2> */
        pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^ (block[i] >> 3)) & 1) << 2;
    /* Row parities over the "low" half of each power-of-two byte grouping */
    for (i = 0; i < 128; i++) /* PD0<3> */
        pd0 ^= ((block[2*i] ^ (block[2*i] >> 1) ^ (block[2*i] >> 2) ^
                 (block[2*i] >> 3) ^ (block[2*i] >> 4) ^ (block[2*i] >> 5) ^
                 (block[2*i] >> 6) ^ (block[2*i] >> 7)) & 1) << 3;
    for (i = 0; i < 64; i++) /* PD0<4> */
        for (j = 0; j < 2; j++)
            pd0 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
                     (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
                     (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 4;
    for (i = 0; i < 32; i++) /* PD0<5> */
        for (j = 0; j < 4; j++)
            pd0 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
                     (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
                     (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 5;
    for (i = 0; i < 16; i++) /* PD0<6> */
        for (j = 0; j < 8; j++)
            pd0 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
                     (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
                     (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 6;
    for (i = 0; i < 8; i++) /* PD0<7> */
        for (j = 0; j < 16; j++)
            pd0 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
                     (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
                     (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 7;
    for (i = 0; i < 4; i++) /* PD1<0> */
        for (j = 0; j < 32; j++)
            pd1 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
                     (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
                     (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 0;
    for (i = 0; i < 2; i++) /* PD1<1> */
        for (j = 0; j < 64; j++)
            pd1 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
                     (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
                     (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 1;
    for (i = 0; i < 128; i++) /* PD1<2> */
        pd1 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
                 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
                 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 2;
    /* PD1<3> and PD1<4> are not part of the code word: the check and
       correction logic below only examines PD1 bits 0-2 and 5-7 */
    for (i = 0; i < 256; i++) /* PD1<5> */
        pd1 ^= (((block[i] >> 1) ^ (block[i] >> 3) ^ (block[i] >> 5) ^ (block[i] >> 7)) & 1) << 5;
    for (i = 0; i < 256; i++) /* PD1<6> */
        pd1 ^= (((block[i] >> 2) ^ (block[i] >> 3) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 6;
    for (i = 0; i < 256; i++) /* PD1<7> */
        pd1 ^= (((block[i] >> 4) ^ (block[i] >> 5) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
    /* Row parities over the "high" half of each grouping (complements of
       PD0<3:7> and PD1<0:2> above) */
    for (i = 0; i < 128; i++) /* PD2<0> */
        pd2 ^= ((block[2*i+1] ^ (block[2*i+1] >> 1) ^ (block[2*i+1] >> 2) ^
                 (block[2*i+1] >> 3) ^ (block[2*i+1] >> 4) ^ (block[2*i+1] >> 5) ^
                 (block[2*i+1] >> 6) ^ (block[2*i+1] >> 7)) & 1) << 0;
    for (i = 0; i < 64; i++) /* PD2<1> */
        for (j = 2; j < 4; j++)
            pd2 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
                     (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
                     (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 1;
    for (i = 0; i < 32; i++) /* PD2<2> */
        for (j = 4; j < 8; j++)
            pd2 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
                     (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
                     (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 2;
    for (i = 0; i < 16; i++) /* PD2<3> */
        for (j = 8; j < 16; j++)
            pd2 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
                     (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
                     (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 3;
    for (i = 0; i < 8; i++) /* PD2<4> */
        for (j = 16; j < 32; j++)
            pd2 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
                     (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
                     (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 4;
    for (i = 0; i < 4; i++) /* PD2<5> */
        for (j = 32; j < 64; j++)
            pd2 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
                     (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
                     (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 5;
    for (i = 0; i < 2; i++) /* PD2<6> */
        for (j = 64; j < 128; j++)
            pd2 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
                     (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
                     (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 6;
    for (i = 128; i < 256; i++) /* PD2<7> */
        pd2 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
                 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
                 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;

    /* Syndrome: XOR the stored parity bytes with the recomputed ones */
    xorpd0 = pd0 ^ block[256];
    xorpd1 = pd1 ^ block[257];
    xorpd2 = pd2 ^ block[258];

    xor_num = __builtin_popcount((xorpd0 << 16) | (xorpd1 << 8) | xorpd2);
    /* Each row/column parity pair covers complementary halves, so a single
       failing data bit flips exactly one bit of every pair and the two
       11-bit syndrome halves are exact complements: check == 0x7FF */
    check = (((xorpd1 & 7) << 8) | xorpd0) ^ ((xorpd2 << 3) | (xorpd1 >> 5));

    if (xor_num == 0)
        return 0;   /* Block is clean */
    else if ((xor_num > 1) && (check != 0x7FF))
        return 2;   /* Multi-bit error: not correctable */

    if (check == 0x7FF)
    {
        /* Correct the error: xorpd2 is the failing byte index, xorpd1<7:5>
           the failing bit position within it */
        block[xorpd2] ^= 1 << (xorpd1 >> 5);
    }
    /* else: a single-bit error confined to the stored parity bytes; the
       data itself is intact, so there is nothing to fix */

    return 1;
}
/*
 * Set the default NAND chip geometry and timing used when a chip cannot be
 * identified automatically.  The values are cached in the file-scope
 * cvmx_nand_default structure for later use.
 *
 * NOTE(review): this chunk is a lossy extract -- the function's opening and
 * closing braces are not visible here, and CVMX_NAND_RETURN is presumably a
 * trace-and-return macro defined elsewhere in this file; confirm against the
 * full source.  The leading numbers on each line are original-file line
 * numbers fused in by extraction, not code.
 */
1980 cvmx_nand_status_t cvmx_nand_set_defaults(int page_size, int oob_size, int pages_per_block, int blocks, int onfi_timing_mode)
1982 if (!page_size || !oob_size || !pages_per_block || !blocks || onfi_timing_mode > 5) /* reject zero sizes/counts; ONFI timing modes above 5 are invalid */
1983 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1985 cvmx_nand_default.page_size = page_size; /* data bytes per page */
1986 cvmx_nand_default.oob_size = oob_size; /* presumably the spare (out-of-band) bytes per page -- verify */
1987 cvmx_nand_default.pages_per_block = pages_per_block;
1988 cvmx_nand_default.blocks = blocks;
1989 cvmx_nand_default.onfi_timing = onfi_timing_mode;
1991 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);