1 /***********************license start***************
2 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
24 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
25 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
26 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
27 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
28 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
29 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
30 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
31 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
32 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
35 * For any questions regarding licensing please contact marketing@caviumnetworks.com
37 ***********************license end**************************************/
43 * Interface to the NAND flash controller.
44 * See cvmx-nand.h for usage documentation and notes.
46 * <hr>$Revision: 35726 $<hr>
50 #include "cvmx-nand.h"
51 #include "cvmx-swap.h"
52 #include "cvmx-bootmem.h"
/* ONFI NAND command opcodes, issued as CLE (command latch) cycles. The
** *_FIN values are the second command byte of the two-cycle read / erase /
** program sequences. */
54 #define NAND_COMMAND_READ_ID 0x90
55 #define NAND_COMMAND_READ_PARAM_PAGE 0xec
56 #define NAND_COMMAND_RESET 0xff
57 #define NAND_COMMAND_STATUS 0x70
58 #define NAND_COMMAND_READ 0x00
59 #define NAND_COMMAND_READ_FIN 0x30
60 #define NAND_COMMAND_ERASE 0x60
61 #define NAND_COMMAND_ERASE_FIN 0xd0
62 #define NAND_COMMAND_PROGRAM 0x80
63 #define NAND_COMMAND_PROGRAM_FIN 0x10
/* Timeout used for NAND operations, in microseconds (one second). */
64 #define NAND_TIMEOUT_USECS 1000000
/* Integer division of _Dividend by _Divisor, rounded up.
** Fix: the original expanded "_Divisor-1" without parenthesizing the
** parameter, so a divisor built from a lower-precedence expression
** (e.g. CVMX_NAND_ROUNDUP(x, a|b)) computed a|(b-1) instead of (a|b)-1.
** Every parameter use is now fully parenthesized. Note _Divisor is still
** evaluated twice, as in the original — avoid side effects in arguments. */
#define CVMX_NAND_ROUNDUP(_Dividend, _Divisor) (((_Dividend)+((_Divisor)-1))/(_Divisor))
69 ({ typeof (X) __x = (X), __y = (Y); \
70 (__x < __y) ? __x : __y; })
74 ({ typeof (X) __x = (X), __y = (Y); \
75 (__x > __y) ? __x : __y; })
78 /* Structure to store the parameters that we care about that
79 ** describe the ONFI speed modes. This is used to configure
80 ** the flash timing to match what is reported in the
81 ** parameter page of the ONFI flash chip. */
89 } onfi_speed_mode_desc_t;
90 static const onfi_speed_mode_desc_t onfi_speed_modes[] =
/* Values are in ns. Based on how the fields are consumed in
** __set_onfi_timing_mode, the columns appear to be
** {twp, twh, twc, tclh, tals} for ONFI 1.0 asynchronous timing modes 0-5
** -- TODO confirm against the onfi_speed_mode_desc_t declaration (elided
** above this view). */
93 {50,30,100,20,50}, /* Mode 0 */
94 {25,15, 45,10,25}, /* Mode 1 */
95 {17,15, 35,10,15}, /* Mode 2 */
96 {15,10, 30, 5,10}, /* Mode 3 */
97 {12,10, 25, 5,10}, /* Mode 4, requires EDO timings */
98 {10, 7, 20, 5,10}, /* Mode 5, requires EDO timings */
102 * Structure used to store data about the NAND devices hooked
121 * Array indexed by bootbus chip select with information
122 * about NAND devices.
/* Module-wide state: per-chip-select device descriptions, the flags passed
** to cvmx_nand_initialize(), a 4KB bounce buffer for DMA, and the debug
** log indentation level. Two layouts are compiled depending on whether we
** are running as u-boot stage 2 (data must live in the text image). */
124 #if defined(CVMX_BUILD_FOR_UBOOT) && CONFIG_OCTEON_NAND_STAGE2
125 /* For u-boot nand boot we need to play some tricks to be able
126 ** to use this early in boot. We put them in a special section that is merged
127 ** with the text segment. (Using the text segment directly results in an assembler warning.)
129 #define USE_DATA_IN_TEXT
132 #ifdef USE_DATA_IN_TEXT
133 static uint8_t cvmx_nand_buffer[4096] __attribute__((aligned(8))) __attribute__ ((section (".data_in_text")));
134 static cvmx_nand_state_t cvmx_nand_state[8] __attribute__ ((section (".data_in_text")));
135 static cvmx_nand_initialize_flags_t cvmx_nand_flags __attribute__ ((section (".data_in_text")));
136 static int debug_indent __attribute__ ((section (".data_in_text")));
/* NOTE(review): the #else separating the two variants (original line 137)
** is elided from this listing. In the non-text-section build the buffer is
** allocated lazily from bootmem in cvmx_nand_initialize(). */
138 static CVMX_SHARED cvmx_nand_state_t cvmx_nand_state[8];
139 static CVMX_SHARED cvmx_nand_initialize_flags_t cvmx_nand_flags;
140 static CVMX_SHARED uint8_t *cvmx_nand_buffer = NULL;
141 static int debug_indent = 0;
/* Human-readable labels for the 16 NDF opcode values, indexed by
** cmd.s.op_code; used only by the debug logging in cvmx_nand_submit().
** Entries for several opcodes (original lines 145-156) are elided from
** this listing. */
144 static CVMX_SHARED const char *cvmx_nand_opcode_labels[] =
149 "Chip Enable / Disable", /* 3 */
152 "6 - Unknown", /* 6 */
153 "7 - Unknown", /* 7 */
157 "Wait Status", /* 11 */
158 "12 - Unknown", /* 12 */
159 "13 - Unknown", /* 13 */
160 "14 - Unknown", /* 14 */
161 "Bus Aquire / Release" /* 15 */
164 #define ULL unsigned long long
165 /* This macro logs out whenever a function is called if debugging is on */
/* Log function entry (and bump the debug indent level) when the
** CVMX_NAND_INITIALIZE_FLAGS_DEBUG flag is set; otherwise a no-op.
** Fix: wrapped in do { } while (0) so the macro expands to a single
** statement. The original bare 'if' would bind a following 'else' in the
** caller to the macro's hidden 'if' (dangling-else hazard). Behavior at
** existing call sites ("CVMX_NAND_LOG_CALLED();") is unchanged. */
#define CVMX_NAND_LOG_CALLED() \
    do { \
        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
            cvmx_dprintf("%*s%s: called\n", 2*debug_indent++, "", __FUNCTION__); \
    } while (0)
170 /* This macro logs out each function parameter if debugging is on */
/* Log one function parameter (name and value, using the supplied printf
** format) when the CVMX_NAND_INITIALIZE_FLAGS_DEBUG flag is set.
** Fix: wrapped in do { } while (0) so the macro is a single statement and
** safe inside unbraced if/else in callers (the original bare 'if' was a
** dangling-else hazard). 'param' is evaluated once, as before. */
#define CVMX_NAND_LOG_PARAM(format, param) \
    do { \
        if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
            cvmx_dprintf("%*s%s: param %s = " format "\n", 2*debug_indent, "", __FUNCTION__, #param, param); \
    } while (0)
175 /* This macro logs out when a function returns a value */
/* Return value 'v' from the current function, logging the symbolic name
** and numeric value (and decreasing the debug indent) when debugging is
** enabled. NOTE(review): the lines that open the statement block and bind
** 'r' to v, and the trailing 'return r' (original lines 177-178, 181-183),
** are elided from this listing. */
176 #define CVMX_NAND_RETURN(v) \
179 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
180 cvmx_dprintf("%*s%s: returned %s(%d)\n", 2*--debug_indent, "", __FUNCTION__, #v, r); \
184 /* This macro logs out when a function doesn't return a value */
/* Return from a void function, logging the exit (and decreasing the debug
** indent) when debugging is enabled. NOTE(review): the surrounding block
** and the trailing 'return' (original lines 186, 189+) are elided from
** this listing. */
185 #define CVMX_NAND_RETURN_NOTHING() \
187 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG)) \
188 cvmx_dprintf("%*s%s: returned\n", 2*--debug_indent, "", __FUNCTION__); \
197 /* Compute the CRC for the ONFI parameter page. Adapted from sample code
198 ** in the specification.
/* Compute the CRC-16 over the first 254 bytes of one ONFI parameter page
** copy (bytes 254-255 hold the stored CRC). The shift register is seeded
** with 0x4F4E per the ONFI specification. NOTE(review): the inner CRC
** update step and the return statement (original lines 213-224) are
** elided from this listing — presumably the standard polynomial feedback
** and 'return crc & crcmask'; confirm against the full source. */
200 static uint16_t __onfi_parameter_crc_compute(uint8_t *data)
202 const int order = 16; // Order of the CRC-16
203 unsigned long i, j, c, bit;
204 unsigned long crc = 0x4F4E; // Initialize the shift register with 0x4F4E
205 unsigned long crcmask = ((((unsigned long)1<<(order-1))-1)<<1)|1;
206 unsigned long crchighbit = (unsigned long)1<<(order-1);
208 for (i = 0; i < 254; i++) /* CRC covers bytes 0-253 only */
210 c = (unsigned long)data[i];
211 for (j = 0x80; j; j >>= 1) { /* process bits MSB first */
212 bit = crc & crchighbit;
226 * Validate the ONFI parameter page and return a pointer to
229 * @param param_page Pointer to the raw NAND data returned after a parameter page read. It will
230 * contain at least 4 copies of the parameter structure.
232 * @return Pointer to a validated parameter page, or NULL if one couldn't be found.
/* Validate the four redundant copies of the ONFI parameter page returned
** by a parameter-page read and return a pointer to the first copy whose
** CRC matches. If all CRCs fail, fall back to accepting page 0 when the
** first two copies are byte-identical and key fields look plausible.
** Returns NULL when no usable copy is found. When debugging is enabled,
** also dumps the selected page's fields. NOTE(review): several control
** lines (braces, break/continue, the NULL return, original lines 236-237,
** 242, 246-249, 252, 254, 261, 265-271, 273-275 etc.) are elided from
** this listing. */
234 static cvmx_nand_onfi_param_page_t *__cvmx_nand_onfi_process(cvmx_nand_onfi_param_page_t param_page[4])
238 for (index=0; index<4; index++)
/* NOTE(review): "¶m_page" below is an HTML-entity mojibake of
** "&param_page" introduced by extraction — left byte-identical here;
** must read (void *)&param_page[index] in the real source. */
240 uint16_t crc = __onfi_parameter_crc_compute((void *)¶m_page[index]);
/* A matching CRC presumably breaks out of the loop on the elided next
** line (original 242); the "corrupt" message below runs otherwise. */
241 if (crc == cvmx_le16_to_cpu(param_page[index].crc))
243 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
244 cvmx_dprintf("%s: Paramter page %d is corrupt. (Expected CRC: 0x%04x, computed: 0x%04x)\n",
245 __FUNCTION__, index, cvmx_le16_to_cpu(param_page[index].crc), crc);
/* All four copies failed CRC: last-resort sanity check on copy 0. */
250 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
251 cvmx_dprintf("%s: All parameter pages fail CRC check. Checking to see if any look sane.\n", __FUNCTION__);
253 if (!memcmp(param_page, param_page + 1, 256))
255 /* First and second copies match, now check some values */
256 if (param_page[0].pages_per_block != 0 && param_page[0].pages_per_block != 0xFFFFFFFF
257 && param_page[0].page_data_bytes != 0 && param_page[0].page_data_bytes != 0xFFFFFFFF
258 && param_page[0].page_spare_bytes != 0 && param_page[0].page_spare_bytes != 0xFFFF
259 && param_page[0].blocks_per_lun != 0 && param_page[0].blocks_per_lun != 0xFFFFFFFF
260 && param_page[0].timing_mode != 0 && param_page[0].timing_mode != 0xFFFF)
262 /* Looks like we have enough values to use */
263 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
264 cvmx_dprintf("%s: Page 0 looks sane, using even though CRC fails.\n", __FUNCTION__)
272 cvmx_dprintf("%s: No valid ONFI parameter pages found.\n", __FUNCTION__);
/* Debug dump of the accepted parameter page. Multi-byte fields are
** little-endian per ONFI and byte-swapped for display. */
276 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
278 cvmx_dprintf("%*sONFI Information\n", 2*debug_indent, "");
280 cvmx_dprintf("%*sonfi = %c%c%c%c\n", 2*debug_indent, "", param_page[index].onfi[0], param_page[index].onfi[1],
281 param_page[index].onfi[2], param_page[index].onfi[3]);
282 cvmx_dprintf("%*srevision_number = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].revision_number));
283 cvmx_dprintf("%*sfeatures = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].features));
284 cvmx_dprintf("%*soptional_commands = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].optional_commands));
286 cvmx_dprintf("%*smanufacturer = %12.12s\n", 2*debug_indent, "", param_page[index].manufacturer);
287 cvmx_dprintf("%*smodel = %20.20s\n", 2*debug_indent, "", param_page[index].model);
288 cvmx_dprintf("%*sjedec_id = 0x%x\n", 2*debug_indent, "", param_page[index].jedec_id);
289 cvmx_dprintf("%*sdate_code = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].date_code));
291 cvmx_dprintf("%*spage_data_bytes = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].page_data_bytes));
292 cvmx_dprintf("%*spage_spare_bytes = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].page_spare_bytes));
293 cvmx_dprintf("%*spartial_page_data_bytes = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].partial_page_data_bytes));
294 cvmx_dprintf("%*spartial_page_spare_bytes = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].partial_page_spare_bytes));
295 cvmx_dprintf("%*spages_per_block = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].pages_per_block));
296 cvmx_dprintf("%*sblocks_per_lun = %u\n", 2*debug_indent, "", (int)cvmx_le32_to_cpu(param_page[index].blocks_per_lun));
297 cvmx_dprintf("%*snumber_lun = %u\n", 2*debug_indent, "", param_page[index].number_lun);
298 cvmx_dprintf("%*saddress_cycles = 0x%x\n", 2*debug_indent, "", param_page[index].address_cycles);
299 cvmx_dprintf("%*sbits_per_cell = %u\n", 2*debug_indent, "", param_page[index].bits_per_cell);
300 cvmx_dprintf("%*sbad_block_per_lun = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].bad_block_per_lun));
301 cvmx_dprintf("%*sblock_endurance = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].block_endurance));
302 cvmx_dprintf("%*sgood_blocks = %u\n", 2*debug_indent, "", param_page[index].good_blocks);
303 cvmx_dprintf("%*sgood_block_endurance = %u\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].good_block_endurance));
304 cvmx_dprintf("%*sprograms_per_page = %u\n", 2*debug_indent, "", param_page[index].programs_per_page);
305 cvmx_dprintf("%*spartial_program_attrib = 0x%x\n", 2*debug_indent, "", param_page[index].partial_program_attrib);
306 cvmx_dprintf("%*sbits_ecc = %u\n", 2*debug_indent, "", param_page[index].bits_ecc);
307 cvmx_dprintf("%*sinterleaved_address_bits = 0x%x\n", 2*debug_indent, "", param_page[index].interleaved_address_bits);
308 cvmx_dprintf("%*sinterleaved_attrib = 0x%x\n", 2*debug_indent, "", param_page[index].interleaved_attrib);
310 cvmx_dprintf("%*spin_capacitance = %u\n", 2*debug_indent, "", param_page[index].pin_capacitance);
311 cvmx_dprintf("%*stiming_mode = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].timing_mode));
312 cvmx_dprintf("%*scache_timing_mode = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].cache_timing_mode));
313 cvmx_dprintf("%*st_prog = %d us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_prog));
314 cvmx_dprintf("%*st_bers = %u us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_bers));
315 cvmx_dprintf("%*st_r = %u us\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_r));
316 cvmx_dprintf("%*st_ccs = %u ns\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].t_ccs));
317 cvmx_dprintf("%*svendor_revision = 0x%x\n", 2*debug_indent, "", cvmx_le16_to_cpu(param_page[index].vendor_revision));
318 //uint8_t vendor_specific[88]; /**< Byte 166-253: Vendor specific */
319 cvmx_dprintf("%*scrc = 0x%x\n", 2*debug_indent, "", param_page[index].crc);
/* 'index' is the first copy that passed validation (or 0 on fallback). */
322 return param_page + index;
/* Fill the hardware timing-parameter array tim_par[1..7] (in units the NDF
** expects; values are scaled by clocks_us = CPU clocks per microsecond,
** then divided by 1000, i.e. converted from ns to clocks) from the ONFI
** speed-mode table entry for 'mode'. tim_par[0] is hardware-fixed and not
** written. NOTE(review): the declarations of pulse_adjust/margin, the
** mode-range check body, and the enclosing braces (original lines 326,
** 328-332, 334-336, 340, 342) are elided from this listing. */
325 void __set_onfi_timing_mode(int *tim_par, int clocks_us, int mode)
327 const onfi_speed_mode_desc_t *mp = &onfi_speed_modes[mode]; /* use shorter name to fill in timing array */
333 cvmx_dprintf("%s: invalid ONFI timing mode: %d\n", __FUNCTION__, mode);
337 /* Adjust the read/write pulse duty cycle to make it more even. The cycle time
338 ** requirement is longer than the sum of the high low times, so we extend both the high
339 ** and low times to meet the cycle time requirement.
341 pulse_adjust = ((mp->twc - mp->twh - mp->twp)/2 + 1) * clocks_us;
343 /* Add a small margin to all timings. */
344 margin = 2 * clocks_us;
345 /* Update timing parameters based on supported mode */
346 tim_par[1] = CVMX_NAND_ROUNDUP(mp->twp * clocks_us + margin + pulse_adjust, 1000); /* Twp, WE# pulse width */
347 tim_par[2] = CVMX_NAND_ROUNDUP(max(mp->twh, mp->twc - mp->twp) * clocks_us + margin + pulse_adjust, 1000); /* Twh, WE# pulse width high */
348 tim_par[3] = CVMX_NAND_ROUNDUP(mp->tclh * clocks_us + margin, 1000); /* Tclh, CLE hold time */
349 tim_par[4] = CVMX_NAND_ROUNDUP(mp->tals * clocks_us + margin, 1000); /* Tals, ALE setup time */
350 tim_par[5] = tim_par[3]; /* Talh, ALE hold time */
351 tim_par[6] = tim_par[1]; /* Trp, RE# pulse width*/
352 tim_par[7] = tim_par[2]; /* Treh, RE# high hold time */
357 * Called to initialize the NAND controller for use. Note that
358 * you must be running out of L2 or memory and not NAND before
359 * calling this function.
361 * @param flags Optional initialization flags
362 * @param active_chips
363 * Each bit in this parameter represents a chip select that might
364 * contain NAND flash. Any chip select present in this bitmask may
365 * be connected to NAND. It is normally safe to pass 0xff here and
366 * let the API probe all 8 chip selects.
368 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
/* Initialize the NAND controller: reset the NDF FIFO, clear interrupt
** state, then (unless DONT_PROBE is set) probe each chip select in
** active_chips with READ ID, and for ONFI parts read the parameter page to
** fill cvmx_nand_state[chip] (geometry and timing mode). Returns
** CVMX_NAND_SUCCESS or a negative cvmx_nand_status_t. NOTE(review): many
** control lines (braces, continue statements, the simulator/start_chip
** setup, local declarations for chip/start_chip/stop_chip/clocks_us, the
** else branches) are elided from this listing. */
370 cvmx_nand_status_t cvmx_nand_initialize(cvmx_nand_initialize_flags_t flags, int active_chips)
376 cvmx_ndf_misc_t ndf_misc;
378 cvmx_nand_flags = flags;
379 CVMX_NAND_LOG_CALLED();
380 CVMX_NAND_LOG_PARAM("0x%x", flags);
382 memset(&cvmx_nand_state, 0, sizeof(cvmx_nand_state));
/* Outside the data-in-text build, the 4KB DMA bounce buffer is allocated
** once from bootmem (128-byte aligned) on first initialize. */
384 #ifndef USE_DATA_IN_TEXT
385 if (!cvmx_nand_buffer)
386 cvmx_nand_buffer = cvmx_bootmem_alloc(4096, 128);
388 if (!cvmx_nand_buffer)
389 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
391 /* Disable boot mode and reset the fifo */
392 ndf_misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
393 ndf_misc.s.rd_cmd = 0;
394 ndf_misc.s.bt_dma = 0;
395 ndf_misc.s.bt_dis = 1;
396 ndf_misc.s.ex_dis = 0;
397 ndf_misc.s.rst_ff = 1;
/* Read back after each write to make sure the CSR write has completed */
398 cvmx_write_csr(CVMX_NDF_MISC, ndf_misc.u64);
399 cvmx_read_csr(CVMX_NDF_MISC);
401 /* Bring the fifo out of reset */
403 ndf_misc.s.rst_ff = 0;
404 cvmx_write_csr(CVMX_NDF_MISC, ndf_misc.u64);
405 cvmx_read_csr(CVMX_NDF_MISC);
408 /* Clear the ECC counter */
409 //cvmx_write_csr(CVMX_NDF_ECC_CNT, cvmx_read_csr(CVMX_NDF_ECC_CNT));
411 /* Clear the interrupt state */
412 cvmx_write_csr(CVMX_NDF_INT, cvmx_read_csr(CVMX_NDF_INT));
413 cvmx_write_csr(CVMX_NDF_INT_EN, 0);
414 cvmx_write_csr(CVMX_MIO_NDF_DMA_INT, cvmx_read_csr(CVMX_MIO_NDF_DMA_INT));
415 cvmx_write_csr(CVMX_MIO_NDF_DMA_INT_EN, 0);
417 if (cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DONT_PROBE)
418 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
420 /* The simulator crashes if you access non existent devices. Assume
421 only chip select 1 is connected to NAND */
422 if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
433 /* Figure out how many clocks are in one microsecond, rounding up */
434 clocks_us = CVMX_NAND_ROUNDUP(cvmx_sysinfo_get()->cpu_clock_hz, 1000000);
436 /* Probe and see what NAND flash we can find */
437 for (chip=start_chip; chip<stop_chip; chip++)
439 cvmx_mio_boot_reg_cfgx_t mio_boot_reg_cfg;
440 cvmx_nand_onfi_param_page_t *onfi_param_page;
442 /* Skip chip selects that the caller didn't supply in the active chip bits */
443 if (((1<<chip) & active_chips) == 0)
446 mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(chip));
447 /* Enabled regions can't be connected to NAND flash */
448 if (mio_boot_reg_cfg.s.en)
451 /* Start out with some sane, but slow, defaults */
452 cvmx_nand_state[chip].page_size = 0;
453 cvmx_nand_state[chip].oob_size = 64;
454 cvmx_nand_state[chip].pages_per_block = 64;
455 cvmx_nand_state[chip].blocks = 100;
456 cvmx_nand_state[chip].tim_mult = 0; /* Don't use a multiplier. Values are in cycles */
459 /* Set timing mode to ONFI mode 0 for initial accesses */
460 __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, 0);
462 /* Put the index of which timing parameter to use. The indexes are into the tim_par
463 ** which match the indexes of the 8 timing parameters that the hardware supports.
464 ** Index 0 is not software controlled, and is fixed by hardware. */
465 cvmx_nand_state[chip].clen[0] = 0; /* Command doesn't need to be held before WE */
466 cvmx_nand_state[chip].clen[1] = 1; /* Twp, WE# pulse width */
467 cvmx_nand_state[chip].clen[2] = 3; /* Tclh, CLE hold time */
468 cvmx_nand_state[chip].clen[3] = 1;
470 cvmx_nand_state[chip].alen[0] = 4; /* Tals, ALE setup time */
471 cvmx_nand_state[chip].alen[1] = 1; /* Twp, WE# pulse width */
472 cvmx_nand_state[chip].alen[2] = 2; /* Twh, WE# pulse width high */
473 cvmx_nand_state[chip].alen[3] = 5; /* Talh, ALE hold time */
475 cvmx_nand_state[chip].rdn[0] = 0;
476 cvmx_nand_state[chip].rdn[1] = 6; /* Trp, RE# pulse width*/
477 cvmx_nand_state[chip].rdn[2] = 7; /* Treh, RE# high hold time */
478 cvmx_nand_state[chip].rdn[3] = 0;
480 cvmx_nand_state[chip].wrn[0] = 1; /* Twp, WE# pulse width */
481 cvmx_nand_state[chip].wrn[1] = 2; /* Twh, WE# pulse width high */
483 /* Probe and see if we get an answer */
484 memset(cvmx_nand_buffer, 0xff, 8);
485 if (cvmx_nand_read_id(chip, 0x0, cvmx_ptr_to_phys(cvmx_nand_buffer), 8) < 4)
487 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
488 cvmx_dprintf("%s: Failed to probe chip %d\n", __FUNCTION__, chip);
/* All-ones ID means nothing is driving the bus on this chip select. */
491 if (*(uint32_t*)cvmx_nand_buffer == 0xffffffff)
493 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
494 cvmx_dprintf("%s: Probe returned nothing for chip %d\n", __FUNCTION__, chip);
498 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
499 cvmx_dprintf("%s: NAND chip %d has ID 0x%08llx\n", __FUNCTION__, chip, (unsigned long long int)*(uint64_t*)cvmx_nand_buffer);
/* READ ID at address 0x20 returns "ONFI" for compliant parts. */
501 if (cvmx_nand_read_id(chip, 0x20, cvmx_ptr_to_phys(cvmx_nand_buffer), 4) < 4)
503 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
504 cvmx_dprintf("%s: Failed to probe chip %d\n", __FUNCTION__, chip);
508 if (!((cvmx_nand_buffer[0] == 'O') && (cvmx_nand_buffer[1] == 'N') &&
509 (cvmx_nand_buffer[2] == 'F') && (cvmx_nand_buffer[3] == 'I')))
511 /* FIXME: This is where non ONFI NAND devices need to be handled */
512 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
513 cvmx_dprintf("%s: Chip %d doesn't support ONFI, skipping\n", __FUNCTION__, chip);
517 cvmx_nand_read_param_page(chip, cvmx_ptr_to_phys(cvmx_nand_buffer), 1024);
518 onfi_param_page = __cvmx_nand_onfi_process((cvmx_nand_onfi_param_page_t *)cvmx_nand_buffer);
521 /* ONFI NAND parts are described by a parameter page. Here we extract the configuration values
522 ** from the parameter page that we need to access the chip. */
523 cvmx_nand_state[chip].page_size = cvmx_le32_to_cpu(onfi_param_page->page_data_bytes);
524 cvmx_nand_state[chip].oob_size = cvmx_le16_to_cpu(onfi_param_page->page_spare_bytes);
525 cvmx_nand_state[chip].pages_per_block = cvmx_le32_to_cpu(onfi_param_page->pages_per_block);
526 cvmx_nand_state[chip].blocks = cvmx_le32_to_cpu(onfi_param_page->blocks_per_lun) * onfi_param_page->number_lun;
/* timing_mode is a bitmask of supported modes (bits 0-5); pick the
** highest supported mode. */
528 if (cvmx_le16_to_cpu(onfi_param_page->timing_mode) <= 0x3f)
530 int mode_mask = cvmx_le16_to_cpu(onfi_param_page->timing_mode);
533 for (i = 0; i < 6;i++)
535 if (mode_mask & (1 << i))
538 cvmx_nand_state[chip].onfi_timing = mode;
542 cvmx_dprintf("%s: Invalid timing mode (%d) in ONFI parameter page, ignoring\n", __FUNCTION__, cvmx_nand_state[chip].onfi_timing);
543 cvmx_nand_state[chip].onfi_timing = 0;
546 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
547 cvmx_dprintf("%s: Using ONFI timing mode: %d\n", __FUNCTION__, cvmx_nand_state[chip].onfi_timing);
548 __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
552 /* We did not find a valid parameter page in the FLASH part. This means that the part
553 ** does not provide the parameter page that ONFI requires. In this case, hard coded defaults
554 ** can be used, but they _must_ be updated to match the flash used.
556 /* Enable this code to force a configuration for NAND chip that doesn't have a proper parameter page.
557 ** ONFI requires a parameter page, so this should not be needed for compliant chips */
559 /* The default values below are for the Numonyx NAND08GW3B2CN6E part */
560 #define NAND_SIZE_BITS (8*1024*1024*1024ULL)
561 cvmx_nand_state[chip].page_size = 2048; /* NAND page size in bytes */
562 cvmx_nand_state[chip].oob_size = 64; /* NAND OOB (spare) size in bytes (per page) */
563 cvmx_nand_state[chip].pages_per_block = 64;
564 cvmx_nand_state[chip].blocks = (NAND_SIZE_BITS)/(8ULL*cvmx_nand_state[chip].page_size*cvmx_nand_state[chip].pages_per_block);
565 cvmx_nand_state[chip].onfi_timing = 2;
566 cvmx_dprintf("%s: WARNING: No valid ONFI parameter page found, using fixed defaults.\n", __FUNCTION__);
567 cvmx_dprintf("%s: Defaults: page size: %d, OOB size: %d, pages per block %d, part size: %d MBytes, timing mode: %d\n",
568 __FUNCTION__, cvmx_nand_state[chip].page_size, cvmx_nand_state[chip].oob_size, cvmx_nand_state[chip].pages_per_block,
569 (int)(NAND_SIZE_BITS/(8*1024*1024)), cvmx_nand_state[chip].onfi_timing);
571 __set_onfi_timing_mode(cvmx_nand_state[chip].tim_par, clocks_us, cvmx_nand_state[chip].onfi_timing);
577 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
582 * Call to shutdown the NAND controller after all transactions
583 * are done. In most setups this will never be called.
585 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
/* Shut the NAND layer down by clearing all per-chip state; after this no
** chip select reports as active. Always returns CVMX_NAND_SUCCESS. */
587 cvmx_nand_status_t cvmx_nand_shutdown(void)
589 CVMX_NAND_LOG_CALLED();
590 memset(&cvmx_nand_state, 0, sizeof(cvmx_nand_state));
591 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
596 * Returns a bitmask representing the chip selects that are
597 * connected to NAND chips. This can be called after the
598 * initialize to determine the actual number of NAND chips
599 * found. Each bit in the response coresponds to a chip select.
601 * @return Zero if no NAND chips were found. Otherwise a bit is set for
602 * each chip select (1<<chip).
/* Return a bitmask of the chip selects where initialization found a NAND
** device; a nonzero page_size marks a probed chip. NOTE(review): the
** result accumulation and return (original lines 605-615) are elided from
** this listing. */
604 int cvmx_nand_get_active_chips(void)
608 for (chip=0; chip<8; chip++)
610 if (cvmx_nand_state[chip].page_size)
618 * Override the timing parameters for a NAND chip
620 * @param chip Chip select to override
628 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
/* Override the bus timing for an already-probed chip select: copies the
** caller's timing multiplier, the 8 timing parameters, and the 4-entry
** (2 for wrn) phase-index arrays into cvmx_nand_state[chip]. Returns
** CVMX_NAND_INVALID_PARAM for an out-of-range or un-probed chip.
** NOTE(review): the for-loop headers driving each copy (original lines
** 641, 643, 645, 647, 649) are elided from this listing. */
630 cvmx_nand_status_t cvmx_nand_set_timing(int chip, int tim_mult, int tim_par[8], int clen[4], int alen[4], int rdn[4], int wrn[2])
633 CVMX_NAND_LOG_CALLED();
635 if ((chip < 0) || (chip > 7))
636 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
637 if (!cvmx_nand_state[chip].page_size)
638 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
640 cvmx_nand_state[chip].tim_mult = tim_mult;
642 cvmx_nand_state[chip].tim_par[i] = tim_par[i];
644 cvmx_nand_state[chip].clen[i] = clen[i];
646 cvmx_nand_state[chip].alen[i] = alen[i];
648 cvmx_nand_state[chip].rdn[i] = rdn[i];
650 cvmx_nand_state[chip].wrn[i] = wrn[i];
652 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
658 * Get the number of free bytes in the NAND command queue
660 * @return Number of bytes in queue
/* Read the NDF_MISC CSR and return its fr_byt field: the number of free
** bytes currently available in the NAND command FIFO. */
662 static inline int __cvmx_nand_get_free_cmd_bytes(void)
664 cvmx_ndf_misc_t ndf_misc;
665 CVMX_NAND_LOG_CALLED();
666 ndf_misc.u64 = cvmx_read_csr(CVMX_NDF_MISC);
667 CVMX_NAND_RETURN((int)ndf_misc.s.fr_byt);
672 * Submit a command to the NAND command queue. Generally this
673 * will not be used directly. Instead most programs will use the other
674 * higher level NAND functions.
676 * @param cmd Command to submit
678 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
/* Queue one command into the NDF command FIFO. Depending on the opcode the
** encoded command occupies one or two 64-bit words; the FIFO free space is
** checked first and CVMX_NAND_NO_MEMORY is returned when it would not fit.
** cmd.u64[1] holds the first (or only) word. NOTE(review): the opening
** case labels for the one-word group (opcodes 0-2, 4), the two-word ALE
** else-branch labels, and the default label (original lines 687,
** 689-691, 693-695, 702, 705, 710-712, 718-719, 726-727) are elided from
** this listing. */
680 cvmx_nand_status_t cvmx_nand_submit(cvmx_nand_cmd_t cmd)
682 CVMX_NAND_LOG_CALLED();
683 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)cmd.u64[0]);
684 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)cmd.u64[1]);
685 CVMX_NAND_LOG_PARAM("%s", cvmx_nand_opcode_labels[cmd.s.op_code]);
686 switch (cmd.s.op_code)
688 /* All these commands fit in one 64bit word */
692 case 3: /* Chip Enable/Disable */
696 case 10: /* Read EDO */
697 case 15: /* Bus Aquire/Release */
698 if (__cvmx_nand_get_free_cmd_bytes() < 8)
699 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
700 cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
701 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
703 case 5: /* ALE commands take either one or two 64bit words */
704 if (cmd.ale.adr_byte_num < 5)
706 if (__cvmx_nand_get_free_cmd_bytes() < 8)
707 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
708 cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
709 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
/* 5 or more address bytes: the command spills into a second word */
713 if (__cvmx_nand_get_free_cmd_bytes() < 16)
714 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
715 cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
716 cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[0]);
717 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
720 case 11: /* Wait status commands take two 64bit words */
721 if (__cvmx_nand_get_free_cmd_bytes() < 16)
722 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
723 cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[1]);
724 cvmx_write_csr(CVMX_NDF_CMD, cmd.u64[0]);
725 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
/* Unrecognized opcode */
728 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
735 * Get the number of bits required to encode the column bits. This
736 * does not include padding to align on a byte boundary.
738 * @param chip NAND chip to get data for
740 * @return Number of column bits
/* Number of bits needed to address a byte within a page (population count
** of page_size-1; assumes page_size is a power of two -- TODO confirm). */
742 static inline int __cvmx_nand_get_column_bits(int chip)
744 return cvmx_pop(cvmx_nand_state[chip].page_size - 1);
750 * Get the number of bits required to encode the row bits. This
751 * does not include padding to align on a byte boundary.
753 * @param chip NAND chip to get data for
755 * @return Number of row bits
/* Number of row-address bits: block bits plus page-within-block bits
** (each a popcount of count-1; assumes power-of-two counts -- TODO
** confirm for the blocks value, which may not be a power of two). */
757 static inline int __cvmx_nand_get_row_bits(int chip)
759 return cvmx_pop(cvmx_nand_state[chip].blocks-1) + cvmx_pop(cvmx_nand_state[chip].pages_per_block-1);
765 * Get the number of address cycles required for this NAND part.
766 * This include column bits, padding, page bits, and block bits.
768 * @param chip NAND chip to get data for
770 * @return Number of address cycles on the bus
/* Total number of address cycles (bytes on the bus) for this part: the
** column bits and the row bits are each padded up to a whole number of
** bytes, then the sum is converted to bytes. */
772 static inline int __cvmx_nand_get_address_cycles(int chip)
774 int address_bits = ((__cvmx_nand_get_column_bits(chip) + 7) >> 3) << 3;
775 address_bits += ((__cvmx_nand_get_row_bits(chip) + 7) >> 3) << 3;
776 return (address_bits + 7) >> 3;
782 * Build the set of command common to most transactions
783 * @param chip NAND chip to program
784 * @param cmd_data NAND command for CLE cycle 1
785 * @param num_address_cycles
786 * Number of address cycles to put on the bus
787 * @param nand_address
788 * Data to be put on the bus. It is translated according to
789 * the rules in the file information section.
791 * @param cmd_data2 If non zero, adds a second CLE cycle used by a number of NAND
794 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
/* Queue the command prologue shared by most NAND transactions: timing
** parameters, bus acquire, chip enable, a fixed wait, the first CLE
** command byte, optional ALE address cycles, and an optional second CLE
** byte. Returns the first failing cvmx_nand_submit() status, or
** CVMX_NAND_SUCCESS. NOTE(review): the declaration of 'cmd', the
** "if (result)" guards before each early CVMX_NAND_RETURN, the wait-cycle
** field assignments, and several braces (original lines 797, 799-800,
** 816, 822-824, 830, 834, 843-844, 854, 856, 858-859, 861, 865, 868-870,
** 877, 882, 884, 886-890, 896, 898, 900-901) are elided from this
** listing. */
796 static inline cvmx_nand_status_t __cvmx_nand_build_pre_cmd(int chip, int cmd_data, int num_address_cycles, uint64_t nand_address, int cmd_data2)
798 cvmx_nand_status_t result;
801 CVMX_NAND_LOG_CALLED();
803 /* Send timing parameters */
804 memset(&cmd, 0, sizeof(cmd));
805 cmd.set_tm_par.one = 1;
806 cmd.set_tm_par.tim_mult = cvmx_nand_state[chip].tim_mult;
807 /* tim_par[0] unused */
808 cmd.set_tm_par.tim_par1 = cvmx_nand_state[chip].tim_par[1];
809 cmd.set_tm_par.tim_par2 = cvmx_nand_state[chip].tim_par[2];
810 cmd.set_tm_par.tim_par3 = cvmx_nand_state[chip].tim_par[3];
811 cmd.set_tm_par.tim_par4 = cvmx_nand_state[chip].tim_par[4];
812 cmd.set_tm_par.tim_par5 = cvmx_nand_state[chip].tim_par[5];
813 cmd.set_tm_par.tim_par6 = cvmx_nand_state[chip].tim_par[6];
814 cmd.set_tm_par.tim_par7 = cvmx_nand_state[chip].tim_par[7];
815 result = cvmx_nand_submit(cmd);
817 CVMX_NAND_RETURN(result);
819 /* Send bus select */
820 memset(&cmd, 0, sizeof(cmd));
821 cmd.bus_acq.fifteen = 15;
823 result = cvmx_nand_submit(cmd);
825 CVMX_NAND_RETURN(result);
827 /* Send chip select */
828 memset(&cmd, 0, sizeof(cmd));
829 cmd.chip_en.chip = chip;
831 cmd.chip_en.three = 3;
832 cmd.chip_en.width = (cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_16BIT) ? 2 : 1;
833 result = cvmx_nand_submit(cmd);
835 CVMX_NAND_RETURN(result);
837 /* Send wait, fixed time
838 ** This meets chip enable to command latch enable timing.
839 ** This is tCS - tCLS from the ONFI spec.
840 ** Use tWP as a proxy, as this is adequate for
841 ** all ONFI 1.0 timing modes. */
842 memset(&cmd, 0, sizeof(cmd));
845 if (cvmx_nand_submit(cmd))
846 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
/* First CLE command byte */
849 memset(&cmd, 0, sizeof(cmd));
850 cmd.cle.cmd_data = cmd_data;
851 cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
852 cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
853 cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
855 result = cvmx_nand_submit(cmd);
857 CVMX_NAND_RETURN(result);
/* Optional ALE address cycles */
860 if (num_address_cycles)
862 memset(&cmd, 0, sizeof(cmd));
863 cmd.ale.adr_byte_num = num_address_cycles;
/* Fewer cycles than the part's full address: send raw (ID/status reads) */
864 if (num_address_cycles < __cvmx_nand_get_address_cycles(chip))
866 cmd.ale.adr_bytes_l = nand_address;
867 cmd.ale.adr_bytes_h = nand_address >> 32;
/* Full address: split into column and byte-aligned row fields */
871 int column_bits = __cvmx_nand_get_column_bits(chip);
872 int column_shift = ((column_bits + 7) >> 3) << 3;
873 int column = nand_address & (cvmx_nand_state[chip].page_size-1);
874 int row = nand_address >> column_bits;
875 cmd.ale.adr_bytes_l = column + (row << column_shift);
876 cmd.ale.adr_bytes_h = row >> (32 - column_shift);
878 cmd.ale.alen1 = cvmx_nand_state[chip].alen[0];
879 cmd.ale.alen2 = cvmx_nand_state[chip].alen[1];
880 cmd.ale.alen3 = cvmx_nand_state[chip].alen[2];
881 cmd.ale.alen4 = cvmx_nand_state[chip].alen[3];
883 result = cvmx_nand_submit(cmd);
885 CVMX_NAND_RETURN(result);
/* Optional second CLE command byte (guard "if (cmd_data2)" elided) */
891 memset(&cmd, 0, sizeof(cmd));
892 cmd.cle.cmd_data = cmd_data2;
893 cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
894 cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
895 cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
897 result = cvmx_nand_submit(cmd);
899 CVMX_NAND_RETURN(result);
902 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
908 * Build the set of command common to most transactions
909 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
/* Queue the command epilogue shared by most NAND transactions: chip
** deselect and bus release, then ring the NDF doorbell to start execution
** of everything queued since the last doorbell. NOTE(review): the
** declaration of 'cmd' and the "if (result)" guards before the early
** returns (original lines 912, 914-915, 922, 929, 931) are elided from
** this listing. */
911 static inline cvmx_nand_status_t __cvmx_nand_build_post_cmd(void)
913 cvmx_nand_status_t result;
916 CVMX_NAND_LOG_CALLED();
918 /* Send chip deselect */
919 memset(&cmd, 0, sizeof(cmd));
920 cmd.chip_dis.three = 3;
921 result = cvmx_nand_submit(cmd);
923 CVMX_NAND_RETURN(result);
925 /* Send bus release */
926 memset(&cmd, 0, sizeof(cmd));
927 cmd.bus_rel.fifteen = 15;
928 result = cvmx_nand_submit(cmd);
930 CVMX_NAND_RETURN(result);
932 /* Ring the doorbell */
933 cvmx_write_csr(CVMX_NDF_DRBELL, 1);
934 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
940 * Setup the NAND DMA engine for a transfer
942 * @param chip Chip select for NAND flash
943 * @param is_write Non zero if this is a write
944 * @param buffer_address
945 * Physical memory address to DMA to/from
946 * @param buffer_length
947 * Length of the DMA in bytes
/* Program MIO_NDF_DMA_CFG for one transfer: direction (rw=1 reads memory
** and writes flash), size in 64-bit words minus one, and the physical
** buffer address; enables the engine. NOTE(review): the line between 956
** and 958 (presumably zeroing ndf_dma_cfg.u64 before the field writes)
** and line 963 are elided from this listing -- confirm against the full
** source. */
949 static inline void __cvmx_nand_setup_dma(int chip, int is_write, uint64_t buffer_address, int buffer_length)
951 cvmx_mio_ndf_dma_cfg_t ndf_dma_cfg;
952 CVMX_NAND_LOG_CALLED();
953 CVMX_NAND_LOG_PARAM("%d", chip);
954 CVMX_NAND_LOG_PARAM("%d", is_write);
955 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
956 CVMX_NAND_LOG_PARAM("%d", buffer_length);
958 ndf_dma_cfg.s.en = 1;
959 ndf_dma_cfg.s.rw = is_write; /* One means DMA reads from memory and writes to flash */
960 ndf_dma_cfg.s.clr = 0;
961 ndf_dma_cfg.s.size = ((buffer_length + 7) >> 3) - 1; /* size is in 64-bit words, minus one */
962 ndf_dma_cfg.s.adr = buffer_address;
964 cvmx_write_csr(CVMX_MIO_NDF_DMA_CFG, ndf_dma_cfg.u64);
965 CVMX_NAND_RETURN_NOTHING();
970 * Dump a buffer out in hex for debug
972 * @param buffer_address
973 * Starting physical address
974 * @param buffer_length
975 * Number of bytes to display
977 static void __cvmx_nand_hex_dump(uint64_t buffer_address, int buffer_length)
979 uint8_t *buffer = cvmx_phys_to_ptr(buffer_address);
/* NOTE(review): this extract is heavily truncated - the declarations of
** "offset" and "i" and the inner per-row loop structure are missing.
** The visible lines show an outer loop over the buffer printing a row
** header followed by per-byte hex output. Verify against upstream. */
981 while (offset < buffer_length)
984 cvmx_dprintf("%*s%04x:", 2*debug_indent, "", offset);
989 if (offset+i < buffer_length)
990 cvmx_dprintf("%02x", 0xff & buffer[offset+i]);
1001 * Perform a low level NAND read command
1003 * @param chip Chip to read from
1004 * @param nand_command1
1005 * First command cycle value
1006 * @param address_cycles
1007 * Number of address cycles after command 1
1008 * @param nand_address
1009 * NAND address to use for address cycles
1010 * @param nand_command2
1011 * NAND command cycle 2 if not zero
1012 * @param buffer_address
1013 * Physical address to DMA into
1014 * @param buffer_length
1015 * Length of the transfer in bytes
1017 * @return Number of bytes transferred or a negative error code
1019 static inline int __cvmx_nand_low_level_read(int chip, int nand_command1, int address_cycles, uint64_t nand_address, int nand_command2, uint64_t buffer_address, int buffer_length)
1021 cvmx_nand_cmd_t cmd;
1022 cvmx_mio_ndf_dma_cfg_t ndf_dma_cfg;
/* NOTE(review): the declaration of "bytes" (used at the bottom) is not
** visible in this extract - verify against upstream cvmx-nand.c. */
1025 CVMX_NAND_LOG_CALLED();
1026 CVMX_NAND_LOG_PARAM("%d", chip);
1027 CVMX_NAND_LOG_PARAM("0x%x", nand_command1);
1028 CVMX_NAND_LOG_PARAM("%d", address_cycles);
1029 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1030 CVMX_NAND_LOG_PARAM("0x%x", nand_command2);
1031 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1032 CVMX_NAND_LOG_PARAM("%d", buffer_length);
/* Parameter validation: chip select range, non-null and 8-byte aligned
** DMA buffer (the DMA engine works on 64-bit words) */
1034 if ((chip < 0) || (chip > 7))
1035 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1036 if (!buffer_address)
1037 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1038 if (buffer_address & 7)
1039 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
/* NOTE(review): the condition guarding this return (original line 1040,
** presumably a buffer_length validity check) is missing from this extract. */
1041 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1043 /* Build the command and address cycles */
1044 if (__cvmx_nand_build_pre_cmd(chip, nand_command1, address_cycles, nand_address, nand_command2))
1045 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1047 /* Send WAIT. This waits for some time, then
1048 ** waits for busy to be de-asserted. */
1049 memset(&cmd, 0, sizeof(cmd));
/* NOTE(review): the cmd.wait field setup (original lines 1050-1052) is
** missing from this extract - as shown the WAIT command is submitted
** with all-zero fields. Verify against upstream. */
1053 if (cvmx_nand_submit(cmd))
1054 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1056 /* Wait for tRR after busy de-asserts.
1057 ** Use 2* tALS as proxy. This is overkill in
1058 ** the slow modes, but not bad in the faster ones. */
/* NOTE(review): the setup between the comment above and the two submits
** below (original lines 1059-1060) is missing from this extract. The
** wait command appears to be submitted twice to cover 2*tALS. */
1061 if (cvmx_nand_submit(cmd))
1062 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1063 if (cvmx_nand_submit(cmd))
1064 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
/* Queue the actual data-read command */
1067 memset(&cmd, 0, sizeof(cmd));
1068 cmd.rd.data_bytes = buffer_length;
1069 if (cvmx_nand_state[chip].onfi_timing >= 4)
1070 cmd.rd.nine = 10; /* READ_EDO command is required for ONFI timing modes 4 and 5 */
/* NOTE(review): the "else" branch setting cmd.rd.nine for slower timing
** modes (original lines 1071-1072) is missing from this extract. */
1073 cmd.rd.rdn1 = cvmx_nand_state[chip].rdn[0];
1074 cmd.rd.rdn2 = cvmx_nand_state[chip].rdn[1];
1075 cmd.rd.rdn3 = cvmx_nand_state[chip].rdn[2];
1076 cmd.rd.rdn4 = cvmx_nand_state[chip].rdn[3];
1077 if (cvmx_nand_submit(cmd))
1078 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
/* Program the DMA engine for a read (is_write = 0) into the buffer */
1080 __cvmx_nand_setup_dma(chip, 0, buffer_address, buffer_length);
1082 if (__cvmx_nand_build_post_cmd())
1083 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1085 /* Wait for the DMA to complete */
1086 if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_NDF_DMA_CFG, cvmx_mio_ndf_dma_cfg_t, en, ==, 0, NAND_TIMEOUT_USECS))
1087 CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
1089 /* Return the number of bytes transferred: the DMA address register
** advances as data moves, so final address minus start gives the count */
1090 ndf_dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_NDF_DMA_CFG);
1091 bytes = ndf_dma_cfg.s.adr - buffer_address;
1093 if (cvmx_unlikely(cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_DEBUG))
1094 __cvmx_nand_hex_dump(buffer_address, bytes);
1096 CVMX_NAND_RETURN(bytes);
1101 * Read a page from NAND. If the buffer has room, the out of band
1102 * data will be included.
1104 * @param chip Chip select for NAND flash
1105 * @param nand_address
1106 * Location in NAND to read. See description in file comment
1107 * @param buffer_address
1108 * Physical address to store the result at
1109 * @param buffer_length
1110 * Number of bytes to read
1112 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1114 int cvmx_nand_page_read(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length)
/* NOTE(review): the declaration of "bytes" (original lines 1115-1117) is
** not visible in this extract - verify against upstream cvmx-nand.c. */
1118 CVMX_NAND_LOG_CALLED();
1119 CVMX_NAND_LOG_PARAM("%d", chip);
1120 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1121 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1122 CVMX_NAND_LOG_PARAM("%d", buffer_length);
/* Validate chip select, that the chip was probed (non-zero page size),
** and that the DMA buffer is non-null and 8-byte aligned */
1124 if ((chip < 0) || (chip > 7))
1125 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1126 if (!cvmx_nand_state[chip].page_size)
1127 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1128 if (!buffer_address)
1129 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1130 if (buffer_address & 7)
1131 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
/* NOTE(review): the condition guarding this return (original line 1132,
** presumably a buffer_length check) is missing from this extract. */
1133 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
/* Issue READ (0x00) + address cycles + READ_FIN (0x30) and DMA the page in */
1135 bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ, __cvmx_nand_get_address_cycles(chip), nand_address, NAND_COMMAND_READ_FIN, buffer_address, buffer_length);
1136 CVMX_NAND_RETURN(bytes);
1141 * Write a page to NAND. The buffer must contain the entire page
1142 * including the out of band data.
1144 * @param chip Chip select for NAND flash
1145 * @param nand_address
1146 * Location in NAND to write. See description in file comment
1147 * @param buffer_address
1148 * Physical address to read the data from
1150 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1152 cvmx_nand_status_t cvmx_nand_page_write(int chip, uint64_t nand_address, uint64_t buffer_address)
1154 cvmx_nand_cmd_t cmd;
/* NOTE(review): the declaration of "buffer_length" (assigned below) is
** not visible in this extract - verify against upstream cvmx-nand.c. */
1157 CVMX_NAND_LOG_CALLED();
1158 CVMX_NAND_LOG_PARAM("%d", chip);
1159 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1160 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
/* Validate chip select, probed chip, and 8-byte aligned non-null buffer */
1162 if ((chip < 0) || (chip > 7))
1163 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1164 if (!cvmx_nand_state[chip].page_size)
1165 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1166 if (!buffer_address)
1167 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1168 if (buffer_address & 7)
1169 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
/* The whole page plus the out-of-band area is always written */
1171 buffer_length = cvmx_nand_state[chip].page_size + cvmx_nand_state[chip].oob_size;
1173 /* Build the command and address cycles */
1174 if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_PROGRAM, __cvmx_nand_get_address_cycles(chip), nand_address, 0))
1175 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
/* Queue the data-write command */
1178 memset(&cmd, 0, sizeof(cmd));
1179 cmd.wr.data_bytes = buffer_length;
1181 cmd.wr.wrn1 = cvmx_nand_state[chip].wrn[0];
1182 cmd.wr.wrn2 = cvmx_nand_state[chip].wrn[1];
1183 if (cvmx_nand_submit(cmd))
1184 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1186 /* Send WRITE command */
1187 memset(&cmd, 0, sizeof(cmd));
1188 cmd.cle.cmd_data = NAND_COMMAND_PROGRAM_FIN;
1189 cmd.cle.clen1 = cvmx_nand_state[chip].clen[0];
1190 cmd.cle.clen2 = cvmx_nand_state[chip].clen[1];
1191 cmd.cle.clen3 = cvmx_nand_state[chip].clen[2];
1193 if (cvmx_nand_submit(cmd))
1194 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
/* Program the DMA engine for a write (is_write = 1) from the buffer */
1196 __cvmx_nand_setup_dma(chip, 1, buffer_address, buffer_length);
1198 /* WAIT for R_B to signal program is complete */
1199 memset(&cmd, 0, sizeof(cmd));
/* NOTE(review): the cmd.wait field setup (original lines 1200-1202) is
** missing from this extract - verify against upstream. */
1203 if (cvmx_nand_submit(cmd))
1204 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1206 if (__cvmx_nand_build_post_cmd())
1207 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1209 /* Wait for the DMA to complete */
1210 if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_NDF_DMA_CFG, cvmx_mio_ndf_dma_cfg_t, en, ==, 0, NAND_TIMEOUT_USECS))
1211 CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
1213 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1218 * Erase a NAND block. A single block contains multiple pages.
1220 * @param chip Chip select for NAND flash
1221 * @param nand_address
1222 * Location in NAND to erase. See description in file comment
1224 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1226 cvmx_nand_status_t cvmx_nand_block_erase(int chip, uint64_t nand_address)
1228 cvmx_nand_cmd_t cmd;
1230 CVMX_NAND_LOG_CALLED();
1231 CVMX_NAND_LOG_PARAM("%d", chip);
1232 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1234 if ((chip < 0) || (chip > 7))
1235 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1236 if (!cvmx_nand_state[chip].page_size)
1237 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1239 /* Build the command and address cycles. Erase takes only row address
** cycles, so shift out the column bits and send one address byte per
** 8 row bits (rounded up). */
1240 if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_ERASE,
1241 (__cvmx_nand_get_row_bits(chip)+7) >> 3,
1242 nand_address >> __cvmx_nand_get_column_bits(chip),
1243 NAND_COMMAND_ERASE_FIN))
1244 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1246 /* WAIT for R_B to signal erase is complete */
1247 memset(&cmd, 0, sizeof(cmd));
/* NOTE(review): the cmd.wait field setup (original lines 1248-1250) is
** missing from this extract - verify against upstream. */
1251 if (cvmx_nand_submit(cmd))
1252 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1254 if (__cvmx_nand_build_post_cmd())
1255 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1257 /* Wait for the command queue to be idle, which means the wait is done */
1258 if (CVMX_WAIT_FOR_FIELD64(CVMX_NDF_ST_REG, cvmx_ndf_st_reg_t, exe_idle, ==, 1, NAND_TIMEOUT_USECS))
1259 CVMX_NAND_RETURN(CVMX_NAND_TIMEOUT);
1261 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1266 * Read the NAND ID information
1268 * @param chip Chip select for NAND flash
1269 * @param nand_address
1270 * NAND address to read ID from. Usually this is either 0x0 or 0x20.
1271 * @param buffer_address
1272 * Physical address to store data in
1273 * @param buffer_length
1274 * Length of the buffer. Usually this is 4 bytes
1276 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1278 int cvmx_nand_read_id(int chip, uint64_t nand_address, uint64_t buffer_address, int buffer_length)
/* NOTE(review): the declaration of "bytes" (original lines 1279-1280) is
** not visible in this extract - verify against upstream cvmx-nand.c. */
1282 CVMX_NAND_LOG_CALLED();
1283 CVMX_NAND_LOG_PARAM("%d", chip);
1284 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)nand_address);
1285 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1286 CVMX_NAND_LOG_PARAM("%d", buffer_length);
1288 if ((chip < 0) || (chip > 7))
1289 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1290 if (!buffer_address)
1291 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1292 if (buffer_address & 7)
1293 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
/* NOTE(review): the condition guarding this return (original line 1294,
** presumably a buffer_length check) is missing from this extract. */
1295 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
/* READ ID (0x90) with a single address cycle, no second command */
1297 bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ_ID, 1, nand_address, 0, buffer_address, buffer_length);
1298 CVMX_NAND_RETURN(bytes);
1303 * Read the ONFI NAND parameter page
1305 * @param chip Chip select for NAND flash
1306 * @param buffer_address
1307 * Physical address to store data in
1308 * @param buffer_length
1309 * Length of the buffer
1311 * @return Bytes read on success, a negative cvmx_nand_status_t error code on failure
1313 int cvmx_nand_read_param_page(int chip, uint64_t buffer_address, int buffer_length)
/* NOTE(review): the declaration of "bytes" (original lines 1314-1315) is
** not visible in this extract - verify against upstream cvmx-nand.c. */
1317 CVMX_NAND_LOG_CALLED();
1318 CVMX_NAND_LOG_PARAM("%d", chip);
1319 CVMX_NAND_LOG_PARAM("0x%llx", (ULL)buffer_address);
1320 CVMX_NAND_LOG_PARAM("%d", buffer_length);
1322 if ((chip < 0) || (chip > 7))
1323 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1324 if (!buffer_address)
1325 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1326 if (buffer_address & 7)
1327 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
/* NOTE(review): the condition guarding this return (original line 1328,
** presumably a buffer_length check) is missing from this extract. */
1329 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
/* READ PARAMETER PAGE (0xEC) with a single zero address cycle */
1331 bytes = __cvmx_nand_low_level_read(chip, NAND_COMMAND_READ_PARAM_PAGE, 1, 0x0, 0, buffer_address, buffer_length);
1332 CVMX_NAND_RETURN(bytes);
1337 * Get the status of the NAND flash
1339 * @param chip Chip select for NAND flash
1341 * @return NAND status or a negative cvmx_nand_status_t error code on failure
1343 int cvmx_nand_get_status(int chip)
/* NOTE(review): the declaration of "status" (original lines 1344-1345) is
** not visible in this extract - verify against upstream cvmx-nand.c. */
1347 CVMX_NAND_LOG_CALLED();
1348 CVMX_NAND_LOG_PARAM("%d", chip);
1350 if ((chip < 0) || (chip > 7))
1351 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
/* Pre-fill the buffer so a failed read is detectable, then issue the
** STATUS command (2 bytes for 16-bit parts, else 1 byte) */
1353 *(uint8_t*)cvmx_nand_buffer = 0xff;
1354 status = __cvmx_nand_low_level_read(chip, NAND_COMMAND_STATUS, 0, 0, 0, cvmx_ptr_to_phys(cvmx_nand_buffer), (cvmx_nand_flags & CVMX_NAND_INITIALIZE_FLAGS_16BIT) ? 2 : 1);
/* NOTE(review): a success guard (original line 1355, presumably
** "if (status > 0)") appears to be missing before this readback -
** as shown a read error would be silently overwritten. Verify. */
1356 status = *(uint8_t*)cvmx_nand_buffer;
1358 CVMX_NAND_RETURN(status);
1363 * Get the page size, excluding out of band data. This function
1364 * will return zero for chip selects not connected to NAND.
1366 * @param chip Chip select for NAND flash
1368 * @return Page size in bytes or a negative cvmx_nand_status_t error code on failure
1370 int cvmx_nand_get_page_size(int chip)
1372 CVMX_NAND_LOG_CALLED();
1373 CVMX_NAND_LOG_PARAM("%d", chip);
1375 if ((chip < 0) || (chip > 7))
1376 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1378 CVMX_NAND_RETURN(cvmx_nand_state[chip].page_size);
1385 * @param chip Chip select for NAND flash
1387 * @return OOB in bytes or a negative cvmx_nand_status_t error code on failure
1389 int cvmx_nand_get_oob_size(int chip)
1391 CVMX_NAND_LOG_CALLED();
1392 CVMX_NAND_LOG_PARAM("%d", chip);
1394 if ((chip < 0) || (chip > 7))
1395 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1397 CVMX_NAND_RETURN(cvmx_nand_state[chip].oob_size);
1402 * Get the number of pages per NAND block
1404 * @param chip Chip select for NAND flash
1406 * @return Number of pages in each block or a negative cvmx_nand_status_t error
1409 int cvmx_nand_get_pages_per_block(int chip)
1411 CVMX_NAND_LOG_CALLED();
1412 CVMX_NAND_LOG_PARAM("%d", chip);
1414 if ((chip < 0) || (chip > 7))
1415 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1417 CVMX_NAND_RETURN(cvmx_nand_state[chip].pages_per_block);
1422 * Get the number of blocks in the NAND flash
1424 * @param chip Chip select for NAND flash
1426 * @return Number of blocks or a negative cvmx_nand_status_t error code on failure
1428 int cvmx_nand_get_blocks(int chip)
1430 CVMX_NAND_LOG_CALLED();
1431 CVMX_NAND_LOG_PARAM("%d", chip);
1433 if ((chip < 0) || (chip > 7))
1434 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1436 CVMX_NAND_RETURN(cvmx_nand_state[chip].blocks);
1441 * Reset the NAND flash
1443 * @param chip Chip select for NAND flash
1445 * @return Zero on success, a negative cvmx_nand_status_t error code on failure
1447 cvmx_nand_status_t cvmx_nand_reset(int chip)
1449 cvmx_nand_cmd_t cmd;
1451 CVMX_NAND_LOG_CALLED();
1452 CVMX_NAND_LOG_PARAM("%d", chip);
1454 if ((chip < 0) || (chip > 7))
1455 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
1456 if (!cvmx_nand_state[chip].page_size)
1457 CVMX_NAND_RETURN(CVMX_NAND_INVALID_PARAM);
/* RESET takes no address or second command cycles */
1459 if (__cvmx_nand_build_pre_cmd(chip, NAND_COMMAND_RESET, 0, 0, 0))
1460 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1462 /* WAIT for R_B to signal reset is complete */
1463 memset(&cmd, 0, sizeof(cmd));
/* NOTE(review): the cmd.wait field setup (original lines 1464-1466) is
** missing from this extract - verify against upstream. */
1467 if (cvmx_nand_submit(cmd))
1468 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1470 if (__cvmx_nand_build_post_cmd())
1471 CVMX_NAND_RETURN(CVMX_NAND_NO_MEMORY);
1473 CVMX_NAND_RETURN(CVMX_NAND_SUCCESS);
1480 * This function computes the Octeon specific ECC data used by the NAND boot
1483 * @param block pointer to 256 bytes of data
1484 * @param eccp pointer to where 8 bytes of ECC data will be stored
1486 void cvmx_nand_compute_boot_ecc(unsigned char *block, unsigned char *eccp)
1488 unsigned char pd0, pd1, pd2;
/* NOTE(review): the declarations of loop counters "i" and "j" (original
** lines 1489-1490) are not visible in this extract. */
/* Each PDx<n> bit below is the XOR (parity) of a specific subset of the
** 2048 data bits of the 256-byte block, forming a Hamming-style code:
** PD0 covers bit-position parities and fine-grained byte groupings,
** PD1/PD2 cover progressively coarser/complementary byte groupings. */
1491 pd0 = pd1 = pd2 = 0;
1493 for (i = 0; i < 256; i++) /* PD0<0> */
1494 pd0 ^= (block[i] ^ (block[i] >> 2) ^ (block[i] >> 4) ^ (block[i] >> 6)) & 1;
1495 for (i = 0; i < 256; i++) /* PD0<1> */
1496 pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 4) ^ (block[i] >> 5)) & 1) << 1;
1497 for (i = 0; i < 256; i++) /* PD0<2> */
1498 pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^ (block[i] >> 3)) & 1) << 2;
1499 for (i = 0; i < 128; i++) /* PD0<3> */
1500 pd0 ^= ((block[2*i] ^ (block[2*i] >> 1) ^ (block[2*i] >> 2) ^
1501 (block[2*i] >> 3) ^ (block[2*i] >> 4) ^ (block[2*i] >> 5) ^
1502 (block[2*i] >> 6) ^ (block[2*i] >> 7)) & 1) << 3;
1503 for (i = 0; i < 64; i++) /* PD0<4> */
1504 for (j = 0; j < 2; j++)
1505 pd0 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1506 (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1507 (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 4;
1508 for (i = 0; i < 32; i++) /* PD0<5> */
1509 for (j = 0; j < 4; j++)
1510 pd0 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1511 (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1512 (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 5;
1513 for (i = 0; i < 16; i++) /* PD0<6> */
1514 for (j = 0; j < 8; j++)
1515 pd0 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1516 (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1517 (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 6;
1518 for (i = 0; i < 8; i++) /* PD0<7> */
1519 for (j = 0; j < 16; j++)
1520 pd0 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1521 (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1522 (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 7;
1523 for (i = 0; i < 4; i++) /* PD1<0> */
1524 for (j = 0; j < 32; j++)
1525 pd1 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1526 (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1527 (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 0;
1528 for (i = 0; i < 2; i++) /* PD1<1> */
1529 for (j = 0; j < 64; j++)
1530 pd1 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1531 (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1532 (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 1;
1533 for (i = 0; i < 128; i++) /* PD1<2> */
1534 pd1 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1535 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1536 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 2;
/* NOTE(review): the PD1<3> and PD1<4> computations (original lines
** 1537-1538) are missing from this extract - verify against upstream. */
1539 for (i = 0; i < 256; i++) /* PD1<5> */
1540 pd1 ^= (((block[i] >> 1) ^ (block[i] >> 3) ^ (block[i] >> 5) ^ (block[i] >> 7)) & 1) << 5;
1541 for (i = 0; i < 256; i++) /* PD1<6> */
1542 pd1 ^= (((block[i] >> 2) ^ (block[i] >> 3) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 6;
1543 for (i = 0; i < 256; i++) /* PD1<7> */
1544 pd1 ^= (((block[i] >> 4) ^ (block[i] >> 5) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
1545 for (i = 0; i < 128; i++) /* PD2<0> */
1546 pd2 ^= ((block[2*i+1] ^ (block[2*i+1] >> 1) ^ (block[2*i+1] >> 2) ^
1547 (block[2*i+1] >> 3) ^ (block[2*i+1] >> 4) ^ (block[2*i+1] >> 5) ^
1548 (block[2*i+1] >> 6) ^ (block[2*i+1] >> 7)) & 1) << 0;
1549 for (i = 0; i < 64; i++) /* PD2<1> */
1550 for (j = 2; j < 4; j++)
1551 pd2 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1552 (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1553 (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 1;
1554 for (i = 0; i < 32; i++) /* PD2<2> */
1555 for (j = 4; j < 8; j++)
1556 pd2 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1557 (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1558 (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 2;
1559 for (i = 0; i < 16; i++) /* PD2<3> */
1560 for (j = 8; j < 16; j++)
1561 pd2 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1562 (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1563 (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 3;
1564 for (i = 0; i < 8; i++) /* PD2<4> */
1565 for (j = 16; j < 32; j++)
1566 pd2 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1567 (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1568 (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 4;
1569 for (i = 0; i < 4; i++) /* PD2<5> */
1570 for (j = 32; j < 64; j++)
1571 pd2 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1572 (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1573 (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 5;
1574 for (i = 0; i < 2; i++) /* PD2<6> */
1575 for (j = 64; j < 128; j++)
1576 pd2 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1577 (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1578 (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 6;
1579 for (i = 128; i < 256; i++) /* PD2<7> */
1580 pd2 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1581 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1582 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
/* NOTE(review): the trailing stores of pd0/pd1/pd2 into eccp[] (the
** original function tail after line 1582) are missing from this
** extract - verify against upstream cvmx-nand.c. */
1590 * Check an Octeon ECC block, fixing errors if possible
1592 * @param block Pointer to block to check
1594 * @return Zero if block has no errors, one if errors were corrected, two
1595 * if the errors could not be corrected.
1597 int cvmx_nand_correct_boot_ecc(uint8_t *block)
1599 unsigned char pd0, pd1, pd2;
1601 unsigned char xorpd0, xorpd1, xorpd2;
1605 asm volatile ("pref 0,0(%0);pref 0,128(%0);pref 0,256(%0)\n" :: "r" (block));
1607 pd0 = pd1 = pd2 = 0;
1609 for (i = 0; i < 256; i++) /* PD0<0> */
1610 pd0 ^= (block[i] ^ (block[i] >> 2) ^ (block[i] >> 4) ^ (block[i] >> 6)) & 1;
1611 for (i = 0; i < 256; i++) /* PD0<1> */
1612 pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 4) ^ (block[i] >> 5)) & 1) << 1;
1613 for (i = 0; i < 256; i++) /* PD0<2> */
1614 pd0 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^ (block[i] >> 3)) & 1) << 2;
1615 for (i = 0; i < 128; i++) /* PD0<3> */
1616 pd0 ^= ((block[2*i] ^ (block[2*i] >> 1) ^ (block[2*i] >> 2) ^
1617 (block[2*i] >> 3) ^ (block[2*i] >> 4) ^ (block[2*i] >> 5) ^
1618 (block[2*i] >> 6) ^ (block[2*i] >> 7)) & 1) << 3;
1619 for (i = 0; i < 64; i++) /* PD0<4> */
1620 for (j = 0; j < 2; j++)
1621 pd0 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1622 (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1623 (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 4;
1624 for (i = 0; i < 32; i++) /* PD0<5> */
1625 for (j = 0; j < 4; j++)
1626 pd0 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1627 (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1628 (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 5;
1629 for (i = 0; i < 16; i++) /* PD0<6> */
1630 for (j = 0; j < 8; j++)
1631 pd0 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1632 (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1633 (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 6;
1634 for (i = 0; i < 8; i++) /* PD0<7> */
1635 for (j = 0; j < 16; j++)
1636 pd0 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1637 (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1638 (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 7;
1639 for (i = 0; i < 4; i++) /* PD1<0> */
1640 for (j = 0; j < 32; j++)
1641 pd1 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1642 (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1643 (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 0;
1644 for (i = 0; i < 2; i++) /* PD1<1> */
1645 for (j = 0; j < 64; j++)
1646 pd1 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1647 (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1648 (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 1;
1649 for (i = 0; i < 128; i++) /* PD1<2> */
1650 pd1 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1651 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1652 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 2;
1655 for (i = 0; i < 256; i++) /* PD1<5> */
1656 pd1 ^= (((block[i] >> 1) ^ (block[i] >> 3) ^ (block[i] >> 5) ^ (block[i] >> 7)) & 1) << 5;
1657 for (i = 0; i < 256; i++) /* PD1<6> */
1658 pd1 ^= (((block[i] >> 2) ^ (block[i] >> 3) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 6;
1659 for (i = 0; i < 256; i++) /* PD1<7> */
1660 pd1 ^= (((block[i] >> 4) ^ (block[i] >> 5) ^ (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
1661 for (i = 0; i < 128; i++) /* PD2<0> */
1662 pd2 ^= ((block[2*i+1] ^ (block[2*i+1] >> 1) ^ (block[2*i+1] >> 2) ^
1663 (block[2*i+1] >> 3) ^ (block[2*i+1] >> 4) ^ (block[2*i+1] >> 5) ^
1664 (block[2*i+1] >> 6) ^ (block[2*i+1] >> 7)) & 1) << 0;
1665 for (i = 0; i < 64; i++) /* PD2<1> */
1666 for (j = 2; j < 4; j++)
1667 pd2 ^= ((block[4*i+j] ^ (block[4*i+j] >> 1) ^ (block[4*i+j] >> 2) ^
1668 (block[4*i+j] >> 3) ^ (block[4*i+j] >> 4) ^ (block[4*i+j] >> 5) ^
1669 (block[4*i+j] >> 6) ^ (block[4*i+j] >> 7)) & 1) << 1;
1670 for (i = 0; i < 32; i++) /* PD2<2> */
1671 for (j = 4; j < 8; j++)
1672 pd2 ^= ((block[8*i+j] ^ (block[8*i+j] >> 1) ^ (block[8*i+j] >> 2) ^
1673 (block[8*i+j] >> 3) ^ (block[8*i+j] >> 4) ^ (block[8*i+j] >> 5) ^
1674 (block[8*i+j] >> 6) ^ (block[8*i+j] >> 7)) & 1) << 2;
1675 for (i = 0; i < 16; i++) /* PD2<3> */
1676 for (j = 8; j < 16; j++)
1677 pd2 ^= ((block[16*i+j] ^ (block[16*i+j] >> 1) ^ (block[16*i+j] >> 2) ^
1678 (block[16*i+j] >> 3) ^ (block[16*i+j] >> 4) ^ (block[16*i+j] >> 5) ^
1679 (block[16*i+j] >> 6) ^ (block[16*i+j] >> 7)) & 1) << 3;
1680 for (i = 0; i < 8; i++) /* PD2<4> */
1681 for (j = 16; j < 32; j++)
1682 pd2 ^= ((block[32*i+j] ^ (block[32*i+j] >> 1) ^ (block[32*i+j] >> 2) ^
1683 (block[32*i+j] >> 3) ^ (block[32*i+j] >> 4) ^ (block[32*i+j] >> 5) ^
1684 (block[32*i+j] >> 6) ^ (block[32*i+j] >> 7)) & 1) << 4;
1685 for (i = 0; i < 4; i++) /* PD2<5> */
1686 for (j = 32; j < 64; j++)
1687 pd2 ^= ((block[64*i+j] ^ (block[64*i+j] >> 1) ^ (block[64*i+j] >> 2) ^
1688 (block[64*i+j] >> 3) ^ (block[64*i+j] >> 4) ^ (block[64*i+j] >> 5) ^
1689 (block[64*i+j] >> 6) ^ (block[64*i+j] >> 7)) & 1) << 5;
1690 for (i = 0; i < 2; i++) /* PD2<6> */
1691 for (j = 64; j < 128; j++)
1692 pd2 ^= ((block[128*i+j] ^ (block[128*i+j] >> 1) ^ (block[128*i+j] >> 2) ^
1693 (block[128*i+j] >> 3) ^ (block[128*i+j] >> 4) ^ (block[128*i+j] >> 5) ^
1694 (block[128*i+j] >> 6) ^ (block[128*i+j] >> 7)) & 1) << 6;
1695 for (i = 128; i < 256; i++) /* PD2<7> */
1696 pd2 ^= ((block[i] ^ (block[i] >> 1) ^ (block[i] >> 2) ^
1697 (block[i] >> 3) ^ (block[i] >> 4) ^ (block[i] >> 5) ^
1698 (block[i] >> 6) ^ (block[i] >> 7)) & 1) << 7;
1700 xorpd0 = pd0 ^ block[256];
1701 xorpd1 = pd1 ^ block[257];
1702 xorpd2 = pd2 ^ block[258];
1704 xor_num = __builtin_popcount((xorpd0 << 16) | (xorpd1 << 8) | xorpd2);
1705 check = (((xorpd1 & 7) << 8) | xorpd0) ^ ((xorpd2 << 3) | (xorpd1 >> 5));
1709 else if ((xor_num > 1) && (check != 0x7FF))
1714 /* Correct the error */
1715 block[xorpd2] ^= 1 << (xorpd1 >> 5);