2 * Copyright (c) 2007, Juniper Networks, Inc.
3 * Copyright (c) 2012-2013, SRI International
6 * Portions of this software were developed by SRI International and the
7 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
8 * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
41 #include <sys/param.h>
42 #include <sys/systm.h>
45 #include <sys/endian.h>
47 #include <sys/kernel.h>
48 #include <sys/malloc.h>
49 #include <sys/module.h>
51 #include <sys/sysctl.h>
53 #include <machine/bus.h>
55 #include <dev/cfi/cfi_reg.h>
56 #include <dev/cfi/cfi_var.h>
/* Registered at attach time; exposes timeout counters via sysctl. */
58 static void cfi_add_sysctls(struct cfi_softc *);
/* Character-device switch implemented elsewhere in the driver. */
60 extern struct cdevsw cfi_cdevsw;
/* Public driver name and device classes shared with the bus front-ends. */
62 char cfi_driver_name[] = "cfi";
63 devclass_t cfi_devclass;
64 devclass_t cfi_diskclass;
/*
 * cfi_read_raw -- read one device word (sc_width bytes) from the flash
 * window with no byte-order conversion (contrast with cfi_read()).
 * NOTE(review): this extract elides some original lines (return type,
 * braces, case labels); visible code tokens are kept verbatim.
 */
67 cfi_read_raw(struct cfi_softc *sc, u_int ofs)
/* Truncate the offset to natural device-word alignment. */
71 ofs &= ~(sc->sc_width - 1);
/* Dispatch on port width: 1-, 2- or 4-byte bus accessor. */
72 switch (sc->sc_width) {
74 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
77 val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
80 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
/*
 * cfi_read -- read one device word, converting from the flash's
 * byte order to host order via le16toh/le32toh unless the platform
 * defines CFI_HARDWAREBYTESWAP (bus hardware swaps for us).
 * NOTE(review): this extract elides some original lines; visible
 * code tokens are kept verbatim.
 */
90 cfi_read(struct cfi_softc *sc, u_int ofs)
/* Truncate the offset to natural device-word alignment. */
95 ofs &= ~(sc->sc_width - 1);
96 switch (sc->sc_width) {
98 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
101 sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
102 #ifdef CFI_HARDWAREBYTESWAP
109 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
110 #ifndef CFI_HARDWAREBYTESWAP
/*
 * cfi_write -- write one device word, converting host order to the
 * flash's byte order (htole16/htole32) unless CFI_HARDWAREBYTESWAP
 * says the bus hardware already swaps.
 * NOTE(review): this extract elides some original lines (braces,
 * case labels, #else/#endif); visible code tokens are kept verbatim.
 */
122 cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
/* Truncate the offset to natural device-word alignment. */
125 ofs &= ~(sc->sc_width - 1);
126 switch (sc->sc_width) {
128 bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
131 #ifdef CFI_HARDWAREBYTESWAP
132 bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, val);
134 bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
139 #ifdef CFI_HARDWAREBYTESWAP
140 bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, val);
142 bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
/*
 * cfi_read_qry -- enter CFI query mode, read one query-table byte at
 * word index 'ofs', then return the chip to read-array mode so normal
 * reads work again.
 */
149 cfi_read_qry(struct cfi_softc *sc, u_int ofs)
/* 0x98 at the query address puts the chip in CFI query mode. */
153 cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
154 val = cfi_read(sc, ofs * sc->sc_width);
/* Leave query mode. */
155 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
/*
 * cfi_amd_write -- issue an AMD-style command: the two-cycle unlock
 * sequence (0xAA to 0x555, 0x55 to 0x2AA) followed by the command
 * write itself at 'ofs + addr'.
 */
160 cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
163 cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
164 cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
165 cfi_write(sc, ofs + addr, data);
/*
 * cfi_fmtsize -- render a byte count as a short human-readable string
 * ("512KB", "4MB", ...).  NOTE(review): 'buf' is declared on a line
 * elided from this extract; presumably static, so the returned pointer
 * stays valid after return but is not re-entrant -- confirm against
 * the full source.
 */
169 cfi_fmtsize(uint32_t sz)
172 static const char *sfx[] = { "", "K", "M", "G" };
/* Scale down by 1024 until below 1K or the suffix table is exhausted. */
176 while (sfxidx < 3 && sz > 1023) {
181 sprintf(buf, "%u%sB", sz, sfx[sfxidx]);
/*
 * cfi_probe -- device probe: map the flash window, hunt for the CFI
 * "QRY" signature (auto-detecting the port width when sc_width is 0),
 * record the vendor/command set and device size, sanity-check the
 * interface width, and set the device description string.
 * NOTE(review): this extract elides some original lines; visible
 * code tokens are kept verbatim.
 */
186 cfi_probe(device_t dev)
189 struct cfi_softc *sc;
192 uint16_t iface, vend;
194 sc = device_get_softc(dev);
/* Map the flash memory window. */
198 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
200 if (sc->sc_res == NULL)
203 sc->sc_tag = rman_get_bustag(sc->sc_res);
204 sc->sc_handle = rman_get_bushandle(sc->sc_res);
/* Width 0 means "unknown": probe widths up to 4 until 'Q' appears. */
206 if (sc->sc_width == 0) {
208 while (sc->sc_width <= 4) {
209 if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
213 } else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
217 if (sc->sc_width > 4) {
222 /* We got a Q. Check if we also have the R and the Y. */
223 if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
224 cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
229 /* Get the vendor and command set. */
230 vend = cfi_read_qry(sc, CFI_QRY_VEND) |
231 (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);
233 sc->sc_cmdset = vend;
/* Map the vendor id to a printable name. */
236 case CFI_VEND_AMD_ECS:
237 case CFI_VEND_AMD_SCS:
238 vend_str = "AMD/Fujitsu";
240 case CFI_VEND_INTEL_ECS:
241 vend_str = "Intel/Sharp";
243 case CFI_VEND_INTEL_SCS:
246 case CFI_VEND_MITSUBISHI_ECS:
247 case CFI_VEND_MITSUBISHI_SCS:
248 vend_str = "Mitsubishi";
251 vend_str = "Unknown vendor";
255 /* Get the device size. */
256 sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);
258 /* Sanity-check the I/F */
259 iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
260 (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);
263 * Adding 1 to iface will give us a bit-wise "switch"
264 * that allows us to test for the interface width by
265 * testing a single bit.
269 error = (iface & sc->sc_width) ? 0 : EINVAL;
/* Describe the device as "<vendor> - <size>". */
273 snprintf(desc, sizeof(desc), "%s - %s", vend_str,
274 cfi_fmtsize(sc->sc_size));
275 device_set_desc_copy(dev, desc);
/* Release the window; it is re-allocated at attach time. */
278 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
/*
 * cfi_attach -- device attach: map the flash window for good, read the
 * CFI timeout exponents (typical and maximum, for erase / word write /
 * buffered write) with sanity clamping, discover the erase-region
 * layout, create the /dev/cfiN character device, register sysctls and
 * attach the "cfid" disk child.
 * NOTE(review): this extract elides some original lines; visible
 * code tokens are kept verbatim.
 */
283 cfi_attach(device_t dev)
285 struct cfi_softc *sc;
288 uint64_t mtoexp, ttoexp;
289 #ifdef CFI_SUPPORT_STRATAFLASH
291 char name[KENV_MNAMELEN], value[32];
294 sc = device_get_softc(dev);
298 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
299 #ifndef ATSE_CFI_HACK
302 RF_ACTIVE | RF_SHAREABLE);
304 if (sc->sc_res == NULL)
307 sc->sc_tag = rman_get_bustag(sc->sc_res);
308 sc->sc_handle = rman_get_bushandle(sc->sc_res);
310 /* Get time-out values for erase, write, and buffer write. */
/* CFI stores timeouts as base-2 exponents: typical (tto) and max (mto). */
311 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
312 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
314 device_printf(dev, "erase timeout == 0, using 2^16ms\n");
318 device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp);
322 device_printf(dev, "max erase timeout == 0, using 2^%jdms\n",
326 if (ttoexp + mtoexp > 41) {
327 device_printf(dev, "insane max erase timeout: 2^%jd\n",
/* Typical erase timeout is in milliseconds; max is typical * 2^mto. */
331 sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp);
332 sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] =
333 sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp);
/* Single-word write timeouts, in microseconds. */
335 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
336 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
338 device_printf(dev, "write timeout == 0, using 2^18ns\n");
342 device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
346 device_printf(dev, "max write timeout == 0, using 2^%jdms\n",
350 if (ttoexp + mtoexp > 51) {
351 device_printf(dev, "insane max write timeout: 2^%jdus\n",
355 sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp);
356 sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] =
357 sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp);
/* Buffered (multibyte) write timeouts, in microseconds. */
359 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE);
360 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE);
361 /* Don't check for 0, it means not-supported. */
363 device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
366 if (ttoexp + mtoexp > 51) {
367 device_printf(dev, "insane max write timeout: 2^%jdus\n",
371 sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] =
372 SBT_1US * (1ULL << cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE));
373 sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] =
374 sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] *
375 (1ULL << cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE));
377 /* Get the maximum size of a multibyte program */
/*
 * NOTE(review): both query reads below use CFI_QRY_MAXBUF; other 16-bit
 * query fields in this file read offset and offset + 1.  Looks like the
 * high byte should come from CFI_QRY_MAXBUF + 1 -- verify against the
 * CFI specification before changing.
 */
378 if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0)
379 sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) |
380 cfi_read_qry(sc, CFI_QRY_MAXBUF) << 8);
384 /* Get erase regions. */
385 sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
386 sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
387 M_TEMP, M_WAITOK | M_ZERO);
388 for (r = 0; r < sc->sc_regions; r++) {
/* Region descriptor: block count - 1 (16 bits), then block size / 256. */
389 blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
390 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
391 sc->sc_region[r].r_blocks = blocks + 1;
393 blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
394 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
395 sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
399 /* Reset the device to a default state. */
400 cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);
/* Print the region layout, e.g. "[8x8KB,63x64KB]". */
403 device_printf(dev, "[");
404 for (r = 0; r < sc->sc_regions; r++) {
405 printf("%ux%s%s", sc->sc_region[r].r_blocks,
406 cfi_fmtsize(sc->sc_region[r].r_blksz),
407 (r == sc->sc_regions - 1) ? "]\n" : ",");
/* Create /dev/cfiN for raw access from userland. */
411 u = device_get_unit(dev);
412 sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
413 "%s%u", cfi_driver_name, u);
414 sc->sc_nod->si_drv1 = sc;
418 #ifdef CFI_SUPPORT_STRATAFLASH
420 * Store the Intel factory PPR in the environment. In some
421 * cases it is the most unique ID on a board.
423 if (cfi_intel_get_factory_pr(sc, &ppr) == 0) {
424 if (snprintf(name, sizeof(name), "%s.factory_ppr",
425 device_get_nameunit(dev)) < (sizeof(name) - 1) &&
426 snprintf(value, sizeof(value), "0x%016jx", ppr) <
428 (void) kern_setenv(name, value);
/* Attach the cfid(4) disk child on top of this device. */
432 device_add_child(dev, "cfid", -1);
433 bus_generic_attach(dev);
/*
 * cfi_add_sysctls -- register read-only counters under the device's
 * sysctl tree reporting how often the typical and maximum timeouts
 * were exceeded for erase, write and (if supported) buffered write.
 * NOTE(review): the node names spell "timout" (sic); they are runtime
 * strings visible to userland, so the spelling is preserved -- renaming
 * would break existing consumers.
 */
439 cfi_add_sysctls(struct cfi_softc *sc)
441 struct sysctl_ctx_list *ctx;
442 struct sysctl_oid_list *children;
444 ctx = device_get_sysctl_ctx(sc->sc_dev);
445 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
447 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
448 "typical_erase_timout_count",
449 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE],
450 0, "Number of times the typical erase timeout was exceeded");
451 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
452 "max_erase_timout_count",
453 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0,
454 "Number of times the maximum erase timeout was exceeded");
455 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
456 "typical_write_timout_count",
457 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0,
458 "Number of times the typical write timeout was exceeded");
459 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
460 "max_write_timout_count",
461 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0,
462 "Number of times the maximum write timeout was exceeded");
/* Buffered-write counters only make sense when the chip supports it. */
463 if (sc->sc_maxbuf > 0) {
464 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
465 "typical_bufwrite_timout_count",
466 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0,
467 "Number of times the typical buffered write timeout was "
469 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
470 "max_bufwrite_timout_count",
471 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0,
472 "Number of times the maximum buffered write timeout was "
/*
 * cfi_detach -- device detach: destroy the character device, free the
 * erase-region array and release the mapped flash window (the inverse
 * of cfi_attach()).
 */
478 cfi_detach(device_t dev)
480 struct cfi_softc *sc;
482 sc = device_get_softc(dev);
484 destroy_dev(sc->sc_nod);
486 free(sc->sc_region, M_TEMP);
486 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
/*
 * cfi_wait_ready -- poll the flash at 'ofs' until the pending operation
 * (identified by 'cmd', an index into the timeout tables) completes or
 * the maximum timeout elapses.  Completion detection is command-set
 * specific: Intel exposes a status register; AMD parts toggle bit 6
 * (DQ6) on every read while busy, so two equal consecutive reads mean
 * done.  Bumps the tto/mto counters when the typical/maximum timeout
 * is exceeded.  NOTE(review): this extract elides some original lines;
 * visible code tokens are kept verbatim.
 */
491 cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start,
492 enum cfi_wait_cmd cmd)
494 int done, error, tto_exceeded;
495 uint32_t st0 = 0, st = 0;
501 while (!done && !error) {
503 * Save time before we start so we always do one check
504 * after the timeout has expired.
508 switch (sc->sc_cmdset) {
509 case CFI_VEND_INTEL_ECS:
510 case CFI_VEND_INTEL_SCS:
511 st = cfi_read(sc, ofs);
512 done = (st & CFI_INTEL_STATUS_WSMS);
514 /* NB: bit 0 is reserved */
515 st &= ~(CFI_INTEL_XSTATUS_RSVD |
516 CFI_INTEL_STATUS_WSMS |
517 CFI_INTEL_STATUS_RSVD);
/* Decode Intel error bits: device protect / program / erase. */
518 if (st & CFI_INTEL_STATUS_DPS)
520 else if (st & CFI_INTEL_STATUS_PSLBS)
522 else if (st & CFI_INTEL_STATUS_ECLBS)
528 case CFI_VEND_AMD_SCS:
529 case CFI_VEND_AMD_ECS:
/* DQ6 toggles while busy: two identical reads => operation done. */
530 st0 = cfi_read(sc, ofs);
531 st = cfi_read(sc, ofs);
532 done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
537 now > start + sc->sc_typical_timeouts[cmd]) {
540 sc->sc_tto_counts[cmd]++;
541 #ifdef CFI_DEBUG_TIMEOUT
542 device_printf(sc->sc_dev,
543 "typical timeout exceeded (cmd %d)", cmd);
546 if (now > start + sc->sc_max_timeouts[cmd]) {
547 sc->sc_mto_counts[cmd]++;
548 #ifdef CFI_DEBUG_TIMEOUT
549 device_printf(sc->sc_dev,
550 "max timeout exceeded (cmd %d)", cmd);
558 printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
/*
 * cfi_write_block -- flush the cached write buffer (sc_wrbuf) back to
 * the flash block at sc_wrofs: unlock Intel parts, erase the block if
 * any bit must go 0->1, then program it back, preferring the multibyte
 * buffered-program command when supported and falling back to per-word
 * programming.  Interrupts are disabled around the command/data pairs
 * because flash command sequences must not be interleaved with other
 * accesses to the part.  NOTE(review): this extract elides some
 * original lines; visible code tokens are kept verbatim.
 */
563 cfi_write_block(struct cfi_softc *sc)
571 int error, i, neederase = 0;
576 /* Intel flash must be unlocked before modification */
577 switch (sc->sc_cmdset) {
578 case CFI_VEND_INTEL_ECS:
579 case CFI_VEND_INTEL_SCS:
580 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
581 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB);
582 cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
586 /* Check if an erase is required. */
/* Flash can only clear bits (1->0); any 0->1 change forces an erase. */
587 for (i = 0; i < sc->sc_wrbufsz; i++)
588 if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) {
594 intr = intr_disable();
595 start = sbinuptime();
596 /* Erase the block. */
597 switch (sc->sc_cmdset) {
598 case CFI_VEND_INTEL_ECS:
599 case CFI_VEND_INTEL_SCS:
600 cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
601 cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
603 case CFI_VEND_AMD_SCS:
604 case CFI_VEND_AMD_ECS:
605 cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
606 CFI_AMD_ERASE_SECTOR);
607 cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
610 /* Better safe than sorry... */
615 error = cfi_wait_ready(sc, sc->sc_wrofs, start,
622 /* Write the block using a multibyte write if supported. */
623 ptr.x8 = sc->sc_wrbuf;
624 cpyprt.x8 = sc->sc_wrbufcpy;
625 if (sc->sc_maxbuf > sc->sc_width) {
626 switch (sc->sc_cmdset) {
627 case CFI_VEND_INTEL_ECS:
628 case CFI_VEND_INTEL_SCS:
629 for (i = 0; i < sc->sc_wrbufsz; i += wlen) {
630 wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i);
632 intr = intr_disable();
634 start = sbinuptime();
/* Request buffer-program mode; poll status until the chip accepts. */
636 cfi_write(sc, sc->sc_wrofs + i,
637 CFI_BCS_BUF_PROG_SETUP);
638 if (sbinuptime() > start + sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE]) {
642 st = cfi_read(sc, sc->sc_wrofs + i);
643 } while (! (st & CFI_INTEL_STATUS_WSMS));
/* Word count for the buffered program, minus one per the spec. */
645 cfi_write(sc, sc->sc_wrofs + i,
646 (wlen / sc->sc_width) - 1);
647 switch (sc->sc_width) {
649 bus_space_write_region_1(sc->sc_tag,
650 sc->sc_handle, sc->sc_wrofs + i,
654 bus_space_write_region_2(sc->sc_tag,
655 sc->sc_handle, sc->sc_wrofs + i,
656 ptr.x16 + i / 2, wlen / 2);
659 bus_space_write_region_4(sc->sc_tag,
660 sc->sc_handle, sc->sc_wrofs + i,
661 ptr.x32 + i / 4, wlen / 4);
665 cfi_write(sc, sc->sc_wrofs + i,
670 error = cfi_wait_ready(sc, sc->sc_wrofs + i,
671 start, CFI_TIMEOUT_BUFWRITE);
677 /* Fall through to single word case */
683 /* Write the block one byte/word at a time. */
684 for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
686 /* Avoid writing unless we are actually changing bits */
688 switch (sc->sc_width) {
690 if(*(ptr.x8 + i) == *(cpyprt.x8 + i))
694 if(*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2))
698 if(*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4))
705 * Make sure the command to start a write and the
706 * actual write happens back-to-back without any
709 intr = intr_disable();
711 start = sbinuptime();
712 switch (sc->sc_cmdset) {
713 case CFI_VEND_INTEL_ECS:
714 case CFI_VEND_INTEL_SCS:
715 cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
717 case CFI_VEND_AMD_SCS:
718 case CFI_VEND_AMD_ECS:
719 cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
722 switch (sc->sc_width) {
724 bus_space_write_1(sc->sc_tag, sc->sc_handle,
725 sc->sc_wrofs + i, *(ptr.x8 + i));
728 bus_space_write_2(sc->sc_tag, sc->sc_handle,
729 sc->sc_wrofs + i, *(ptr.x16 + i / 2));
732 bus_space_write_4(sc->sc_tag, sc->sc_handle,
733 sc->sc_wrofs + i, *(ptr.x32 + i / 4));
739 error = cfi_wait_ready(sc, sc->sc_wrofs, start,
/* Back to read-array mode before relocking. */
748 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
750 /* Relock Intel flash */
751 switch (sc->sc_cmdset) {
752 case CFI_VEND_INTEL_ECS:
753 case CFI_VEND_INTEL_SCS:
754 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
755 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LB);
756 cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
762 #ifdef CFI_SUPPORT_STRATAFLASH
764 * Intel StrataFlash Protection Register Support.
766 * The memory includes a 128-bit Protection Register that can be
767 * used for security. There are two 64-bit segments; one is programmed
768 * at the factory with a unique 64-bit number which is immutable.
769 * The other segment is left blank for User (OEM) programming.
770 * The User/OEM segment is One Time Programmable (OTP). It can also
771 * be locked to prevent any further writes by setting bit 0 of the
772 * Protection Lock Register (PLR). The PLR can be written only once.
/*
 * cfi_get16 -- read the 16-bit word at word index 'off' (byte offset
 * off * 2) of the flash window; used for Protection Register access.
 */
776 cfi_get16(struct cfi_softc *sc, int off)
778 uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
782 #ifdef CFI_ARMEDANDDANGEROUS
/*
 * cfi_put16 -- write a 16-bit word at word index 'off'; only compiled
 * in when CFI_ARMEDANDDANGEROUS enables OTP-register programming.
 */
784 cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
786 bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
791 * Read the factory-defined 64-bit segment of the PR.
/*
 * Only valid for the Intel extended command set, on 16-bit parts.
 * Assembles PR words 0..3 (big-endian word order) into *id, then
 * restores read-array mode.
 */
794 cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
796 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
798 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
/* READ_ID mode maps the Protection Register into the window. */
800 cfi_write(sc, 0, CFI_INTEL_READ_ID);
801 *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
802 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
803 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
804 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
805 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
810 * Read the User/OEM 64-bit segment of the PR.
/*
 * Same access pattern as the factory segment, but reads PR words 4..7
 * (the one-time-programmable User/OEM half).
 */
813 cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
815 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
817 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
819 cfi_write(sc, 0, CFI_INTEL_READ_ID);
820 *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
821 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
822 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
823 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
824 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
829 * Write the User/OEM 64-bit segment of the PR.
830 * XXX should allow writing individual words/bytes
/*
 * One-time-programmable: without CFI_ARMEDANDDANGEROUS this is a
 * deliberate no-op that only logs, to avoid accidentally burning the
 * OTP register.  With it, programs PR words 7..4 from the low 16 bits
 * of 'id' upward using the PP_SETUP command sequence.
 */
833 cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
835 #ifdef CFI_ARMEDANDDANGEROUS
841 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
843 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
845 #ifdef CFI_ARMEDANDDANGEROUS
846 for (i = 7; i >= 4; i--, id >>= 16) {
/* Command and data write must be back-to-back; block interrupts. */
847 intr = intr_disable();
848 start = sbinuptime();
849 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
850 cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
852 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
857 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
860 device_printf(sc->sc_dev, "%s: OEM PR not set, "
861 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
867 * Read the contents of the Protection Lock Register.
/* Intel ECS only; enters READ_ID mode, reads the PLR, restores array mode. */
870 cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
872 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
874 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
876 cfi_write(sc, 0, CFI_INTEL_READ_ID);
877 *plr = cfi_get16(sc, CFI_INTEL_PLR);
878 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
883 * Write the Protection Lock Register to lock down the
884 * user-settable segment of the Protection Register.
885 * NOTE: this operation is not reversible.
/*
 * Irreversible OTP lock: gated behind CFI_ARMEDANDDANGEROUS exactly
 * like cfi_intel_set_oem_pr(); without it, only logs a message.
 * The value 0xFFFD clears bit 1 of the PLR to set the lock.
 */
888 cfi_intel_set_plr(struct cfi_softc *sc)
890 #ifdef CFI_ARMEDANDDANGEROUS
895 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
897 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
899 #ifdef CFI_ARMEDANDDANGEROUS
900 /* worthy of console msg */
901 device_printf(sc->sc_dev, "set PLR\n");
902 intr = intr_disable();
904 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
905 cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
907 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
909 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
912 device_printf(sc->sc_dev, "%s: PLR not set, "
913 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
917 #endif /* CFI_SUPPORT_STRATAFLASH */