2 * Copyright (c) 2007, Juniper Networks, Inc.
3 * Copyright (c) 2012-2013, SRI International
6 * Portions of this software were developed by SRI International and the
7 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
8 * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
41 #include <sys/param.h>
42 #include <sys/systm.h>
45 #include <sys/endian.h>
47 #include <sys/kernel.h>
48 #include <sys/malloc.h>
49 #include <sys/module.h>
51 #include <sys/sysctl.h>
53 #include <machine/bus.h>
55 #include <dev/cfi/cfi_reg.h>
56 #include <dev/cfi/cfi_var.h>
/* Forward declaration; the function is defined later in this file. */
58 static void cfi_add_sysctls(struct cfi_softc *);
/* Character-device switch; defined in a companion source file — not here. */
60 extern struct cdevsw cfi_cdevsw;
/* Driver identity and devclass handles used when registering devices. */
62 char cfi_driver_name[] = "cfi";
63 devclass_t cfi_devclass;
64 devclass_t cfi_diskclass;
67 cfi_read_raw(struct cfi_softc *sc, u_int ofs)
71 ofs &= ~(sc->sc_width - 1);
72 switch (sc->sc_width) {
74 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
77 val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
80 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
90 cfi_read(struct cfi_softc *sc, u_int ofs)
95 ofs &= ~(sc->sc_width - 1);
96 switch (sc->sc_width) {
98 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
101 sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
105 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
116 cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
119 ofs &= ~(sc->sc_width - 1);
120 switch (sc->sc_width) {
122 bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
125 bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
128 bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
134 cfi_read_qry(struct cfi_softc *sc, u_int ofs)
138 cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
139 val = cfi_read(sc, ofs * sc->sc_width);
140 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
145 cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
148 cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
149 cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
150 cfi_write(sc, ofs + addr, data);
/*
 * Format a byte count as a short human-readable string ("512B", "1KB",
 * "8MB", ...), dividing by 1024 at most three times (so the largest
 * suffix is "G").  Returns a pointer to a static buffer: the result is
 * only valid until the next call, and the function is not reentrant.
 */
char *
cfi_fmtsize(uint32_t sz)
{
	static char buf[8];
	static const char *sfx[] = { "", "K", "M", "G" };
	int sfxidx;

	sfxidx = 0;
	while (sfxidx < 3 && sz > 1023) {
		sz /= 1024;
		sfxidx++;
	}

	sprintf(buf, "%u%sB", sz, sfx[sfxidx]);
	return (buf);
}
/*
 * cfi_probe: look for a CFI-compliant flash behind this device's
 * SYS_RES_MEMORY resource.  Issues the CFI "QRY" query (auto-detecting
 * the interface width when sc_width is not preset), records the vendor
 * command set and device size in the softc, and builds the device
 * description string.  The bus resource is released again before
 * returning (it is re-allocated in cfi_attach()).
 */
171 cfi_probe(device_t dev)
174 struct cfi_softc *sc;
177 uint16_t iface, vend;
179 sc = device_get_softc(dev);
183 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
185 if (sc->sc_res == NULL)
188 sc->sc_tag = rman_get_bustag(sc->sc_res);
189 sc->sc_handle = rman_get_bushandle(sc->sc_res);
	/* No width configured: scan candidate widths until 'Q' answers. */
191 if (sc->sc_width == 0) {
193 while (sc->sc_width <= 4) {
194 if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
198 } else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
	/* No width up to 4 bytes responded: not a CFI device. */
202 if (sc->sc_width > 4) {
207 /* We got a Q. Check if we also have the R and the Y. */
208 if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
209 cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
214 /* Get the vendor and command set. */
215 vend = cfi_read_qry(sc, CFI_QRY_VEND) |
216 (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);
218 sc->sc_cmdset = vend;
	/* Map the primary command-set ID to a human-readable vendor name. */
221 case CFI_VEND_AMD_ECS:
222 case CFI_VEND_AMD_SCS:
223 vend_str = "AMD/Fujitsu";
225 case CFI_VEND_INTEL_ECS:
226 vend_str = "Intel/Sharp";
228 case CFI_VEND_INTEL_SCS:
231 case CFI_VEND_MITSUBISHI_ECS:
232 case CFI_VEND_MITSUBISHI_SCS:
233 vend_str = "Mitsubishi";
236 vend_str = "Unknown vendor";
240 /* Get the device size. */
	/* CFI reports size as a power-of-two exponent. */
241 sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);
243 /* Sanity-check the I/F */
244 iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
245 (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);
248 * Adding 1 to iface will give us a bit-wise "switch"
249 * that allows us to test for the interface width by
250 * testing a single bit.
254 error = (iface & sc->sc_width) ? 0 : EINVAL;
	/* Description e.g. "Intel/Sharp - 8MB". */
258 snprintf(desc, sizeof(desc), "%s - %s", vend_str,
259 cfi_fmtsize(sc->sc_size));
260 device_set_desc_copy(dev, desc);
	/* Release the resource; attach will re-acquire it. */
263 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
/*
 * cfi_attach: second-stage initialization.  Re-acquires the memory
 * resource, reads the CFI timeout exponents (typical and maximum, for
 * erase / word write / buffered write), sanity-checks them and converts
 * them to sbintime_t values, reads the erase-region geometry into a
 * malloc'ed table, creates the /dev/cfiN character node, and under
 * CFI_SUPPORT_STRATAFLASH publishes the Intel factory protection
 * register in the kernel environment.
 */
268 cfi_attach(device_t dev)
270 struct cfi_softc *sc;
273 uint64_t mtoexp, ttoexp;
274 #ifdef CFI_SUPPORT_STRATAFLASH
276 char name[KENV_MNAMELEN], value[32];
279 sc = device_get_softc(dev);
283 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
285 if (sc->sc_res == NULL)
288 sc->sc_tag = rman_get_bustag(sc->sc_res);
289 sc->sc_handle = rman_get_bushandle(sc->sc_res);
291 /* Get time-out values for erase, write, and buffer write. */
	/* CFI stores each timeout as a power-of-two exponent. */
292 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
293 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
295 device_printf(dev, "erase timeout == 0, using 2^16ms\n");
299 device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp);
303 device_printf(dev, "max erase timeout == 0, using 2^%jdms\n",
	/* 2^(tto+mto) ms must fit an sbintime_t; cap the sum. */
307 if (ttoexp + mtoexp > 41) {
308 device_printf(dev, "insane max erase timeout: 2^%jd\n",
312 sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp);
	/* Max timeout is expressed as a multiplier of the typical one. */
313 sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] =
314 sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp);
316 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
317 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
319 device_printf(dev, "write timeout == 0, using 2^18ns\n");
323 device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
327 device_printf(dev, "max write timeout == 0, using 2^%jdms\n",
331 if (ttoexp + mtoexp > 51) {
332 device_printf(dev, "insane max write timeout: 2^%jdus\n",
336 sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp);
337 sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] =
338 sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp);
340 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE);
341 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE);
342 /* Don't check for 0, it means not-supported. */
344 device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
347 if (ttoexp + mtoexp > 51) {
348 device_printf(dev, "insane max write timeout: 2^%jdus\n",
352 sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] =
353 SBT_1US * (1ULL << cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE));
354 sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] =
355 sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] *
356 (1ULL << cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE));
358 /* Get the maximum size of a multibyte program */
359 if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0)
	/*
	 * NOTE(review): both bytes are read from CFI_QRY_MAXBUF; by the
	 * pattern of the other 16-bit query reads in this file the high
	 * byte looks like it was meant to come from CFI_QRY_MAXBUF + 1 —
	 * verify against the CFI query-structure layout before changing.
	 */
360 sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) |
361 cfi_read_qry(sc, CFI_QRY_MAXBUF) << 8);
365 /* Get erase regions. */
366 sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
367 sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
368 M_TEMP, M_WAITOK | M_ZERO);
369 for (r = 0; r < sc->sc_regions; r++) {
	/* Per region: 16-bit block count (minus one) ... */
370 blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
371 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
372 sc->sc_region[r].r_blocks = blocks + 1;
	/* ... and 16-bit block size; 0 encodes a 128-byte block. */
374 blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
375 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
376 sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
380 /* Reset the device to a default state. */
381 cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);
	/* Print the geometry, e.g. "[64x64KB,8x8KB]". */
384 device_printf(dev, "[");
385 for (r = 0; r < sc->sc_regions; r++) {
386 printf("%ux%s%s", sc->sc_region[r].r_blocks,
387 cfi_fmtsize(sc->sc_region[r].r_blksz),
388 (r == sc->sc_regions - 1) ? "]\n" : ",");
392 u = device_get_unit(dev);
393 sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
394 "%s%u", cfi_driver_name, u);
395 sc->sc_nod->si_drv1 = sc;
399 #ifdef CFI_SUPPORT_STRATAFLASH
401 * Store the Intel factory PPR in the environment. In some
402 * cases it is the most unique ID on a board.
404 if (cfi_intel_get_factory_pr(sc, &ppr) == 0) {
405 if (snprintf(name, sizeof(name), "%s.factory_ppr",
406 device_get_nameunit(dev)) < (sizeof(name) - 1) &&
407 snprintf(value, sizeof(value), "0x%016jx", ppr) <
409 (void) setenv(name, value);
	/* Attach the disk-interface child driver. */
413 device_add_child(dev, "cfid", -1);
414 bus_generic_attach(dev);
/*
 * cfi_add_sysctls: expose the timeout-exceeded counters (typical and
 * maximum, for erase and write; plus buffered write when the device
 * supports a write buffer) as read-only unsigned sysctl nodes under
 * this device's sysctl tree.
 *
 * NOTE(review): "timout" in the node names is a typo, but the names
 * are a published userland interface — renaming them would break any
 * existing consumer, so they are left as-is.
 */
420 cfi_add_sysctls(struct cfi_softc *sc)
422 struct sysctl_ctx_list *ctx;
423 struct sysctl_oid_list *children;
425 ctx = device_get_sysctl_ctx(sc->sc_dev);
426 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
428 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
429 "typical_erase_timout_count",
430 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE],
431 0, "Number of times the typical erase timeout was exceeded");
432 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
433 "max_erase_timout_count",
434 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0,
435 "Number of times the maximum erase timeout was exceeded");
436 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
437 "typical_write_timout_count",
438 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0,
439 "Number of times the typical write timeout was exceeded");
440 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
441 "max_write_timout_count",
442 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0,
443 "Number of times the maximum write timeout was exceeded");
	/* Buffered-write counters only exist when a write buffer does. */
444 if (sc->sc_maxbuf > 0) {
445 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
446 "typical_bufwrite_timout_count",
447 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0,
448 "Number of times the typical buffered write timeout was "
450 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
451 "max_bufwrite_timout_count",
452 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0,
453 "Number of times the maximum buffered write timeout was "
/*
 * cfi_detach: tear down what cfi_attach() created — destroy the
 * character-device node, free the erase-region table, and release the
 * memory-mapped bus resource.
 */
459 cfi_detach(device_t dev)
461 struct cfi_softc *sc;
463 sc = device_get_softc(dev);
465 destroy_dev(sc->sc_nod);
466 free(sc->sc_region, M_TEMP);
467 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
/*
 * cfi_wait_ready: poll the flash until the operation started at 'start'
 * completes or the per-command maximum timeout expires.  Intel command
 * sets poll the status register (WSMS done bit, then decode the error
 * bits); AMD command sets use toggle polling — bit 0x40 reads the same
 * twice in a row once the operation is finished.  The typical/maximum
 * timeout counters exposed via sysctl are bumped on overruns.
 * Apparently returns 0 on success or an errno derived from the status
 * bits (the assignment lines are elided in this view — confirm).
 */
472 cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start,
473 enum cfi_wait_cmd cmd)
475 int done, error, tto_exceeded;
476 uint32_t st0 = 0, st = 0;
482 while (!done && !error) {
484 * Save time before we start so we always do one check
485 * after the timeout has expired.
489 switch (sc->sc_cmdset) {
490 case CFI_VEND_INTEL_ECS:
491 case CFI_VEND_INTEL_SCS:
492 st = cfi_read(sc, ofs);
493 done = (st & CFI_INTEL_STATUS_WSMS);
495 /* NB: bit 0 is reserved */
	/* Mask reserved and done bits, leaving only error bits. */
496 st &= ~(CFI_INTEL_XSTATUS_RSVD |
497 CFI_INTEL_STATUS_WSMS |
498 CFI_INTEL_STATUS_RSVD);
499 if (st & CFI_INTEL_STATUS_DPS)
501 else if (st & CFI_INTEL_STATUS_PSLBS)
503 else if (st & CFI_INTEL_STATUS_ECLBS)
509 case CFI_VEND_AMD_SCS:
510 case CFI_VEND_AMD_ECS:
	/* Toggle-bit polling: two consecutive reads of bit 0x40. */
511 st0 = cfi_read(sc, ofs);
512 st = cfi_read(sc, ofs);
513 done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
	/* Typical timeout exceeded: count it once, keep polling. */
518 now > start + sc->sc_typical_timeouts[cmd]) {
521 sc->sc_tto_counts[cmd]++;
522 #ifdef CFI_DEBUG_TIMEOUT
523 device_printf(sc->sc_dev,
524 "typical timeout exceeded (cmd %d)", cmd);
	/* Maximum timeout exceeded: count it and give up. */
527 if (now > start + sc->sc_max_timeouts[cmd]) {
528 sc->sc_mto_counts[cmd]++;
529 #ifdef CFI_DEBUG_TIMEOUT
530 device_printf(sc->sc_dev,
531 "max timeout exceeded (cmd %d)", cmd);
539 printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
/*
 * cfi_write_block: flush the shadow write buffer (sc_wrbuf) back to the
 * flash block at sc_wrofs.  The block is erased first only if some bit
 * must transition 0 -> 1 (programming can only clear bits, detected by
 * the (wrbuf & wrbufcpy) != wrbuf test below).  Data is then programmed
 * with the Intel buffered-write command when the device advertises a
 * write buffer larger than the bus width, otherwise one byte/word at a
 * time, skipping units that are unchanged.  Interrupts are disabled
 * around each command/data pair so they reach the chip back-to-back.
 */
544 cfi_write_block(struct cfi_softc *sc)
552 int error, i, neederase = 0;
557 /* Intel flash must be unlocked before modification */
558 switch (sc->sc_cmdset) {
559 case CFI_VEND_INTEL_ECS:
560 case CFI_VEND_INTEL_SCS:
561 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
562 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB);
563 cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
567 /* Check if an erase is required. */
	/* A bit that must go 0 -> 1 can only be set by erasing. */
568 for (i = 0; i < sc->sc_wrbufsz; i++)
569 if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) {
575 intr = intr_disable();
576 start = sbinuptime();
577 /* Erase the block. */
578 switch (sc->sc_cmdset) {
579 case CFI_VEND_INTEL_ECS:
580 case CFI_VEND_INTEL_SCS:
581 cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
582 cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
584 case CFI_VEND_AMD_SCS:
585 case CFI_VEND_AMD_ECS:
586 cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
587 CFI_AMD_ERASE_SECTOR);
588 cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
591 /* Better safe than sorry... */
596 error = cfi_wait_ready(sc, sc->sc_wrofs, start,
603 /* Write the block using a multibyte write if supported. */
604 ptr.x8 = sc->sc_wrbuf;
605 cpyprt.x8 = sc->sc_wrbufcpy;
606 if (sc->sc_maxbuf > sc->sc_width) {
607 switch (sc->sc_cmdset) {
608 case CFI_VEND_INTEL_ECS:
609 case CFI_VEND_INTEL_SCS:
	/* Program sc_maxbuf bytes per buffered-write cycle. */
610 for (i = 0; i < sc->sc_wrbufsz; i += wlen) {
611 wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i);
613 intr = intr_disable();
615 start = sbinuptime();
	/* Request the write buffer; poll status until granted. */
617 cfi_write(sc, sc->sc_wrofs + i,
618 CFI_BCS_BUF_PROG_SETUP);
619 if (sbinuptime() > start + sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE]) {
623 st = cfi_read(sc, sc->sc_wrofs + i);
624 } while (! (st & CFI_INTEL_STATUS_WSMS));
	/* Word count for this cycle, encoded as count - 1. */
626 cfi_write(sc, sc->sc_wrofs + i,
627 (wlen / sc->sc_width) - 1);
628 switch (sc->sc_width) {
630 bus_space_write_region_1(sc->sc_tag,
631 sc->sc_handle, sc->sc_wrofs + i,
635 bus_space_write_region_2(sc->sc_tag,
636 sc->sc_handle, sc->sc_wrofs + i,
637 ptr.x16 + i / 2, wlen / 2);
640 bus_space_write_region_4(sc->sc_tag,
641 sc->sc_handle, sc->sc_wrofs + i,
642 ptr.x32 + i / 4, wlen / 4);
646 cfi_write(sc, sc->sc_wrofs + i,
651 error = cfi_wait_ready(sc, sc->sc_wrofs + i,
652 start, CFI_TIMEOUT_BUFWRITE);
658 /* Fall through to single word case */
664 /* Write the block one byte/word at a time. */
665 for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
667 /* Avoid writing unless we are actually changing bits */
669 switch (sc->sc_width) {
671 if(*(ptr.x8 + i) == *(cpyprt.x8 + i))
675 if(*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2))
679 if(*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4))
686 * Make sure the command to start a write and the
687 * actual write happens back-to-back without any
690 intr = intr_disable();
692 start = sbinuptime();
693 switch (sc->sc_cmdset) {
694 case CFI_VEND_INTEL_ECS:
695 case CFI_VEND_INTEL_SCS:
696 cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
698 case CFI_VEND_AMD_SCS:
699 case CFI_VEND_AMD_ECS:
700 cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
703 switch (sc->sc_width) {
705 bus_space_write_1(sc->sc_tag, sc->sc_handle,
706 sc->sc_wrofs + i, *(ptr.x8 + i));
709 bus_space_write_2(sc->sc_tag, sc->sc_handle,
710 sc->sc_wrofs + i, *(ptr.x16 + i / 2));
713 bus_space_write_4(sc->sc_tag, sc->sc_handle,
714 sc->sc_wrofs + i, *(ptr.x32 + i / 4));
720 error = cfi_wait_ready(sc, sc->sc_wrofs, start,
	/* Return the chip to read-array mode. */
729 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
731 /* Relock Intel flash */
732 switch (sc->sc_cmdset) {
733 case CFI_VEND_INTEL_ECS:
734 case CFI_VEND_INTEL_SCS:
735 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
736 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LB);
737 cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
743 #ifdef CFI_SUPPORT_STRATAFLASH
745 * Intel StrataFlash Protection Register Support.
747 * The memory includes a 128-bit Protection Register that can be
748 * used for security. There are two 64-bit segments; one is programmed
749 * at the factory with a unique 64-bit number which is immutable.
750 * The other segment is left blank for User (OEM) programming.
751 * The User/OEM segment is One Time Programmable (OTP). It can also
752 * be locked to prevent any further writes by setting bit 0 of the
753 * Protection Lock Register (PLR). The PLR can be written only once.
757 cfi_get16(struct cfi_softc *sc, int off)
759 uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
#ifdef CFI_ARMEDANDDANGEROUS
/*
 * Write one 16-bit word at 16-bit word index 'off' (byte offset
 * off << 1).  Only compiled in when CFI_ARMEDANDDANGEROUS is set,
 * since it is used exclusively by the irreversible protection-register
 * programming paths below.
 */
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
	bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
}
#endif
772 * Read the factory-defined 64-bit segment of the PR.
775 cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
777 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
779 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
781 cfi_write(sc, 0, CFI_INTEL_READ_ID);
782 *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
783 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
784 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
785 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
786 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
791 * Read the User/OEM 64-bit segment of the PR.
794 cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
796 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
798 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
800 cfi_write(sc, 0, CFI_INTEL_READ_ID);
801 *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
802 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
803 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
804 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
805 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
810 * Write the User/OEM 64-bit segment of the PR.
811 * XXX should allow writing individual words/bytes
/*
 * Guarded by CFI_ARMEDANDDANGEROUS because the OEM segment is One Time
 * Programmable: without that option this only logs a warning.  Words
 * 7..4 are programmed high-to-low, 16 bits per iteration, with
 * interrupts disabled around each setup/data pair.
 */
814 cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
816 #ifdef CFI_ARMEDANDDANGEROUS
822 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
824 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
826 #ifdef CFI_ARMEDANDDANGEROUS
	/* Consume 'id' 16 bits at a time, least significant word last. */
827 for (i = 7; i >= 4; i--, id >>= 16) {
828 intr = intr_disable();
829 start = sbinuptime();
830 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
831 cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
833 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
838 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
841 device_printf(sc->sc_dev, "%s: OEM PR not set, "
842 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
848 * Read the contents of the Protection Lock Register.
851 cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
853 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
855 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
857 cfi_write(sc, 0, CFI_INTEL_READ_ID);
858 *plr = cfi_get16(sc, CFI_INTEL_PLR);
859 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
864 * Write the Protection Lock Register to lock down the
865 * user-settable segment of the Protection Register.
866 * NOTE: this operation is not reversible.
/*
 * Guarded by CFI_ARMEDANDDANGEROUS: writing 0xFFFD clears bit 1 of the
 * PLR, permanently locking the OEM segment.  Without the option this
 * only logs a warning.
 */
869 cfi_intel_set_plr(struct cfi_softc *sc)
871 #ifdef CFI_ARMEDANDDANGEROUS
876 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
878 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
880 #ifdef CFI_ARMEDANDDANGEROUS
881 /* worthy of console msg */
882 device_printf(sc->sc_dev, "set PLR\n");
883 intr = intr_disable();
885 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
886 cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
888 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
890 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
893 device_printf(sc->sc_dev, "%s: PLR not set, "
894 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
898 #endif /* CFI_SUPPORT_STRATAFLASH */