2 * Copyright (c) 2007, Juniper Networks, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
39 #include <sys/endian.h>
40 #include <sys/kernel.h>
41 #include <sys/malloc.h>
42 #include <sys/module.h>
44 #include <sys/sysctl.h>
46 #include <machine/bus.h>
48 #include <dev/cfi/cfi_reg.h>
49 #include <dev/cfi/cfi_var.h>
51 extern struct cdevsw cfi_cdevsw;
53 char cfi_driver_name[] = "cfi";
54 devclass_t cfi_devclass;
55 devclass_t cfi_diskclass;
58 cfi_read_raw(struct cfi_softc *sc, u_int ofs)
62 ofs &= ~(sc->sc_width - 1);
63 switch (sc->sc_width) {
65 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
68 val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
71 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
81 cfi_read(struct cfi_softc *sc, u_int ofs)
86 ofs &= ~(sc->sc_width - 1);
87 switch (sc->sc_width) {
89 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
92 sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
96 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
107 cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
110 ofs &= ~(sc->sc_width - 1);
111 switch (sc->sc_width) {
113 bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
116 bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
119 bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
125 cfi_read_qry(struct cfi_softc *sc, u_int ofs)
129 cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
130 val = cfi_read(sc, ofs * sc->sc_width);
131 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
136 cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
139 cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
140 cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
141 cfi_write(sc, ofs + addr, data);
145 cfi_fmtsize(uint32_t sz)
148 static const char *sfx[] = { "", "K", "M", "G" };
152 while (sfxidx < 3 && sz > 1023) {
157 sprintf(buf, "%u%sB", sz, sfx[sfxidx]);
162 cfi_probe(device_t dev)
165 struct cfi_softc *sc;
168 uint16_t iface, vend;
170 sc = device_get_softc(dev);
174 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
176 if (sc->sc_res == NULL)
179 sc->sc_tag = rman_get_bustag(sc->sc_res);
180 sc->sc_handle = rman_get_bushandle(sc->sc_res);
182 if (sc->sc_width == 0) {
184 while (sc->sc_width <= 4) {
185 if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
189 } else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
193 if (sc->sc_width > 4) {
198 /* We got a Q. Check if we also have the R and the Y. */
199 if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
200 cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
205 /* Get the vendor and command set. */
206 vend = cfi_read_qry(sc, CFI_QRY_VEND) |
207 (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);
209 sc->sc_cmdset = vend;
212 case CFI_VEND_AMD_ECS:
213 case CFI_VEND_AMD_SCS:
214 vend_str = "AMD/Fujitsu";
216 case CFI_VEND_INTEL_ECS:
217 vend_str = "Intel/Sharp";
219 case CFI_VEND_INTEL_SCS:
222 case CFI_VEND_MITSUBISHI_ECS:
223 case CFI_VEND_MITSUBISHI_SCS:
224 vend_str = "Mitsubishi";
227 vend_str = "Unknown vendor";
231 /* Get the device size. */
232 sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);
234 /* Sanity-check the I/F */
235 iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
236 (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);
239 * Adding 1 to iface will give us a bit-wise "switch"
240 * that allows us to test for the interface width by
241 * testing a single bit.
245 error = (iface & sc->sc_width) ? 0 : EINVAL;
249 snprintf(desc, sizeof(desc), "%s - %s", vend_str,
250 cfi_fmtsize(sc->sc_size));
251 device_set_desc_copy(dev, desc);
254 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
259 cfi_attach(device_t dev)
261 struct cfi_softc *sc;
265 sc = device_get_softc(dev);
269 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
271 if (sc->sc_res == NULL)
274 sc->sc_tag = rman_get_bustag(sc->sc_res);
275 sc->sc_handle = rman_get_bushandle(sc->sc_res);
277 /* Get time-out values for erase and write. */
278 sc->sc_write_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
279 sc->sc_erase_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
280 sc->sc_write_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
281 sc->sc_erase_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
283 /* Get erase regions. */
284 sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
285 sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
286 M_TEMP, M_WAITOK | M_ZERO);
287 for (r = 0; r < sc->sc_regions; r++) {
288 blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
289 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
290 sc->sc_region[r].r_blocks = blocks + 1;
292 blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
293 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
294 sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
298 /* Reset the device to a default state. */
299 cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);
302 device_printf(dev, "[");
303 for (r = 0; r < sc->sc_regions; r++) {
304 printf("%ux%s%s", sc->sc_region[r].r_blocks,
305 cfi_fmtsize(sc->sc_region[r].r_blksz),
306 (r == sc->sc_regions - 1) ? "]\n" : ",");
310 u = device_get_unit(dev);
311 sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
312 "%s%u", cfi_driver_name, u);
313 sc->sc_nod->si_drv1 = sc;
315 device_add_child(dev, "cfid", -1);
316 bus_generic_attach(dev);
322 cfi_detach(device_t dev)
324 struct cfi_softc *sc;
326 sc = device_get_softc(dev);
328 destroy_dev(sc->sc_nod);
329 free(sc->sc_region, M_TEMP);
330 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
335 cfi_wait_ready(struct cfi_softc *sc, u_int ofs, u_int timeout)
338 uint32_t st0 = 0, st = 0;
343 while (!done && !error && timeout) {
347 switch (sc->sc_cmdset) {
348 case CFI_VEND_INTEL_ECS:
349 case CFI_VEND_INTEL_SCS:
350 st = cfi_read(sc, ofs);
351 done = (st & CFI_INTEL_STATUS_WSMS);
353 /* NB: bit 0 is reserved */
354 st &= ~(CFI_INTEL_XSTATUS_RSVD |
355 CFI_INTEL_STATUS_WSMS |
356 CFI_INTEL_STATUS_RSVD);
357 if (st & CFI_INTEL_STATUS_DPS)
359 else if (st & CFI_INTEL_STATUS_PSLBS)
361 else if (st & CFI_INTEL_STATUS_ECLBS)
367 case CFI_VEND_AMD_SCS:
368 case CFI_VEND_AMD_ECS:
369 st0 = cfi_read(sc, ofs);
370 st = cfi_read(sc, ofs);
371 done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
378 printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
383 cfi_write_block(struct cfi_softc *sc)
393 /* Erase the block. */
394 switch (sc->sc_cmdset) {
395 case CFI_VEND_INTEL_ECS:
396 case CFI_VEND_INTEL_SCS:
397 cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
398 cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
400 case CFI_VEND_AMD_SCS:
401 case CFI_VEND_AMD_ECS:
402 cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
403 CFI_AMD_ERASE_SECTOR);
404 cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
407 /* Better safe than sorry... */
410 error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_erase_timeout);
414 /* Write the block. */
415 ptr.x8 = sc->sc_wrbuf;
416 for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
419 * Make sure the command to start a write and the
420 * actual write happens back-to-back without any
423 intr = intr_disable();
425 switch (sc->sc_cmdset) {
426 case CFI_VEND_INTEL_ECS:
427 case CFI_VEND_INTEL_SCS:
428 cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
430 case CFI_VEND_AMD_SCS:
431 case CFI_VEND_AMD_ECS:
432 cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
435 switch (sc->sc_width) {
437 bus_space_write_1(sc->sc_tag, sc->sc_handle,
438 sc->sc_wrofs + i, *(ptr.x8)++);
441 bus_space_write_2(sc->sc_tag, sc->sc_handle,
442 sc->sc_wrofs + i, *(ptr.x16)++);
445 bus_space_write_4(sc->sc_tag, sc->sc_handle,
446 sc->sc_wrofs + i, *(ptr.x32)++);
452 error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_write_timeout);
460 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
464 #ifdef CFI_SUPPORT_STRATAFLASH
466 * Intel StrataFlash Protection Register Support.
468 * The memory includes a 128-bit Protection Register that can be
469 * used for security. There are two 64-bit segments; one is programmed
470 * at the factory with a unique 64-bit number which is immutable.
471 * The other segment is left blank for User (OEM) programming.
472 * The User/OEM segment is One Time Programmable (OTP). It can also
473 * be locked to prevent any further writes by setting bit 0 of the
 * Protection Lock Register (PLR). The PLR can be written only once.
478 cfi_get16(struct cfi_softc *sc, int off)
480 uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
#ifdef CFI_ARMEDANDDANGEROUS
/*
 * Write one 16-bit word at word offset 'off' (byte offset off << 1).
 * Guarded by CFI_ARMEDANDDANGEROUS because Protection Register writes
 * are one-time-programmable and irreversible.
 */
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
	bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
}
#endif
493 * Read the factory-defined 64-bit segment of the PR.
496 cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
498 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
500 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
502 cfi_write(sc, 0, CFI_INTEL_READ_ID);
503 *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
504 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
505 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
506 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
507 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
512 * Read the User/OEM 64-bit segment of the PR.
515 cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
517 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
519 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
521 cfi_write(sc, 0, CFI_INTEL_READ_ID);
522 *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
523 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
524 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
525 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
526 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
531 * Write the User/OEM 64-bit segment of the PR.
532 * XXX should allow writing individual words/bytes
535 cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
537 #ifdef CFI_ARMEDANDDANGEROUS
542 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
544 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
546 #ifdef CFI_ARMEDANDDANGEROUS
547 for (i = 7; i >= 4; i--, id >>= 16) {
548 intr = intr_disable();
549 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
550 cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
552 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS,
553 sc->sc_write_timeout);
557 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
560 device_printf(sc->sc_dev, "%s: OEM PR not set, "
561 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
567 * Read the contents of the Protection Lock Register.
570 cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
572 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
574 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
576 cfi_write(sc, 0, CFI_INTEL_READ_ID);
577 *plr = cfi_get16(sc, CFI_INTEL_PLR);
578 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
583 * Write the Protection Lock Register to lock down the
584 * user-settable segment of the Protection Register.
585 * NOTE: this operation is not reversible.
588 cfi_intel_set_plr(struct cfi_softc *sc)
590 #ifdef CFI_ARMEDANDDANGEROUS
594 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
596 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
598 #ifdef CFI_ARMEDANDDANGEROUS
599 /* worthy of console msg */
600 device_printf(sc->sc_dev, "set PLR\n");
601 intr = intr_disable();
602 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
603 cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
605 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, sc->sc_write_timeout);
606 cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
609 device_printf(sc->sc_dev, "%s: PLR not set, "
610 "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
614 #endif /* CFI_SUPPORT_STRATAFLASH */