1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2007, Juniper Networks, Inc.
5  * Copyright (c) 2012-2013, SRI International
6  * All rights reserved.
7  *
8  * Portions of this software were developed by SRI International and the
9  * University of Cambridge Computer Laboratory under DARPA/AFRL contract
10  * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
11  * programme.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the author nor the names of any co-contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
27  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37
38 #include <sys/cdefs.h>
39 #include "opt_cfi.h"
40
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/bus.h>
44 #include <sys/conf.h>
45 #include <sys/endian.h>
46 #include <sys/kenv.h>
47 #include <sys/kernel.h>
48 #include <sys/malloc.h>   
49 #include <sys/module.h>
50 #include <sys/rman.h>
51 #include <sys/sysctl.h>
52
53 #include <machine/bus.h>
54
55 #include <dev/cfi/cfi_reg.h>
56 #include <dev/cfi/cfi_var.h>
57
58 static void cfi_add_sysctls(struct cfi_softc *);
59
60 extern struct cdevsw cfi_cdevsw;
61
62 char cfi_driver_name[] = "cfi";
63
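/*
 * Read one device word at 'ofs' (aligned down to the port width) with
 * no byte-order conversion applied.
 */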
64 uint32_t
65 cfi_read_raw(struct cfi_softc *sc, u_int ofs)
66 {
67         uint32_t val;
68
69         ofs &= ~(sc->sc_width - 1);
70         switch (sc->sc_width) {
71         case 1:
72                 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
73                 break;
74         case 2:
75                 val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
76                 break;
77         case 4:
78                 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
79                 break;
80         default:
81                 val = ~0;
82                 break;
83         }
84         return (val);
85 }
86
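/*
 * Read one device word at 'ofs', converting from the flash's
 * little-endian layout unless CFI_HARDWAREBYTESWAP says the bus
 * hardware already swaps for us.
 */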
87 uint32_t
88 cfi_read(struct cfi_softc *sc, u_int ofs)
89 {
90         uint32_t val;
91         uint16_t sval;
92
93         ofs &= ~(sc->sc_width - 1);
94         switch (sc->sc_width) {
95         case 1:
96                 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
97                 break;
98         case 2:
99                 sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
100 #ifdef CFI_HARDWAREBYTESWAP
101                 val = sval;
102 #else
103                 val = le16toh(sval);
104 #endif
105                 break;
106         case 4:
107                 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
108 #ifndef CFI_HARDWAREBYTESWAP
109                 val = le32toh(val);
110 #endif
111                 break;
112         default:
113                 val = ~0;
114                 break;
115         }
116         return (val);
117 }
118
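/*
 * Write one device word at 'ofs', applying the same byte-order policy
 * as cfi_read().
 */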
119 static void
120 cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
121 {
122
123         ofs &= ~(sc->sc_width - 1);
124         switch (sc->sc_width) {
125         case 1:
126                 bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
127                 break;
128         case 2:
129 #ifdef CFI_HARDWAREBYTESWAP
130                 bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, val);
131 #else
132                 bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
133
134 #endif
135                 break;
136         case 4:
137 #ifdef CFI_HARDWAREBYTESWAP
138                 bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, val);
139 #else
140                 bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
141 #endif
142                 break;
143         }
144 }
145
146 /*
147  * This is the same workaround as NetBSD's sys/dev/nor/cfi.c cfi_reset_default().
148  */
149 static void
150 cfi_reset_default(struct cfi_softc *sc)
151 {
152
153         cfi_write(sc, 0, CFI_BCS_READ_ARRAY2);
154         cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
155 }
156
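/*
 * Read one byte from the CFI query table: enter query mode, fetch the
 * byte at 'ofs' (scaled by the port width) and drop back to read-array
 * mode.
 */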
157 uint8_t
158 cfi_read_qry(struct cfi_softc *sc, u_int ofs)
159 {
160         uint8_t val;
161
162         cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA); 
163         val = cfi_read(sc, ofs * sc->sc_width);
164         cfi_reset_default(sc);
165         return (val);
166 }
167
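/*
 * Issue an AMD/Fujitsu-style command: the two-cycle unlock sequence
 * followed by the command write at 'ofs' + 'addr'.
 */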
168 static void
169 cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
170 {
171
172         cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
173         cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
174         cfi_write(sc, ofs + addr, data);
175 }
176
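/*
 * Format a byte count as a short human-readable string ("512KB", "8MB").
 * Returns a pointer to a static buffer, so the result must be consumed
 * before the next call.
 */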
177 static char *
178 cfi_fmtsize(uint32_t sz)
179 {
180         static char buf[8];
181         static const char *sfx[] = { "", "K", "M", "G" };
182         int sfxidx;
183
184         sfxidx = 0;
185         while (sfxidx < 3 && sz > 1023) {
186                 sz /= 1024;
187                 sfxidx++;
188         }
189
190         sprintf(buf, "%u%sB", sz, sfx[sfxidx]);
191         return (buf);
192 }
193
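/*
 * Probe: find the port width by hunting for the "QRY" signature,
 * record the command set and device size, sanity-check the interface
 * code and build the device description.
 */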
194 int
195 cfi_probe(device_t dev)
196 {
197         char desc[80];
198         struct cfi_softc *sc;
199         char *vend_str;
200         int error;
201         uint16_t iface, vend;
202
203         sc = device_get_softc(dev);
204         sc->sc_dev = dev;
205
206         sc->sc_rid = 0;
207         sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
208             RF_ACTIVE);
209         if (sc->sc_res == NULL)
210                 return (ENXIO);
211
212         sc->sc_tag = rman_get_bustag(sc->sc_res);
213         sc->sc_handle = rman_get_bushandle(sc->sc_res);
214
215         if (sc->sc_width == 0) {
216                 sc->sc_width = 1;
217                 while (sc->sc_width <= 4) {
218                         if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
219                                 break;
220                         sc->sc_width <<= 1;
221                 }
222         } else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
223                 error = ENXIO;
224                 goto out;
225         }
226         if (sc->sc_width > 4) {
227                 error = ENXIO;
228                 goto out;
229         }
230
231         /* We got a Q. Check if we also have the R and the Y. */
232         if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
233             cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
234                 error = ENXIO;
235                 goto out;
236         }
237
238         /* Get the vendor and command set. */
239         vend = cfi_read_qry(sc, CFI_QRY_VEND) |
240             (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);
241
242         sc->sc_cmdset = vend;
243
244         switch (vend) {
245         case CFI_VEND_AMD_ECS:
246         case CFI_VEND_AMD_SCS:
247                 vend_str = "AMD/Fujitsu";
248                 break;
249         case CFI_VEND_INTEL_ECS:
250                 vend_str = "Intel/Sharp";
251                 break;
252         case CFI_VEND_INTEL_SCS:
253                 vend_str = "Intel";
254                 break;
255         case CFI_VEND_MITSUBISHI_ECS:
256         case CFI_VEND_MITSUBISHI_SCS:
257                 vend_str = "Mitsubishi";
258                 break;
259         default:
260                 vend_str = "Unknown vendor";
261                 break;
262         }
263
264         /* Get the device size. */
265         sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);
266
267         /* Sanity-check the I/F */
268         iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
269             (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);
270
271         /*
272          * Adding 1 to iface will give us a bit-wise "switch"
273          * that allows us to test for the interface width by
274          * testing a single bit.
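         *
         * For example, with the common CFI interface codes this gives
         * 0 (x8 only) -> 1, 1 (x16 only) -> 2, 2 (x8/x16) -> 3 and
         * 3 (x32) -> 4, so "iface & sc_width" is nonzero exactly when
         * the probed port width is one the device supports.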
275          */
276         iface++;
277
278         error = (iface & sc->sc_width) ? 0 : EINVAL;
279         if (error)
280                 goto out;
281
282         snprintf(desc, sizeof(desc), "%s - %s", vend_str,
283             cfi_fmtsize(sc->sc_size));
284         device_set_desc_copy(dev, desc);
285
286  out:
287         bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
288         return (error);
289 }
290
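/*
 * Attach: read the erase/write/buffered-write timeout parameters and
 * the erase-region layout from the query table, then create the
 * /dev/cfi%u character device and a "cfid" child device.
 */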
291 int
292 cfi_attach(device_t dev) 
293 {
294         struct cfi_softc *sc;
295         u_int blksz, blocks;
296         u_int r, u;
297         uint64_t mtoexp, ttoexp;
298 #ifdef CFI_SUPPORT_STRATAFLASH
299         uint64_t ppr;
300         char name[KENV_MNAMELEN], value[32];
301 #endif
302
303         sc = device_get_softc(dev);
304         sc->sc_dev = dev;
305
306         sc->sc_rid = 0;
307         sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
308 #ifndef ATSE_CFI_HACK
309             RF_ACTIVE);
310 #else
311             RF_ACTIVE | RF_SHAREABLE);
312 #endif
313         if (sc->sc_res == NULL)
314                 return (ENXIO);
315
316         sc->sc_tag = rman_get_bustag(sc->sc_res);
317         sc->sc_handle = rman_get_bushandle(sc->sc_res);
318
319         /* Get time-out values for erase, write, and buffer write. */
320         ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
321         mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
322         if (ttoexp == 0) {
323                 device_printf(dev, "erase timeout == 0, using 2^16ms\n");
324                 ttoexp = 16;
325         }
326         if (ttoexp > 41) {
327                 device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp);
328                 return (EINVAL);
329         }
330         if (mtoexp == 0) {
331                 device_printf(dev, "max erase timeout == 0, using 2^%jdms\n",
332                     ttoexp + 4);
333                 mtoexp = 4;
334         }
335         if (ttoexp + mtoexp > 41) {
336                 device_printf(dev, "insane max erase timeout: 2^%jd\n",
337                     ttoexp + mtoexp);
338                 return (EINVAL);
339         }
340         sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp);
341         sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] =
342             sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp);
343
344         ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
345         mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
346         if (ttoexp == 0) {
347                 device_printf(dev, "write timeout == 0, using 2^18us\n");
348                 ttoexp = 18;
349         }
350         if (ttoexp > 51) {
351                 device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
352                 return (EINVAL);
353         }
354         if (mtoexp == 0) {
355                 device_printf(dev, "max write timeout == 0, using 2^%jdus\n",
356                     ttoexp + 4);
357                 mtoexp = 4;
358         }
359         if (ttoexp + mtoexp > 51) {
360                 device_printf(dev, "insane max write timeout: 2^%jdus\n",
361                     ttoexp + mtoexp);
362                 return (EINVAL);
363         }
364         sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp);
365         sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] =
366             sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp);
367
368         ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE);
369         mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE);
370         /* Don't check for 0, it means not-supported. */
371         if (ttoexp > 51) {
372                 device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
373                 return (EINVAL);
374         }
375         if (ttoexp + mtoexp > 51) {
376                 device_printf(dev, "insane max write timeout: 2^%jdus\n",
377                     ttoexp + mtoexp);
378                 return (EINVAL);
379         }
380         sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] =
381             SBT_1US * (1ULL << ttoexp);
382         sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] =
383             sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] *
384             (1ULL << mtoexp);
385
386         /* Get the maximum size of a multibyte program */
387         if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0)
388                 sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) |
389                     cfi_read_qry(sc, CFI_QRY_MAXBUF + 1) << 8);
390         else
391                 sc->sc_maxbuf = 0;
392
393         /* Get erase regions. */
394         sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
395         sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
396             M_TEMP, M_WAITOK | M_ZERO);
397         for (r = 0; r < sc->sc_regions; r++) {
398                 blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
399                     (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
400                 sc->sc_region[r].r_blocks = blocks + 1;
401
402                 blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
403                     (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
404                 sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
405                     blksz * 256;
406         }
407
408         /* Reset the device to a default state. */
409         cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);
410
411         if (bootverbose) {
412                 device_printf(dev, "[");
413                 for (r = 0; r < sc->sc_regions; r++) {
414                         printf("%ux%s%s", sc->sc_region[r].r_blocks,
415                             cfi_fmtsize(sc->sc_region[r].r_blksz),
416                             (r == sc->sc_regions - 1) ? "]\n" : ",");
417                 }
418         }
419
420         if (sc->sc_cmdset == CFI_VEND_AMD_ECS  ||
421             sc->sc_cmdset == CFI_VEND_AMD_SCS) {
422                 cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_AUTO_SELECT);
423                 sc->sc_manid = cfi_read(sc, 0);
424                 sc->sc_devid = cfi_read(sc, 2);
425                 device_printf(dev, "Manufacturer ID:%x Device ID:%x\n",
426                     sc->sc_manid, sc->sc_devid);
427                 cfi_write(sc, 0, CFI_BCS_READ_ARRAY2);
428         }
429
430         u = device_get_unit(dev);
431         sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
432             "%s%u", cfi_driver_name, u);
433         sc->sc_nod->si_drv1 = sc;
434
435         cfi_add_sysctls(sc);
436
437 #ifdef CFI_SUPPORT_STRATAFLASH
438         /*
439          * Store the Intel factory PPR in the environment.  In some
440          * cases it is the only unique identifier available on a board.
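         * The variable is named after the device instance, e.g.
         * "cfi0.factory_ppr".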
441          */
442         if (cfi_intel_get_factory_pr(sc, &ppr) == 0) {
443                 if (snprintf(name, sizeof(name), "%s.factory_ppr",
444                     device_get_nameunit(dev)) < (sizeof(name) - 1) &&
445                     snprintf(value, sizeof(value), "0x%016jx", ppr) <
446                     (sizeof(value) - 1))
447                         (void) kern_setenv(name, value);
448         }
449 #endif
450
451         device_add_child(dev, "cfid", -1);
452         bus_generic_attach(dev);
453
454         return (0);
455 }
456
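/*
 * Export the timeout-exceeded counters under the device's sysctl tree.
 */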
457 static void
458 cfi_add_sysctls(struct cfi_softc *sc)
459 {
460         struct sysctl_ctx_list *ctx;
461         struct sysctl_oid_list *children;
462
463         ctx = device_get_sysctl_ctx(sc->sc_dev);
464         children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
465
466         SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
467             "typical_erase_timeout_count",
468             CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE],
469             0, "Number of times the typical erase timeout was exceeded");
470         SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
471             "max_erase_timeout_count",
472             CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0,
473             "Number of times the maximum erase timeout was exceeded");
474         SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
475             "typical_write_timeout_count",
476             CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0,
477             "Number of times the typical write timeout was exceeded");
478         SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
479             "max_write_timeout_count",
480             CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0,
481             "Number of times the maximum write timeout was exceeded");
482         if (sc->sc_maxbuf > 0) {
483                 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
484                     "typical_bufwrite_timeout_count",
485                     CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0,
486                     "Number of times the typical buffered write timeout was "
487                     "exceeded");
488                 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
489                     "max_bufwrite_timeout_count",
490                     CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0,
491                     "Number of times the maximum buffered write timeout was "
492                     "exceeded");
493         }
494 }
495
496 int
497 cfi_detach(device_t dev)
498 {
499         struct cfi_softc *sc;
500
501         sc = device_get_softc(dev);
502
503         destroy_dev(sc->sc_nod);
504         free(sc->sc_region, M_TEMP);
505         bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
506         return (0);
507 }
508
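/*
 * Return true if the 'sz' bytes starting at 'ofs' read back as all
 * ones, i.e. the range is already erased.
 */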
509 static bool
510 cfi_check_erase(struct cfi_softc *sc, u_int ofs, u_int sz)
511 {
512         bool result;
513         int i;
514         uint32_t val;
515
516         result = false;
517         for (i = 0; i < sz; i += sc->sc_width) {
518                 val = cfi_read(sc, ofs + i);
519                 switch (sc->sc_width) {
520                 case 1:
521                         if (val != 0xff)
522                                 goto out;
523                         continue;
524                 case 2:
525                         if (val != 0xffff)
526                                 goto out;
527                         continue;
528                 case 4:
529                         if (val != 0xffffffff)
530                                 goto out;
531                         continue;
532                 }
533         }
534         result = true;
535
536 out:
537         return (result);
538 }
539
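/*
 * Poll the device until the operation started at 'start' finishes.
 * Returns 0 on completion, a specific errno for device-reported
 * failures, or ETIMEDOUT once the maximum timeout for 'cmd' expires.
 */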
540 static int
541 cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start,
542     enum cfi_wait_cmd cmd)
543 {
544         int done, error, tto_exceeded;
545         uint32_t st0 = 0, st = 0;
546         sbintime_t now;
547
548         done = 0;
549         error = 0;
550         tto_exceeded = 0;
551         while (!done && !error) {
552                 /*
553                  * Save time before we start so we always do one check
554                  * after the timeout has expired.
555                  */
556                 now = sbinuptime();
557
558                 switch (sc->sc_cmdset) {
559                 case CFI_VEND_INTEL_ECS:
560                 case CFI_VEND_INTEL_SCS:
561                         st = cfi_read(sc, ofs);
562                         done = (st & CFI_INTEL_STATUS_WSMS);
563                         if (done) {
564                                 /* NB: bit 0 is reserved */
565                                 st &= ~(CFI_INTEL_XSTATUS_RSVD |
566                                         CFI_INTEL_STATUS_WSMS |
567                                         CFI_INTEL_STATUS_RSVD);
568                                 if (st & CFI_INTEL_STATUS_DPS)
569                                         error = EPERM;
570                                 else if (st & CFI_INTEL_STATUS_PSLBS)
571                                         error = EIO;
572                                 else if (st & CFI_INTEL_STATUS_ECLBS)
573                                         error = ENXIO;
574                                 else if (st)
575                                         error = EACCES;
576                         }
577                         break;
578                 case CFI_VEND_AMD_SCS:
579                 case CFI_VEND_AMD_ECS:
580                         st0 = cfi_read(sc, ofs);
581                         st = cfi_read(sc, ofs);
582                         done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
583                         break;
584                 }
585
586                 if (tto_exceeded ||
587                     now > start + sc->sc_typical_timeouts[cmd]) {
588                         if (!tto_exceeded) {
589                                 tto_exceeded = 1;
590                                 sc->sc_tto_counts[cmd]++;
591 #ifdef CFI_DEBUG_TIMEOUT
592                                 device_printf(sc->sc_dev,
593                                     "typical timeout exceeded (cmd %d)", cmd);
594 #endif
595                         }
596                         if (now > start + sc->sc_max_timeouts[cmd]) {
597                                 sc->sc_mto_counts[cmd]++;
598 #ifdef CFI_DEBUG_TIMEOUT
599                                 device_printf(sc->sc_dev,
600                                     "max timeout exceeded (cmd %d)", cmd);
601 #endif
                                break;
602                         }
603                 }
604         }
605         if (!done && !error)
606                 error = ETIMEDOUT;
607         if (error)
608                 printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
609         return (error);
610 }
611
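/*
 * Program the block buffered in sc_wrbuf at offset sc_wrofs: erase it
 * first if any bit would have to go from 0 to 1, then rewrite it with
 * a buffered write when the chip supports one, or word at a time
 * otherwise.
 */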
612 int
613 cfi_write_block(struct cfi_softc *sc)
614 {
615         union {
616                 uint8_t         *x8;
617                 uint16_t        *x16;
618                 uint32_t        *x32;
619         } ptr, cpyprt;
620         register_t intr;
621         int error, i, j, neederase = 0;
622         uint32_t st;
623         u_int wlen;
624         sbintime_t start;
625         u_int minsz;
626         uint32_t val;
627
628         /* Intel flash must be unlocked before modification */
629         switch (sc->sc_cmdset) {
630         case CFI_VEND_INTEL_ECS:
631         case CFI_VEND_INTEL_SCS:
632                 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
633                 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB);
634                 cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
635                 break;
636         }
637
638         /* Check if an erase is required. */
639         for (i = 0; i < sc->sc_wrbufsz; i++)
640                 if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) {
641                         neederase = 1;
642                         break;
643                 }
644
645         if (neederase) {
646                 intr = intr_disable();
647                 start = sbinuptime();
648                 /* Erase the block. */
649                 switch (sc->sc_cmdset) {
650                 case CFI_VEND_INTEL_ECS:
651                 case CFI_VEND_INTEL_SCS:
652                         cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
653                         cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
654                         break;
655                 case CFI_VEND_AMD_SCS:
656                 case CFI_VEND_AMD_ECS:
657                         /* find minimum sector size */
658                         minsz = sc->sc_region[0].r_blksz;
659                         for (i = 1; i < sc->sc_regions; i++) {
660                                 if (sc->sc_region[i].r_blksz < minsz)
661                                         minsz = sc->sc_region[i].r_blksz;
662                         }
663                         cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
664                             CFI_AMD_ERASE_SECTOR);
665                         cfi_amd_write(sc, sc->sc_wrofs, 
666                             sc->sc_wrofs >> (ffs(minsz) - 1),
667                             CFI_AMD_BLOCK_ERASE);
668                         for (i = 0; i < CFI_AMD_MAXCHK; ++i) {
669                                 if (cfi_check_erase(sc, sc->sc_wrofs,
670                                     sc->sc_wrbufsz))
671                                         break;
672                                 DELAY(10);
673                         }
674                         if (i == CFI_AMD_MAXCHK) {
675                                 printf("\nCFI Sector Erase time out error\n");
676                                 return (ENODEV);
677                         }
678                         break;
679                 default:
680                         /* Better safe than sorry... */
681                         intr_restore(intr);
682                         return (ENODEV);
683                 }
684                 intr_restore(intr);
685                 error = cfi_wait_ready(sc, sc->sc_wrofs, start, 
686                     CFI_TIMEOUT_ERASE);
687                 if (error)
688                         goto out;
689         } else
690                 error = 0;
691
692         /* Write the block using a multibyte write if supported. */
693         ptr.x8 = sc->sc_wrbuf;
694         cpyprt.x8 = sc->sc_wrbufcpy;
695         if (sc->sc_maxbuf > sc->sc_width) {
696                 switch (sc->sc_cmdset) {
697                 case CFI_VEND_INTEL_ECS:
698                 case CFI_VEND_INTEL_SCS:
699                         for (i = 0; i < sc->sc_wrbufsz; i += wlen) {
700                                 wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i);
701
702                                 intr = intr_disable();
703
704                                 start = sbinuptime();
705                                 do {
706                                         cfi_write(sc, sc->sc_wrofs + i,
707                                             CFI_BCS_BUF_PROG_SETUP);
708                                         if (sbinuptime() > start + sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE]) {
709                                                 error = ETIMEDOUT;
710                                                 goto out;
711                                         }
712                                         st = cfi_read(sc, sc->sc_wrofs + i);
713                                 } while (! (st & CFI_INTEL_STATUS_WSMS));
714
715                                 cfi_write(sc, sc->sc_wrofs + i,
716                                     (wlen / sc->sc_width) - 1);
717                                 switch (sc->sc_width) {
718                                 case 1:
719                                         bus_space_write_region_1(sc->sc_tag,
720                                             sc->sc_handle, sc->sc_wrofs + i,
721                                             ptr.x8 + i, wlen);
722                                         break;
723                                 case 2:
724                                         bus_space_write_region_2(sc->sc_tag,
725                                             sc->sc_handle, sc->sc_wrofs + i,
726                                             ptr.x16 + i / 2, wlen / 2);
727                                         break;
728                                 case 4:
729                                         bus_space_write_region_4(sc->sc_tag,
730                                             sc->sc_handle, sc->sc_wrofs + i,
731                                             ptr.x32 + i / 4, wlen / 4);
732                                         break;
733                                 }
734
735                                 cfi_write(sc, sc->sc_wrofs + i,
736                                     CFI_BCS_CONFIRM);
737
738                                 intr_restore(intr);
739
740                                 error = cfi_wait_ready(sc, sc->sc_wrofs + i,
741                                     start, CFI_TIMEOUT_BUFWRITE);
742                                 if (error != 0)
743                                         goto out;
744                         }
745                         goto out;
746                 default:
747                         /* Fall through to single word case */
748                         break;
749                 }
750         }
751
752         /* Write the block one byte/word at a time. */
753         for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
754                 /* Avoid writing unless we are actually changing bits */
755                 if (!neederase) {
756                         switch (sc->sc_width) {
757                         case 1:
758                                 if(*(ptr.x8 + i) == *(cpyprt.x8 + i))
759                                         continue;
760                                 break;
761                         case 2:
762                                 if(*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2))
763                                         continue;
764                                 break;
765                         case 4:
766                                 if(*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4))
767                                         continue;
768                                 break;
769                         }
770                 }
771
772                 /*
773                  * Make sure the command to start a write and the
774                  * actual write happens back-to-back without any
775                  * excessive delays.
776                  */
777                 intr = intr_disable();
778
779                 start = sbinuptime();
780                 switch (sc->sc_cmdset) {
781                 case CFI_VEND_INTEL_ECS:
782                 case CFI_VEND_INTEL_SCS:
783                         cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
784                         break;
785                 case CFI_VEND_AMD_SCS:
786                 case CFI_VEND_AMD_ECS:
787                         cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
788                         break;
789                 }
790                 switch (sc->sc_width) {
791                 case 1:
792                         bus_space_write_1(sc->sc_tag, sc->sc_handle,
793                             sc->sc_wrofs + i, *(ptr.x8 + i));
794                         break;
795                 case 2:
796                         bus_space_write_2(sc->sc_tag, sc->sc_handle,
797                             sc->sc_wrofs + i, *(ptr.x16 + i / 2));
798                         break;
799                 case 4:
800                         bus_space_write_4(sc->sc_tag, sc->sc_handle,
801                             sc->sc_wrofs + i, *(ptr.x32 + i / 4));
802                         break;
803                 }
804                 
805                 intr_restore(intr);
806
807                 if (sc->sc_cmdset == CFI_VEND_AMD_ECS  ||
808                     sc->sc_cmdset == CFI_VEND_AMD_SCS) {
809                         for (j = 0; j < CFI_AMD_MAXCHK; ++j) {
810                                 switch (sc->sc_width) {
811                                 case 1:
812                                         val = *(ptr.x8 + i);
813                                         break;
814                                 case 2:
815                                         val = *(ptr.x16 + i / 2);
816                                         break;
817                                 case 4:
818                                         val = *(ptr.x32 + i / 4);
819                                         break;
820                                 }
821
822                                 if (cfi_read(sc, sc->sc_wrofs + i) == val)
823                                         break;
824                                         
825                                 DELAY(10);
826                         }
827                         if (j == CFI_AMD_MAXCHK) {
828                                 printf("\nCFI Program Verify time out error\n");
829                                 error = ENXIO;
830                                 goto out;
831                         }
832                 } else {
833                         error = cfi_wait_ready(sc, sc->sc_wrofs, start,
834                            CFI_TIMEOUT_WRITE);
835                         if (error)
836                                 goto out;
837                 }
838         }
839
840         /* error is 0. */
841
842  out:
843         cfi_reset_default(sc);
844
845         /* Relock Intel flash */
846         switch (sc->sc_cmdset) {
847         case CFI_VEND_INTEL_ECS:
848         case CFI_VEND_INTEL_SCS:
849                 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
850                 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LB);
851                 cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
852                 break;
853         }
854         return (error);
855 }
856
857 #ifdef CFI_SUPPORT_STRATAFLASH
858 /*
859  * Intel StrataFlash Protection Register Support.
860  *
861  * The memory includes a 128-bit Protection Register that can be
862  * used for security.  There are two 64-bit segments; one is programmed
863  * at the factory with a unique 64-bit number which is immutable.
864  * The other segment is left blank for User (OEM) programming.
865  * The User/OEM segment is One Time Programmable (OTP).  It can also
866  * be locked to prevent any further writes by setting bit 0 of the
867  * Protection Lock Register (PLR).  The PLR can be written only once.
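 *
 * Words 0-3 of the register hold the factory segment and words 4-7
 * the User/OEM segment; the helpers below assemble each half into a
 * 64-bit value.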
868  */
869
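/*
 * 16-bit helpers for the identifier/protection register space; 'off'
 * is a word index, hence the shift by one.
 */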
870 static uint16_t
871 cfi_get16(struct cfi_softc *sc, int off)
872 {
873         uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
874         return v;
875 }
876
877 #ifdef CFI_ARMEDANDDANGEROUS
878 static void
879 cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
880 {
881         bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
882 }
883 #endif
884
885 /*
886  * Read the factory-defined 64-bit segment of the PR.
887  */
888 int 
889 cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
890 {
891         if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
892                 return EOPNOTSUPP;
893         KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
894
895         cfi_write(sc, 0, CFI_INTEL_READ_ID);
896         *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
897               ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
898               ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
899               ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
900         cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
901         return 0;
902 }
903
904 /*
905  * Read the User/OEM 64-bit segment of the PR.
906  */
907 int 
908 cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
909 {
910         if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
911                 return EOPNOTSUPP;
912         KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
913
914         cfi_write(sc, 0, CFI_INTEL_READ_ID);
915         *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
916               ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
917               ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
918               ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
919         cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
920         return 0;
921 }
922
923 /*
924  * Write the User/OEM 64-bit segment of the PR.
925  * XXX should allow writing individual words/bytes
926  */
927 int
928 cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
929 {
930 #ifdef CFI_ARMEDANDDANGEROUS
931         register_t intr;
932         int i, error;
933         sbintime_t start;
934 #endif
935
936         if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
937                 return EOPNOTSUPP;
938         KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
939
940 #ifdef CFI_ARMEDANDDANGEROUS
941         for (i = 7; i >= 4; i--, id >>= 16) {
942                 intr = intr_disable();
943                 start = sbinuptime();
944                 cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
945                 cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
946                 intr_restore(intr);
947                 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
948                     CFI_TIMEOUT_WRITE);
949                 if (error)
950                         break;
951         }
952         cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
953         return error;
954 #else
955         device_printf(sc->sc_dev, "%s: OEM PR not set, "
956             "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
957         return ENXIO;
958 #endif
959 }
960
961 /*
962  * Read the contents of the Protection Lock Register.
963  */
964 int 
965 cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
966 {
967         if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
968                 return EOPNOTSUPP;
969         KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
970
971         cfi_write(sc, 0, CFI_INTEL_READ_ID);
972         *plr = cfi_get16(sc, CFI_INTEL_PLR);
973         cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
974         return 0;
975 }
976
977 /*
978  * Write the Protection Lock Register to lock down the
979  * user-settable segment of the Protection Register.
980  * NOTE: this operation is not reversible.
981  */
982 int 
983 cfi_intel_set_plr(struct cfi_softc *sc)
984 {
985 #ifdef CFI_ARMEDANDDANGEROUS
986         register_t intr;
987         int error;
988         sbintime_t start;
989 #endif
990         if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
991                 return EOPNOTSUPP;
992         KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
993
994 #ifdef CFI_ARMEDANDDANGEROUS
995         /* worthy of console msg */
996         device_printf(sc->sc_dev, "set PLR\n");
997         intr = intr_disable();
998         start = sbinuptime();
999         cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
1000         cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
1001         intr_restore(intr);
1002         error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
1003             CFI_TIMEOUT_WRITE);
1004         cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
1005         return error;
1006 #else
1007         device_printf(sc->sc_dev, "%s: PLR not set, "
1008             "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
1009         return ENXIO;
1010 #endif
1011 }
1012 #endif /* CFI_SUPPORT_STRATAFLASH */