/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007, Juniper Networks, Inc.
 * Copyright (c) 2012-2013, SRI International
 * All rights reserved.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
 * programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/kenv.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <dev/cfi/cfi_reg.h>
#include <dev/cfi/cfi_var.h>

static void cfi_add_sysctls(struct cfi_softc *);

extern struct cdevsw cfi_cdevsw;

char cfi_driver_name[] = "cfi";
devclass_t cfi_devclass;
devclass_t cfi_diskclass;

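/*
 * Read one naturally aligned word of flash using the configured port
 * width, without any byte-order conversion.
 */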
uint32_t
cfi_read_raw(struct cfi_softc *sc, u_int ofs)
{
        uint32_t val;

        ofs &= ~(sc->sc_width - 1);
        switch (sc->sc_width) {
        case 1:
                val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
                break;
        case 2:
                val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
                break;
        case 4:
                val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
                break;
        default:
                val = ~0;
                break;
        }
        return (val);
}

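/*
 * Read one aligned word and convert it from the device's little-endian
 * order to host order, unless CFI_HARDWAREBYTESWAP says the hardware
 * already does the swapping.
 */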
uint32_t
cfi_read(struct cfi_softc *sc, u_int ofs)
{
        uint32_t val;
        uint16_t sval;

        ofs &= ~(sc->sc_width - 1);
        switch (sc->sc_width) {
        case 1:
                val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
                break;
        case 2:
                sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
#ifdef CFI_HARDWAREBYTESWAP
                val = sval;
#else
                val = le16toh(sval);
#endif
                break;
        case 4:
                val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
#ifndef CFI_HARDWAREBYTESWAP
                val = le32toh(val);
#endif
                break;
        default:
                val = ~0;
                break;
        }
        return (val);
}

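/*
 * Write one aligned word, converting from host order to the device's
 * little-endian order unless CFI_HARDWAREBYTESWAP is configured.
 */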
static void
cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
{

        ofs &= ~(sc->sc_width - 1);
        switch (sc->sc_width) {
        case 1:
                bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
                break;
        case 2:
#ifdef CFI_HARDWAREBYTESWAP
                bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, val);
#else
                bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
#endif
                break;
        case 4:
#ifdef CFI_HARDWAREBYTESWAP
                bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, val);
#else
                bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
#endif
                break;
        }
}

/*
 * This is the same workaround as NetBSD sys/dev/nor/cfi.c cfi_reset_default().
 */
static void
cfi_reset_default(struct cfi_softc *sc)
{

        cfi_write(sc, 0, CFI_BCS_READ_ARRAY2);
        cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
}

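/*
 * Put the device into CFI query mode, read the query byte at index 'ofs'
 * and switch the device back to its default read-array mode.
 */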
uint8_t
cfi_read_qry(struct cfi_softc *sc, u_int ofs)
{
        uint8_t val;

        cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
        val = cfi_read(sc, ofs * sc->sc_width);
        cfi_reset_default(sc);
        return (val);
}

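/*
 * Issue an AMD/Fujitsu-style command: the two-cycle unlock sequence is
 * written first, followed by the command itself at the given address,
 * all relative to the sector offset 'ofs'.
 */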
static void
cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
{

        cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
        cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
        cfi_write(sc, ofs + addr, data);
}

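/*
 * Format a size in bytes as a human-readable string such as "8MB".
 * The result lives in a static buffer and is overwritten by each call.
 */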
static char *
cfi_fmtsize(uint32_t sz)
{
        static char buf[8];
        static const char *sfx[] = { "", "K", "M", "G" };
        int sfxidx;

        sfxidx = 0;
        while (sfxidx < 3 && sz > 1023) {
                sz /= 1024;
                sfxidx++;
        }

        sprintf(buf, "%u%sB", sz, sfx[sfxidx]);
        return (buf);
}

int
cfi_probe(device_t dev)
{
        char desc[80];
        struct cfi_softc *sc;
        char *vend_str;
        int error;
        uint16_t iface, vend;

        sc = device_get_softc(dev);
        sc->sc_dev = dev;

        sc->sc_rid = 0;
        sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
            RF_ACTIVE);
        if (sc->sc_res == NULL)
                return (ENXIO);

        sc->sc_tag = rman_get_bustag(sc->sc_res);
        sc->sc_handle = rman_get_bushandle(sc->sc_res);

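        /*
         * If the bus attachment did not fix the port width, probe for it
         * by retrying the CFI query with 1-, 2- and 4-byte accesses until
         * the 'Q' of the "QRY" signature is found.
         */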
        if (sc->sc_width == 0) {
                sc->sc_width = 1;
                while (sc->sc_width <= 4) {
                        if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
                                break;
                        sc->sc_width <<= 1;
                }
        } else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
                error = ENXIO;
                goto out;
        }
        if (sc->sc_width > 4) {
                error = ENXIO;
                goto out;
        }

        /* We got a Q. Check if we also have the R and the Y. */
        if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
            cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
                error = ENXIO;
                goto out;
        }

        /* Get the vendor and command set. */
        vend = cfi_read_qry(sc, CFI_QRY_VEND) |
            (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);

        sc->sc_cmdset = vend;

        switch (vend) {
        case CFI_VEND_AMD_ECS:
        case CFI_VEND_AMD_SCS:
                vend_str = "AMD/Fujitsu";
                break;
        case CFI_VEND_INTEL_ECS:
                vend_str = "Intel/Sharp";
                break;
        case CFI_VEND_INTEL_SCS:
                vend_str = "Intel";
                break;
        case CFI_VEND_MITSUBISHI_ECS:
        case CFI_VEND_MITSUBISHI_SCS:
                vend_str = "Mitsubishi";
                break;
        default:
                vend_str = "Unknown vendor";
                break;
        }

        /* Get the device size. */
        sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);

        /* Sanity-check the I/F */
        iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
            (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);

        /*
         * Adding 1 to iface will give us a bit-wise "switch"
         * that allows us to test for the interface width by
         * testing a single bit.
         */
        iface++;

        error = (iface & sc->sc_width) ? 0 : EINVAL;
        if (error)
                goto out;

        snprintf(desc, sizeof(desc), "%s - %s", vend_str,
            cfi_fmtsize(sc->sc_size));
        device_set_desc_copy(dev, desc);

 out:
        bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
        return (error);
}

int
cfi_attach(device_t dev)
{
        struct cfi_softc *sc;
        u_int blksz, blocks;
        u_int r, u;
        uint64_t mtoexp, ttoexp;
#ifdef CFI_SUPPORT_STRATAFLASH
        uint64_t ppr;
        char name[KENV_MNAMELEN], value[32];
#endif

        sc = device_get_softc(dev);
        sc->sc_dev = dev;

        sc->sc_rid = 0;
        sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
#ifndef ATSE_CFI_HACK
            RF_ACTIVE);
#else
            RF_ACTIVE | RF_SHAREABLE);
#endif
        if (sc->sc_res == NULL)
                return (ENXIO);

        sc->sc_tag = rman_get_bustag(sc->sc_res);
        sc->sc_handle = rman_get_bushandle(sc->sc_res);

        /* Get time-out values for erase, write, and buffer write. */
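        /*
         * The CFI query area stores these as exponents: the typical time
         * is 2^n (milliseconds for a block erase, microseconds for a word
         * or buffered write) and the maximum time is the typical time
         * multiplied by 2^m.
         */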
        ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
        mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
        if (ttoexp == 0) {
                device_printf(dev, "erase timeout == 0, using 2^16ms\n");
                ttoexp = 16;
        }
        if (ttoexp > 41) {
                device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp);
                return (EINVAL);
        }
        if (mtoexp == 0) {
                device_printf(dev, "max erase timeout == 0, using 2^%jdms\n",
                    ttoexp + 4);
                mtoexp = 4;
        }
        if (ttoexp + mtoexp > 41) {
                device_printf(dev, "insane max erase timeout: 2^%jd\n",
                    ttoexp + mtoexp);
                return (EINVAL);
        }
        sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp);
        sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] =
            sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp);

        ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
        mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
        if (ttoexp == 0) {
                device_printf(dev, "write timeout == 0, using 2^18ns\n");
                ttoexp = 18;
        }
        if (ttoexp > 51) {
                device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
                return (EINVAL);
        }
        if (mtoexp == 0) {
                device_printf(dev, "max write timeout == 0, using 2^%jdms\n",
                    ttoexp + 4);
                mtoexp = 4;
        }
        if (ttoexp + mtoexp > 51) {
                device_printf(dev, "insane max write timeout: 2^%jdus\n",
                    ttoexp + mtoexp);
                return (EINVAL);
        }
        sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp);
        sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] =
            sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp);

        ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE);
        mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE);
        /* Don't check for 0, it means not-supported. */
        if (ttoexp > 51) {
                device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
                return (EINVAL);
        }
        if (ttoexp + mtoexp > 51) {
                device_printf(dev, "insane max write timeout: 2^%jdus\n",
                    ttoexp + mtoexp);
                return (EINVAL);
        }
        sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] =
            SBT_1US * (1ULL << cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE));
        sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] =
            sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] *
            (1ULL << cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE));

        /* Get the maximum size of a multibyte program */
        if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0)
                sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) |
                    cfi_read_qry(sc, CFI_QRY_MAXBUF + 1) << 8);
        else
                sc->sc_maxbuf = 0;

        /* Get erase regions. */
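        /*
         * Each region entry gives the number of blocks minus one and the
         * block size in units of 256 bytes, with 0 meaning 128 bytes.
         */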
        sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
        sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
            M_TEMP, M_WAITOK | M_ZERO);
        for (r = 0; r < sc->sc_regions; r++) {
                blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
                    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
                sc->sc_region[r].r_blocks = blocks + 1;

                blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
                    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
                sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
                    blksz * 256;
        }

        /* Reset the device to a default state. */
        cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);

        if (bootverbose) {
                device_printf(dev, "[");
                for (r = 0; r < sc->sc_regions; r++) {
                        printf("%ux%s%s", sc->sc_region[r].r_blocks,
                            cfi_fmtsize(sc->sc_region[r].r_blksz),
                            (r == sc->sc_regions - 1) ? "]\n" : ",");
                }
        }

        if (sc->sc_cmdset == CFI_VEND_AMD_ECS ||
            sc->sc_cmdset == CFI_VEND_AMD_SCS) {
                cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_AUTO_SELECT);
                sc->sc_manid = cfi_read(sc, 0);
                sc->sc_devid = cfi_read(sc, 2);
                device_printf(dev, "Manufacturer ID:%x Device ID:%x\n",
                    sc->sc_manid, sc->sc_devid);
                cfi_write(sc, 0, CFI_BCS_READ_ARRAY2);
        }

        u = device_get_unit(dev);
        sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
            "%s%u", cfi_driver_name, u);
        sc->sc_nod->si_drv1 = sc;

        cfi_add_sysctls(sc);

#ifdef CFI_SUPPORT_STRATAFLASH
        /*
         * Store the Intel factory PPR in the environment.  In some
         * cases it is the most unique ID on a board.
         */
        if (cfi_intel_get_factory_pr(sc, &ppr) == 0) {
                if (snprintf(name, sizeof(name), "%s.factory_ppr",
                    device_get_nameunit(dev)) < (sizeof(name) - 1) &&
                    snprintf(value, sizeof(value), "0x%016jx", ppr) <
                    (sizeof(value) - 1))
                        (void) kern_setenv(name, value);
        }
#endif

        device_add_child(dev, "cfid", -1);
        bus_generic_attach(dev);

        return (0);
}

static void
cfi_add_sysctls(struct cfi_softc *sc)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid_list *children;

        ctx = device_get_sysctl_ctx(sc->sc_dev);
        children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));

        SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
            "typical_erase_timout_count",
            CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE],
            0, "Number of times the typical erase timeout was exceeded");
        SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
            "max_erase_timout_count",
            CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0,
            "Number of times the maximum erase timeout was exceeded");
        SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
            "typical_write_timout_count",
            CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0,
            "Number of times the typical write timeout was exceeded");
        SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
            "max_write_timout_count",
            CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0,
            "Number of times the maximum write timeout was exceeded");
        if (sc->sc_maxbuf > 0) {
                SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
                    "typical_bufwrite_timout_count",
                    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0,
                    "Number of times the typical buffered write timeout was "
                    "exceeded");
                SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
                    "max_bufwrite_timout_count",
                    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0,
                    "Number of times the maximum buffered write timeout was "
                    "exceeded");
        }
}

int
cfi_detach(device_t dev)
{
        struct cfi_softc *sc;

        sc = device_get_softc(dev);

        destroy_dev(sc->sc_nod);
        free(sc->sc_region, M_TEMP);
        bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
        return (0);
}

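/*
 * Return TRUE when the 'sz' bytes of flash starting at 'ofs' read back
 * as fully erased, i.e. every cell is all-ones.
 */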
static bool
cfi_check_erase(struct cfi_softc *sc, u_int ofs, u_int sz)
{
        bool result;
        int i;
        uint32_t val;

        result = FALSE;
        for (i = 0; i < sz; i += sc->sc_width) {
                val = cfi_read(sc, ofs + i);
                switch (sc->sc_width) {
                case 1:
                        if (val != 0xff)
                                goto out;
                        continue;
                case 2:
                        if (val != 0xffff)
                                goto out;
                        continue;
                case 4:
                        if (val != 0xffffffff)
                                goto out;
                        continue;
                }
        }
        result = TRUE;

out:
        return (result);
}

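/*
 * Wait for the chip to finish the pending erase or program operation,
 * keeping count of how often the typical and maximum CFI timeouts for
 * that kind of operation are exceeded.
 */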
static int
cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start,
    enum cfi_wait_cmd cmd)
{
        int done, error, tto_exceeded;
        uint32_t st0 = 0, st = 0;
        sbintime_t now;

        done = 0;
        error = 0;
        tto_exceeded = 0;
        while (!done && !error) {
                /*
                 * Save time before we start so we always do one check
                 * after the timeout has expired.
                 */
                now = sbinuptime();

                switch (sc->sc_cmdset) {
                case CFI_VEND_INTEL_ECS:
                case CFI_VEND_INTEL_SCS:
                        st = cfi_read(sc, ofs);
                        done = (st & CFI_INTEL_STATUS_WSMS);
                        if (done) {
                                /* NB: bit 0 is reserved */
                                st &= ~(CFI_INTEL_XSTATUS_RSVD |
                                        CFI_INTEL_STATUS_WSMS |
                                        CFI_INTEL_STATUS_RSVD);
                                if (st & CFI_INTEL_STATUS_DPS)
                                        error = EPERM;
                                else if (st & CFI_INTEL_STATUS_PSLBS)
                                        error = EIO;
                                else if (st & CFI_INTEL_STATUS_ECLBS)
                                        error = ENXIO;
                                else if (st)
                                        error = EACCES;
                        }
                        break;
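                /*
                 * AMD-style chips have no status register; while an
                 * embedded operation is in progress the DQ6 bit toggles
                 * on every read, so two consecutive reads that agree in
                 * that bit mean the operation has finished.
                 */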
                case CFI_VEND_AMD_SCS:
                case CFI_VEND_AMD_ECS:
                        st0 = cfi_read(sc, ofs);
                        st = cfi_read(sc, ofs);
                        done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
                        break;
                }

                if (tto_exceeded ||
                    now > start + sc->sc_typical_timeouts[cmd]) {
                        if (!tto_exceeded) {
                                tto_exceeded = 1;
                                sc->sc_tto_counts[cmd]++;
#ifdef CFI_DEBUG_TIMEOUT
                                device_printf(sc->sc_dev,
                                    "typical timeout exceeded (cmd %d)", cmd);
#endif
                        }
                        if (now > start + sc->sc_max_timeouts[cmd]) {
                                sc->sc_mto_counts[cmd]++;
#ifdef CFI_DEBUG_TIMEOUT
                                device_printf(sc->sc_dev,
                                    "max timeout exceeded (cmd %d)", cmd);
#endif
                                break;
                        }
                }
        }
        if (!done && !error)
                error = ETIMEDOUT;
        if (error)
                printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
        return (error);
}

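/*
 * Write the block at sc_wrofs from the data in sc_wrbuf: erase it first
 * if any bit would have to change from 0 to 1, then program it with the
 * buffered write command when the chip supports one, falling back to
 * word-at-a-time programming otherwise.
 */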
int
cfi_write_block(struct cfi_softc *sc)
{
        union {
                uint8_t         *x8;
                uint16_t        *x16;
                uint32_t        *x32;
        } ptr, cpyprt;
        register_t intr;
        int error, i, j, neederase = 0;
        uint32_t st;
        u_int wlen;
        sbintime_t start;
        u_int minsz;
        uint32_t val;

        /* Intel flash must be unlocked before modification */
        switch (sc->sc_cmdset) {
        case CFI_VEND_INTEL_ECS:
        case CFI_VEND_INTEL_SCS:
                cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
                cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB);
                cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
                break;
        }

        /* Check if an erase is required. */
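        /*
         * Programming can only clear bits (1 -> 0), so if the new data
         * needs any bit set that is currently clear in the flash, the
         * whole block has to be erased first.
         */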
        for (i = 0; i < sc->sc_wrbufsz; i++)
                if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) {
                        neederase = 1;
                        break;
                }

        if (neederase) {
                intr = intr_disable();
                start = sbinuptime();
                /* Erase the block. */
                switch (sc->sc_cmdset) {
                case CFI_VEND_INTEL_ECS:
                case CFI_VEND_INTEL_SCS:
                        cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
                        cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
                        break;
                case CFI_VEND_AMD_SCS:
                case CFI_VEND_AMD_ECS:
                        /* find minimum sector size */
                        minsz = sc->sc_region[0].r_blksz;
                        for (i = 1; i < sc->sc_regions; i++) {
                                if (sc->sc_region[i].r_blksz < minsz)
                                        minsz = sc->sc_region[i].r_blksz;
                        }
                        cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
                            CFI_AMD_ERASE_SECTOR);
                        cfi_amd_write(sc, sc->sc_wrofs,
                            sc->sc_wrofs >> (ffs(minsz) - 1),
                            CFI_AMD_BLOCK_ERASE);
                        for (i = 0; i < CFI_AMD_MAXCHK; ++i) {
                                if (cfi_check_erase(sc, sc->sc_wrofs,
                                    sc->sc_wrbufsz))
                                        break;
                                DELAY(10);
                        }
                        if (i == CFI_AMD_MAXCHK) {
                                printf("\nCFI Sector Erase time out error\n");
                                intr_restore(intr);
                                return (ENODEV);
                        }
                        break;
                default:
                        /* Better safe than sorry... */
                        intr_restore(intr);
                        return (ENODEV);
                }
                intr_restore(intr);
                error = cfi_wait_ready(sc, sc->sc_wrofs, start,
                    CFI_TIMEOUT_ERASE);
                if (error)
                        goto out;
        } else
                error = 0;

        /* Write the block using a multibyte write if supported. */
        ptr.x8 = sc->sc_wrbuf;
        cpyprt.x8 = sc->sc_wrbufcpy;
        if (sc->sc_maxbuf > sc->sc_width) {
                switch (sc->sc_cmdset) {
                case CFI_VEND_INTEL_ECS:
                case CFI_VEND_INTEL_SCS:
                        for (i = 0; i < sc->sc_wrbufsz; i += wlen) {
                                wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i);

                                intr = intr_disable();

                                start = sbinuptime();
                                do {
                                        cfi_write(sc, sc->sc_wrofs + i,
                                            CFI_BCS_BUF_PROG_SETUP);
                                        if (sbinuptime() > start +
                                            sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE]) {
                                                intr_restore(intr);
                                                error = ETIMEDOUT;
                                                goto out;
                                        }
                                        st = cfi_read(sc, sc->sc_wrofs + i);
                                } while (! (st & CFI_INTEL_STATUS_WSMS));

                                cfi_write(sc, sc->sc_wrofs + i,
                                    (wlen / sc->sc_width) - 1);
                                switch (sc->sc_width) {
                                case 1:
                                        bus_space_write_region_1(sc->sc_tag,
                                            sc->sc_handle, sc->sc_wrofs + i,
                                            ptr.x8 + i, wlen);
                                        break;
                                case 2:
                                        bus_space_write_region_2(sc->sc_tag,
                                            sc->sc_handle, sc->sc_wrofs + i,
                                            ptr.x16 + i / 2, wlen / 2);
                                        break;
                                case 4:
                                        bus_space_write_region_4(sc->sc_tag,
                                            sc->sc_handle, sc->sc_wrofs + i,
                                            ptr.x32 + i / 4, wlen / 4);
                                        break;
                                }

                                cfi_write(sc, sc->sc_wrofs + i,
                                    CFI_BCS_CONFIRM);

                                intr_restore(intr);

                                error = cfi_wait_ready(sc, sc->sc_wrofs + i,
                                    start, CFI_TIMEOUT_BUFWRITE);
                                if (error != 0)
                                        goto out;
                        }
                        goto out;
                default:
                        /* Fall through to single word case */
                        break;
                }

        }

        /* Write the block one byte/word at a time. */
        for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {

                /* Avoid writing unless we are actually changing bits */
                if (!neederase) {
                        switch (sc->sc_width) {
                        case 1:
                                if (*(ptr.x8 + i) == *(cpyprt.x8 + i))
                                        continue;
                                break;
                        case 2:
                                if (*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2))
                                        continue;
                                break;
                        case 4:
                                if (*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4))
                                        continue;
                                break;
                        }
                }

                /*
                 * Make sure the command to start a write and the
                 * actual write happens back-to-back without any
                 * excessive delays.
                 */
                intr = intr_disable();

                start = sbinuptime();
                switch (sc->sc_cmdset) {
                case CFI_VEND_INTEL_ECS:
                case CFI_VEND_INTEL_SCS:
                        cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
                        break;
                case CFI_VEND_AMD_SCS:
                case CFI_VEND_AMD_ECS:
                        cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
                        break;
                }
                switch (sc->sc_width) {
                case 1:
                        bus_space_write_1(sc->sc_tag, sc->sc_handle,
                            sc->sc_wrofs + i, *(ptr.x8 + i));
                        break;
                case 2:
                        bus_space_write_2(sc->sc_tag, sc->sc_handle,
                            sc->sc_wrofs + i, *(ptr.x16 + i / 2));
                        break;
                case 4:
                        bus_space_write_4(sc->sc_tag, sc->sc_handle,
                            sc->sc_wrofs + i, *(ptr.x32 + i / 4));
                        break;
                }

                intr_restore(intr);

                if (sc->sc_cmdset == CFI_VEND_AMD_ECS ||
                    sc->sc_cmdset == CFI_VEND_AMD_SCS) {
                        for (j = 0; j < CFI_AMD_MAXCHK; ++j) {
                                switch (sc->sc_width) {
                                case 1:
                                        val = *(ptr.x8 + i);
                                        break;
                                case 2:
                                        val = *(ptr.x16 + i / 2);
                                        break;
                                case 4:
                                        val = *(ptr.x32 + i / 4);
                                        break;
                                }

                                if (cfi_read(sc, sc->sc_wrofs + i) == val)
                                        break;

                                DELAY(10);
                        }
                        if (j == CFI_AMD_MAXCHK) {
                                printf("\nCFI Program Verify time out error\n");
                                error = ENXIO;
                                goto out;
                        }
                } else {
                        error = cfi_wait_ready(sc, sc->sc_wrofs, start,
                            CFI_TIMEOUT_WRITE);
                        if (error)
                                goto out;
                }
        }

        /* error is 0. */

 out:
        cfi_reset_default(sc);

        /* Relock Intel flash */
        switch (sc->sc_cmdset) {
        case CFI_VEND_INTEL_ECS:
        case CFI_VEND_INTEL_SCS:
                cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
                cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LB);
                cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
                break;
        }
        return (error);
}

#ifdef CFI_SUPPORT_STRATAFLASH
/*
 * Intel StrataFlash Protection Register Support.
 *
 * The memory includes a 128-bit Protection Register that can be
 * used for security.  There are two 64-bit segments; one is programmed
 * at the factory with a unique 64-bit number which is immutable.
 * The other segment is left blank for User (OEM) programming.
 * The User/OEM segment is One Time Programmable (OTP).  It can also
 * be locked to prevent any further writes by setting bit 0 of the
 * Protection Lock Register (PLR).  The PLR can be written only once.
 */

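/*
 * 16-bit accessors for the protection registers; 'off' is a word index
 * into the device, so it is shifted left by one to form the byte offset.
 */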
static uint16_t
cfi_get16(struct cfi_softc *sc, int off)
{
        uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
        return v;
}

#ifdef CFI_ARMEDANDDANGEROUS
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
        bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
}
#endif

/*
 * Read the factory-defined 64-bit segment of the PR.
 */
int
cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
{
        if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
                return EOPNOTSUPP;
        KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

        cfi_write(sc, 0, CFI_INTEL_READ_ID);
        *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
              ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
              ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
              ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
        cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
        return 0;
}

/*
 * Read the User/OEM 64-bit segment of the PR.
 */
int
cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
{
        if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
                return EOPNOTSUPP;
        KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

        cfi_write(sc, 0, CFI_INTEL_READ_ID);
        *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
              ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
              ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
              ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
        cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
        return 0;
}

/*
 * Write the User/OEM 64-bit segment of the PR.
 * XXX should allow writing individual words/bytes
 */
int
cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
{
#ifdef CFI_ARMEDANDDANGEROUS
        register_t intr;
        int i, error;
        sbintime_t start;
#endif

        if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
                return EOPNOTSUPP;
        KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
        for (i = 7; i >= 4; i--, id >>= 16) {
                intr = intr_disable();
                start = sbinuptime();
                cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
                cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
                intr_restore(intr);
                error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
                    CFI_TIMEOUT_WRITE);
                if (error)
                        break;
        }
        cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
        return error;
#else
        device_printf(sc->sc_dev, "%s: OEM PR not set, "
            "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
        return ENXIO;
#endif
}

/*
 * Read the contents of the Protection Lock Register.
 */
int
cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
{
        if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
                return EOPNOTSUPP;
        KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

        cfi_write(sc, 0, CFI_INTEL_READ_ID);
        *plr = cfi_get16(sc, CFI_INTEL_PLR);
        cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
        return 0;
}

/*
 * Write the Protection Lock Register to lock down the
 * user-settable segment of the Protection Register.
 * NOTE: this operation is not reversible.
 */
int
cfi_intel_set_plr(struct cfi_softc *sc)
{
#ifdef CFI_ARMEDANDDANGEROUS
        register_t intr;
        int error;
        sbintime_t start;
#endif
        if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
                return EOPNOTSUPP;
        KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
        /* worthy of console msg */
        device_printf(sc->sc_dev, "set PLR\n");
        intr = intr_disable();
        start = sbinuptime();
        cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
        cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
        intr_restore(intr);
        error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
            CFI_TIMEOUT_WRITE);
        cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
        return error;
#else
        device_printf(sc->sc_dev, "%s: PLR not set, "
            "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
        return ENXIO;
#endif
}
#endif /* CFI_SUPPORT_STRATAFLASH */