]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/arm/arm/cpufunc.c
This commit was generated by cvs2svn to compensate for changes in r162017,
[FreeBSD/FreeBSD.git] / sys / arm / arm / cpufunc.c
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *      This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufuncs.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49
50 #include <sys/cdefs.h>
51
52 #include <sys/types.h>
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/lock.h>
56 #include <sys/mutex.h>
57 #include <sys/bus.h>
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/disassem.h>
61
62 #include <vm/vm.h>
63 #include <vm/pmap.h>
64
65 #include <machine/cpuconf.h>
66 #include <machine/cpufunc.h>
67 #include <machine/bootconfig.h>
68
69 #ifdef CPU_XSCALE_80200
70 #include <arm/xscale/i80200/i80200reg.h>
71 #include <arm/xscale/i80200/i80200var.h>
72 #endif
73
74 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
75 #include <arm/xscale/i80321/i80321reg.h>
76 #include <arm/xscale/i80321/i80321var.h>
77 #endif
78
79 #ifdef CPU_XSCALE_IXP425
80 #include <arm/xscale/ixp425/ixp425reg.h>
81 #include <arm/xscale/ixp425/ixp425var.h>
82 #endif
83
84 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
85     defined(CPU_XSCALE_80219)
86 #include <arm/xscale/xscalereg.h>
87 #endif
88
89 #if defined(PERFCTRS)
90 struct arm_pmc_funcs *arm_pmc;
91 #endif
92
93 /* PRIMARY CACHE VARIABLES */
94 int     arm_picache_size;
95 int     arm_picache_line_size;
96 int     arm_picache_ways;
97
98 int     arm_pdcache_size;       /* and unified */
99 int     arm_pdcache_line_size;
100 int     arm_pdcache_ways;
101
102 int     arm_pcache_type;
103 int     arm_pcache_unified;
104
105 int     arm_dcache_align;
106 int     arm_dcache_align_mask;
107
108 /* 1 == use cpu_sleep(), 0 == don't */
109 int cpu_do_powersave;
110 int ctrl;
111
112 #ifdef CPU_ARM7TDMI
/*
 * cpu_functions dispatch table for the ARM7TDMI core.
 *
 * Entries are positional and must stay in exactly the field order of
 * struct cpu_functions (declared in a header not visible in this file;
 * presumably <machine/cpufunc.h>, included above — verify before
 * reordering).  The (void *) casts install a routine whose signature
 * differs from the slot's prototype (e.g. a no-argument no-op in a
 * ranged-operation slot); the extra arguments are simply ignored.
 *
 * Note that every cache operation here funnels through either
 * cpufunc_nullop or the single arm7tdmi_cache_flushID routine, and all
 * TLB operations reuse the unified flushID entry points.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* domain               */
	arm7tdmi_setttb,                /* setttb               */
	cpufunc_faultstatus,            /* faultstatus          */
	cpufunc_faultaddress,           /* faultaddress         */

	/* TLB functions */

	arm7tdmi_tlb_flushID,           /* tlb_flushID          */
	arm7tdmi_tlb_flushID_SE,        /* tlb_flushID_SE       */
	arm7tdmi_tlb_flushID,           /* tlb_flushI           */
	arm7tdmi_tlb_flushID_SE,        /* tlb_flushI_SE        */
	arm7tdmi_tlb_flushID,           /* tlb_flushD           */
	arm7tdmi_tlb_flushID_SE,        /* tlb_flushD_SE        */

	/* Cache operations */

	cpufunc_nullop,                 /* icache_sync_all      */
	(void *)cpufunc_nullop,         /* icache_sync_range    */

	arm7tdmi_cache_flushID,         /* dcache_wbinv_all     */
	(void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range   */
	(void *)arm7tdmi_cache_flushID, /* dcache_inv_range     */
	(void *)cpufunc_nullop,         /* dcache_wb_range      */

	arm7tdmi_cache_flushID,         /* idcache_wbinv_all    */
	(void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range  */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	cpufunc_nullop,                 /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	(void *)cpufunc_nullop,         /* sleep                */

	/* Soft functions */

	late_abort_fixup,               /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm7tdmi_context_switch,        /* context_switch       */

	arm7tdmi_setup                  /* cpu setup            */

};
168 #endif  /* CPU_ARM7TDMI */
169
170 #ifdef CPU_ARM8
/*
 * cpu_functions dispatch table for the ARM8 core.
 *
 * Entries are positional; the order must match the struct cpu_functions
 * declaration (not visible in this file — verify against the header
 * before reordering).  (void *) casts adapt routines whose signatures
 * differ from the slot prototype.
 *
 * The /*XXX*-marked dcache_inv_range slot uses arm8_cache_purgeID
 * (write-back + invalidate) in place of a pure invalidate — stronger
 * than required, flagged by the original author.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* domain               */
	arm8_setttb,                    /* setttb               */
	cpufunc_faultstatus,            /* faultstatus          */
	cpufunc_faultaddress,           /* faultaddress         */

	/* TLB functions */

	arm8_tlb_flushID,               /* tlb_flushID          */
	arm8_tlb_flushID_SE,            /* tlb_flushID_SE       */
	arm8_tlb_flushID,               /* tlb_flushI           */
	arm8_tlb_flushID_SE,            /* tlb_flushI_SE        */
	arm8_tlb_flushID,               /* tlb_flushD           */
	arm8_tlb_flushID_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	cpufunc_nullop,                 /* icache_sync_all      */
	(void *)cpufunc_nullop,         /* icache_sync_range    */

	arm8_cache_purgeID,             /* dcache_wbinv_all     */
	(void *)arm8_cache_purgeID,     /* dcache_wbinv_range   */
/*XXX*/ (void *)arm8_cache_purgeID,     /* dcache_inv_range     */
	(void *)arm8_cache_cleanID,     /* dcache_wb_range      */

	arm8_cache_purgeID,             /* idcache_wbinv_all    */
	(void *)arm8_cache_purgeID,     /* idcache_wbinv_range  */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	cpufunc_nullop,                 /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	(void *)cpufunc_nullop,         /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm8_context_switch,            /* context_switch       */

	arm8_setup                      /* cpu setup            */
};
225 #endif  /* CPU_ARM8 */
226
227 #ifdef CPU_ARM9
/*
 * cpu_functions dispatch table for ARM9 cores.
 *
 * Entries are positional; the order must match the struct cpu_functions
 * declaration (not visible in this file — verify against the header
 * before reordering).  TLB maintenance uses the generic armv4_* helpers
 * except for the ARM9-specific flushID_SE.
 *
 * The /*XXX*-marked dcache_inv_range slot reuses arm9_dcache_wbinv_range
 * (write-back + invalidate) instead of a pure invalidate — stronger than
 * strictly necessary, flagged by the original author.  tlb_flushI_SE
 * falls back to a full I-TLB flush via the (void *) cast.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* domain               */
	arm9_setttb,                    /* setttb               */
	cpufunc_faultstatus,            /* faultstatus          */
	cpufunc_faultaddress,           /* faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	(void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm9_icache_sync_all,           /* icache_sync_all      */
	arm9_icache_sync_range,         /* icache_sync_range    */

	arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
	arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
/*XXX*/ arm9_dcache_wbinv_range,        /* dcache_inv_range     */
	arm9_dcache_wb_range,           /* dcache_wb_range      */

	arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
	arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	(void *)cpufunc_nullop,         /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm9_context_switch,            /* context_switch       */

	arm9_setup                      /* cpu setup            */

};
283 #endif /* CPU_ARM9 */
284
285 #ifdef CPU_ARM10
/*
 * cpu_functions dispatch table for ARM10 cores.
 *
 * Entries are positional; the order must match the struct cpu_functions
 * declaration (not visible in this file — verify against the header
 * before reordering).  Unlike the ARM8/ARM9 tables above, ARM10 provides
 * a dedicated dcache_inv_range and single-entry I-TLB flush, so no
 * over-strong /*XXX* fallbacks are needed here.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* domain               */
	arm10_setttb,                   /* setttb               */
	cpufunc_faultstatus,            /* faultstatus          */
	cpufunc_faultaddress,           /* faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm10_icache_sync_all,          /* icache_sync_all      */
	arm10_icache_sync_range,        /* icache_sync_range    */

	arm10_dcache_wbinv_all,         /* dcache_wbinv_all     */
	arm10_dcache_wbinv_range,       /* dcache_wbinv_range   */
	arm10_dcache_inv_range,         /* dcache_inv_range     */
	arm10_dcache_wb_range,          /* dcache_wb_range      */

	arm10_idcache_wbinv_all,        /* idcache_wbinv_all    */
	arm10_idcache_wbinv_range,      /* idcache_wbinv_range  */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	(void *)cpufunc_nullop,         /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm10_context_switch,           /* context_switch       */

	arm10_setup                     /* cpu setup            */

};
341 #endif /* CPU_ARM10 */
342
343 #ifdef CPU_SA110
/*
 * cpu_functions dispatch table for the Intel/DEC SA-110 (StrongARM).
 *
 * Entries are positional; the order must match the struct cpu_functions
 * declaration (not visible in this file — verify against the header
 * before reordering).  Cache maintenance uses the shared sa1_* family
 * common to all StrongARM-derived tables below.
 *
 * The /*XXX*-marked dcache_inv_range slot reuses sa1_cache_purgeD_rng
 * (write-back + invalidate) instead of a pure invalidate, as flagged by
 * the original author.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* domain               */
	sa1_setttb,                     /* setttb               */
	cpufunc_faultstatus,            /* faultstatus          */
	cpufunc_faultaddress,           /* faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	(void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	sa1_cache_syncI,                /* icache_sync_all      */
	sa1_cache_syncI_rng,            /* icache_sync_range    */

	sa1_cache_purgeD,               /* dcache_wbinv_all     */
	sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
/*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
	sa1_cache_cleanD_rng,           /* dcache_wb_range      */

	sa1_cache_purgeID,              /* idcache_wbinv_all    */
	sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	(void *)cpufunc_nullop,         /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	sa110_context_switch,           /* context_switch       */

	sa110_setup                     /* cpu setup            */
};
398 #endif  /* CPU_SA110 */
399
400 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * cpu_functions dispatch table for the Intel SA-1100/SA-1110 (StrongARM).
 *
 * Entries are positional; the order must match the struct cpu_functions
 * declaration (not visible in this file — verify against the header
 * before reordering).  Identical to the SA-110 table except for the
 * SA-11x0-specific flush_prefetchbuf (drain read buffer), sleep,
 * context_switch, and setup entries.
 *
 * The /*XXX*-marked dcache_inv_range slot reuses sa1_cache_purgeD_rng
 * (write-back + invalidate) instead of a pure invalidate, as flagged by
 * the original author.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* domain               */
	sa1_setttb,                     /* setttb               */
	cpufunc_faultstatus,            /* faultstatus          */
	cpufunc_faultaddress,           /* faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	(void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	sa1_cache_syncI,                /* icache_sync_all      */
	sa1_cache_syncI_rng,            /* icache_sync_range    */

	sa1_cache_purgeD,               /* dcache_wbinv_all     */
	sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
/*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
	sa1_cache_cleanD_rng,           /* dcache_wb_range      */

	sa1_cache_purgeID,              /* idcache_wbinv_all    */
	sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */

	/* Other functions */

	sa11x0_drain_readbuf,           /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	sa11x0_cpu_sleep,               /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	sa11x0_context_switch,          /* context_switch       */

	sa11x0_setup                    /* cpu setup            */
};
455 #endif  /* CPU_SA1100 || CPU_SA1110 */
456
457 #ifdef CPU_IXP12X0
/*
 * cpu_functions dispatch table for the Intel IXP12x0 network processor
 * (StrongARM core).
 *
 * Entries are positional; the order must match the struct cpu_functions
 * declaration (not visible in this file — verify against the header
 * before reordering).  Shares the sa1_*/armv4_* cache and TLB helpers
 * with the SA-110/SA-11x0 tables above; differs only in the IXP12x0
 * read-buffer drain, context_switch, and setup entries.
 *
 * The /*XXX*-marked dcache_inv_range slot reuses sa1_cache_purgeD_rng
 * (write-back + invalidate) instead of a pure invalidate, as flagged by
 * the original author.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* domain               */
	sa1_setttb,                     /* setttb               */
	cpufunc_faultstatus,            /* faultstatus          */
	cpufunc_faultaddress,           /* faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	(void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	sa1_cache_syncI,                /* icache_sync_all      */
	sa1_cache_syncI_rng,            /* icache_sync_range    */

	sa1_cache_purgeD,               /* dcache_wbinv_all     */
	sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
/*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
	sa1_cache_cleanD_rng,           /* dcache_wb_range      */

	sa1_cache_purgeID,              /* idcache_wbinv_all    */
	sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */

	/* Other functions */

	ixp12x0_drain_readbuf,          /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	(void *)cpufunc_nullop,         /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	ixp12x0_context_switch,         /* context_switch       */

	ixp12x0_setup                   /* cpu setup            */
};
512 #endif  /* CPU_IXP12X0 */
513
514 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
515   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
516   defined(CPU_XSCALE_80219)
517
/*
 * cpu_functions dispatch table shared by all supported XScale variants
 * (i80200, i80321, i80219, PXA2x0, IXP425).
 *
 * Entries are positional; the order must match the struct cpu_functions
 * declaration (not visible in this file — verify against the header
 * before reordering).  Unlike the tables above, XScale supplies a real
 * cpwait (drain the coprocessor write path), a true ranged D-cache
 * invalidate (xscale_cache_flushD_rng), and a working sleep entry, so
 * no /*XXX* fallbacks are needed here.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	xscale_cpwait,                  /* cpwait               */

	/* MMU functions */

	xscale_control,                 /* control              */
	cpufunc_domains,                /* domain               */
	xscale_setttb,                  /* setttb               */
	cpufunc_faultstatus,            /* faultstatus          */
	cpufunc_faultaddress,           /* faultaddress         */

	/* TLB functions */

	armv4_tlb_flushID,              /* tlb_flushID          */
	xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
	armv4_tlb_flushI,               /* tlb_flushI           */
	(void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
	armv4_tlb_flushD,               /* tlb_flushD           */
	armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	xscale_cache_syncI,             /* icache_sync_all      */
	xscale_cache_syncI_rng,         /* icache_sync_range    */

	xscale_cache_purgeD,            /* dcache_wbinv_all     */
	xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
	xscale_cache_flushD_rng,        /* dcache_inv_range     */
	xscale_cache_cleanD_rng,        /* dcache_wb_range      */

	xscale_cache_purgeID,           /* idcache_wbinv_all    */
	xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv4_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	xscale_cpu_sleep,               /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	xscale_context_switch,          /* context_switch       */

	xscale_setup                    /* cpu setup            */
};
572 #endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
   CPU_XSCALE_IXP425 || CPU_XSCALE_80219 */
575
576 /*
577  * Global constants also used by locore.s
578  */
579
580 struct cpu_functions cpufuncs;
581 u_int cputype;
582 u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore.s */
583
584 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
585   defined (CPU_ARM10) ||                                               \
586   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||            \
587   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||          \
588   defined(CPU_XSCALE_80219)
589
590 static void get_cachetype_cp15(void);
591
592 /* Additional cache information local to this file.  Log2 of some of the
593    above numbers.  */
594 static int      arm_dcache_l2_nsets;
595 static int      arm_dcache_l2_assoc;
596 static int      arm_dcache_l2_linesize;
597
598 static void
599 get_cachetype_cp15()
600 {
601         u_int ctype, isize, dsize;
602         u_int multiplier;
603
604         __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
605                 : "=r" (ctype));
606
607         /*
608          * ...and thus spake the ARM ARM:
609          *
610          * If an <opcode2> value corresponding to an unimplemented or
611          * reserved ID register is encountered, the System Control
612          * processor returns the value of the main ID register.
613          */
614         if (ctype == cpufunc_id())
615                 goto out;
616
617         if ((ctype & CPU_CT_S) == 0)
618                 arm_pcache_unified = 1;
619
620         /*
621          * If you want to know how this code works, go read the ARM ARM.
622          */
623
624         arm_pcache_type = CPU_CT_CTYPE(ctype);
625
626         if (arm_pcache_unified == 0) {
627                 isize = CPU_CT_ISIZE(ctype);
628                 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
629                 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
630                 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
631                         if (isize & CPU_CT_xSIZE_M)
632                                 arm_picache_line_size = 0; /* not present */
633                         else
634                                 arm_picache_ways = 1;
635                 } else {
636                         arm_picache_ways = multiplier <<
637                             (CPU_CT_xSIZE_ASSOC(isize) - 1);
638                 }
639                 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
640         }
641
642         dsize = CPU_CT_DSIZE(ctype);
643         multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
644         arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
645         if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
646                 if (dsize & CPU_CT_xSIZE_M)
647                         arm_pdcache_line_size = 0; /* not present */
648                 else
649                         arm_pdcache_ways = 1;
650         } else {
651                 arm_pdcache_ways = multiplier <<
652                     (CPU_CT_xSIZE_ASSOC(dsize) - 1);
653         }
654         arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
655
656         arm_dcache_align = arm_pdcache_line_size;
657
658         arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
659         arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
660         arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
661             CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
662
663  out:
664         arm_dcache_align_mask = arm_dcache_align - 1;
665 }
666 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
667
668 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
669     defined(CPU_IXP12X0)
670 /* Cache information for CPUs without cache type registers. */
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU id (masked with CPU_ID_CPU_MASK) */
	int	ct_pcache_type;		/* primary cache type (CPU_CT_CTYPE_*) */
	int	ct_pcache_unified;	/* nonzero if I/D caches are unified */
	int	ct_pdcache_size;	/* D-cache size, bytes */
	int	ct_pdcache_line_size;	/* D-cache line size, bytes */
	int	ct_pdcache_ways;	/* D-cache associativity */
	int	ct_picache_size;	/* I-cache size, bytes */
	int	ct_picache_line_size;	/* I-cache line size, bytes */
	int	ct_picache_ways;	/* I-cache associativity */
};
682
/*
 * Known cache geometries, keyed by masked CPU id; scanned by
 * get_cachetype_table() below.  The all-zero entry terminates the list,
 * so ct_cpuid == 0 must never appear as a real id here.
 */
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,     CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,   CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
692
693 static void get_cachetype_table(void);
694
695 static void
696 get_cachetype_table()
697 {
698         int i;
699         u_int32_t cpuid = cpufunc_id();
700
701         for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
702                 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
703                         arm_pcache_type = cachetab[i].ct_pcache_type;
704                         arm_pcache_unified = cachetab[i].ct_pcache_unified;
705                         arm_pdcache_size = cachetab[i].ct_pdcache_size;
706                         arm_pdcache_line_size =
707                             cachetab[i].ct_pdcache_line_size;
708                         arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
709                         arm_picache_size = cachetab[i].ct_picache_size;
710                         arm_picache_line_size =
711                             cachetab[i].ct_picache_line_size;
712                         arm_picache_ways = cachetab[i].ct_picache_ways;
713                 }
714         }
715         arm_dcache_align = arm_pdcache_line_size;
716
717         arm_dcache_align_mask = arm_dcache_align - 1;
718 }
719
#endif /* SA110 || SA1100 || SA1110 || IXP12X0 */
721
722 /*
723  * Cannot panic here as we may not have a console yet ...
724  */
725
/*
 * Identify the running CPU from its CP15 ID register and install the
 * matching cpufuncs vector, reset behaviour, cache geometry and pmap
 * PTE initialization.  Exactly one of the #ifdef'd sections below is
 * expected to match the hardware for a given kernel configuration.
 *
 * Returns 0 on success.
 *
 * NOTE(review): the comment above says we cannot panic because the
 * console may not be up, yet the fall-through path does panic --
 * confirm console availability at this point in boot.
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM7TDMI
	/* ARM7TDMI: ARM Ltd. implementor, ARM7 ID, v4T architecture. */
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif	
#ifdef CPU_ARM8
	/* ARM8: ARM Ltd. implementor, part number 0x8xx. */
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	/* ARM9: ARM Ltd. or TI implementor, part number 0x9xx. */
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/*
		 * Precompute the set/index stepping values used by the
		 * ARM9 set/index D-cache operations from the cache
		 * geometry probed above.
		 */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		return 0;
	}
#endif /* CPU_ARM9 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Same set/index stepping precomputation as the ARM9. */
		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm10_dcache_sets_max = 
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm10_dcache_sets_inc;
		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM10 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it  */
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it  */
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_IXP12X0
	if (cputype == CPU_ID_IXP1200) {
		cpufuncs = ixp12x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif	/* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		/* Stepping revision, needed for the errata check below. */
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80321 */

#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266) {
		ixp425_icu_init();

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		return 0;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);
}
969
970 /*
971  * Fixup routines for data and prefetch aborts.
972  *
973  * Several compile time symbols are used
974  *
975  * DEBUG_FAULT_CORRECTION - Print debugging information during the
976  * correction of registers after a fault.
977  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
978  * when defined should use late aborts
979  */
980
981
982 /*
983  * Null abort fixup routine.
984  * For use when no fixup is required.
985  */
986 int
987 cpufunc_null_fixup(arg)
988         void *arg;
989 {
990         return(ABORT_FIXUP_OK);
991 }
992
993
994 #if defined(CPU_ARM7TDMI)
995
#ifdef DEBUG_FAULT_CORRECTION
/* Debug build: trace fixup progress and disassemble faulting insns. */
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
/* Normal build: the fixup debugging hooks compile away entirely. */
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif
1003
1004 /*
1005  * "Early" data abort fixup.
1006  *
1007  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1008  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1009  *
1010  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1011  */
int
early_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;			/* PC of the faulting instruction */
	u_int fault_instruction;	/* the faulting instruction itself */
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		/* LDM/STM (block data transfer) */
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;
	
		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		/* Only instructions with base writeback (W bit) need fixing. */
		if (fault_instruction & (1 << 21)) {
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			/* U bit: undo the base update in the right direction. */
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		/* LDC/STC (coprocessor data transfer) */
		int base;
		int offset;
		int *registers = &frame->tf_r0;
	
		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			/* 8-bit immediate, scaled to a word offset. */
			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/*
		 * NOTE(review): this test is identical to the LDC/STC
		 * test above, so this failure branch is unreachable --
		 * confirm the opcode mask that was actually intended.
		 */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Undo the r14 shuffle performed on entry: put the SVC
		 * r14 back in its slot and restore the saved usr r14.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
1141 #endif  /* CPU_ARM2/250/3/6/7 */
1142
1143
1144 #if defined(CPU_ARM7TDMI)
1145 /*
1146  * "Late" (base updated) data abort fixup
1147  *
1148  * For ARM6 (in late-abort mode) and ARM7.
1149  *
1150  * In this model, all data-transfer instructions need fixing up.  We defer
1151  * LDM, STM, LDC and STC fixup to the early-abort handler.
1152  */
int
late_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;			/* PC of the faulting instruction */
	u_int fault_instruction;	/* the faulting instruction itself */
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap instruction? */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		/* SWP/SWPB performs no base update, nothing to undo. */
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it a ldr/str instruction? */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);
		
		/* This is for late abort only */

		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {	
			/* post-indexed (P=0) or writeback (W=1): base was updated */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				if (offset == base)
					return ABORT_FIXUP_FAILED;
		
				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/* Apply the shift type encoded in bits [6:5]. */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Undo the r14 shuffle performed on entry: put the SVC
		 * r14 back in its slot and restore the saved usr r14.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
1322 #endif  /* CPU_ARM7TDMI */
1323
1324 /*
1325  * CPU Setup code
1326  */
1327
1328 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1329   defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||   \
1330   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
1331   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
1332   defined(CPU_XSCALE_80219)
1333
/* Operation applied to the control-register value by a boot option. */
#define IGN	0	/* leave the bit(s) alone */
#define OR	1	/* set the bit(s) */
#define BIC	2	/* clear the bit(s) */

/*
 * One recognized boot option: the operations (and bit mask) applied
 * when the option evaluates false and true respectively.
 */
struct cpu_option {
	char	*co_name;	/* boot option name */
	int	co_falseop;	/* op when the option is false */
	int	co_trueop;	/* op when the option is true */
	int	co_value;	/* control-register bit mask */
};
1344
1345 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1346
1347 static u_int
1348 parse_cpu_options(args, optlist, cpuctrl)
1349         char *args;
1350         struct cpu_option *optlist;    
1351         u_int cpuctrl; 
1352 {
1353         int integer;
1354
1355         if (args == NULL)
1356                 return(cpuctrl);
1357
1358         while (optlist->co_name) {
1359                 if (get_bootconf_option(args, optlist->co_name,
1360                     BOOTOPT_TYPE_BOOLEAN, &integer)) {
1361                         if (integer) {
1362                                 if (optlist->co_trueop == OR)
1363                                         cpuctrl |= optlist->co_value;
1364                                 else if (optlist->co_trueop == BIC)
1365                                         cpuctrl &= ~optlist->co_value;
1366                         } else {
1367                                 if (optlist->co_falseop == OR)
1368                                         cpuctrl |= optlist->co_value;
1369                                 else if (optlist->co_falseop == BIC)
1370                                         cpuctrl &= ~optlist->co_value;
1371                         }
1372                 }
1373                 ++optlist;
1374         }
1375         return(cpuctrl);
1376 }
1377 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1378
1379 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot options shared by the ARM6/7/8 family: combined I/D cache and
 * write buffer enable/disable knobs.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	/* Legacy (1.2-compat) spellings of the same options. */
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1391
1392 #endif  /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1393
1394 #ifdef CPU_ARM7TDMI
/* ARM7TDMI-specific boot options (plus the ARM700 FPA clock knob). */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Legacy (1.2-compat) spelling. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1406
1407 void
1408 arm7tdmi_setup(args)
1409         char *args;
1410 {
1411         int cpuctrl;
1412
1413         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1414                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1415                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1416
1417         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1418         cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1419
1420 #ifdef __ARMEB__
1421         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1422 #endif
1423
1424         /* Clear out the cache */
1425         cpu_idcache_wbinv_all();
1426
1427         /* Set the control register */
1428         ctrl = cpuctrl;
1429         cpu_control(0xffffffff, cpuctrl);
1430 }
1431 #endif  /* CPU_ARM7TDMI */
1432
1433 #ifdef CPU_ARM8
/* ARM8-specific boot options, including branch-prediction control. */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Legacy (1.2-compat) spelling. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1446
/*
 * arm8_setup:
 *
 * Initialize the CPU control register and the ARM8 clock/test
 * register from the boot arguments.
 */
void
arm8_setup(args)
	char *args;
{
	int integer;
	int cpuctrl, cpuctrlmask;
	int clocktest;
	int setclock = 0;

	/* Defaults: MMU, 32-bit spaces, system protection, cache, WB. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is computed but never used -- the
	 * cpu_control() call below passes 0xffffffff instead.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);

#ifdef __ARMEB__
	/* Big-endian kernel: run the core big-endian. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Get clock configuration */
	clocktest = arm8_clock_config(0, 0) & 0x0f;

	/* Special ARM8 clock and test configuration */
	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		clocktest = 0;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x01;
		else
			clocktest &= ~(0x01);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x02;
		else
			clocktest &= ~(0x02);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
		/*
		 * NOTE(review): the mask clears bits 0xc0 but the new
		 * value is shifted into bits 0x0c -- one of the two
		 * looks wrong; verify against the ARM810 clock/test
		 * register layout before changing.
		 */
		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest |= (integer & 7) << 5;
		setclock = 1;
	}
	
	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* Set the clock/test register */    
	if (setclock)
		arm8_clock_config(0x7f, clocktest);
}
1518 #endif  /* CPU_ARM8 */
1519
1520 #ifdef CPU_ARM9
/* ARM9-specific boot options: separate I- and D-cache controls. */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1532
1533 void
1534 arm9_setup(args)
1535         char *args;
1536 {
1537         int cpuctrl, cpuctrlmask;
1538
1539         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1540             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1541             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1542             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1543             CPU_CONTROL_ROUNDROBIN;
1544         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1545                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1546                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1547                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1548                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1549                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1550                  | CPU_CONTROL_ROUNDROBIN;
1551
1552 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1553         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1554 #endif
1555
1556         cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1557
1558 #ifdef __ARMEB__
1559         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1560 #endif
1561         if (vector_page == ARM_VECTORS_HIGH)
1562                 cpuctrl |= CPU_CONTROL_VECRELOC;
1563
1564         /* Clear out the cache */
1565         cpu_idcache_wbinv_all();
1566
1567         /* Set the control register */
1568         cpu_control(cpuctrlmask, cpuctrl);
1569         ctrl = cpuctrl;
1570
1571 }
1572 #endif  /* CPU_ARM9 */
1573
1574 #ifdef CPU_ARM10
/*
 * Boot-argument option table for ARM10 cores, consumed by
 * parse_cpu_options() in arm10_setup().  Rows map option names to
 * CP15 control-register bits; BIC/OR/IGN select how the bits are
 * applied for the option's two states (semantics defined by
 * parse_cpu_options — confirm there).
 */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
1586
1587 void
1588 arm10_setup(args)
1589         char *args;
1590 {
1591         int cpuctrl, cpuctrlmask;
1592
1593         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1594             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 
1595             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1596         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1597             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1598             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1599             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1600             | CPU_CONTROL_BPRD_ENABLE
1601             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1602
1603 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1604         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1605 #endif
1606
1607         cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1608
1609 #ifdef __ARMEB__
1610         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1611 #endif
1612
1613         /* Clear out the cache */
1614         cpu_idcache_wbinv_all();
1615
1616         /* Now really make sure they are clean.  */
1617         asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1618
1619         /* Set the control register */
1620         ctrl = cpuctrl;
1621         cpu_control(0xffffffff, cpuctrl);
1622
1623         /* And again. */
1624         cpu_idcache_wbinv_all();
1625 }
1626 #endif  /* CPU_ARM10 */
1627
1628 #ifdef CPU_SA110
/*
 * Boot-argument option table for the SA-110, consumed by
 * parse_cpu_options() in sa110_setup().  Rows map option names to
 * CP15 control-register bits; BIC/OR/IGN select how the bits are
 * applied for the option's two states (semantics defined by
 * parse_cpu_options — confirm there).
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	/* Legacy (NetBSD 1.2-era) option spellings. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
1644
1645 void
1646 sa110_setup(args)
1647         char *args;
1648 {
1649         int cpuctrl, cpuctrlmask;
1650
1651         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1652                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1653                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1654                  | CPU_CONTROL_WBUF_ENABLE;
1655         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1656                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1657                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1658                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1659                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1660                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1661                  | CPU_CONTROL_CPCLK;
1662
1663 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1664         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1665 #endif
1666
1667         cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1668
1669 #ifdef __ARMEB__
1670         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1671 #endif
1672
1673         /* Clear out the cache */
1674         cpu_idcache_wbinv_all();
1675
1676         /* Set the control register */
1677         ctrl = cpuctrl;
1678 /*      cpu_control(cpuctrlmask, cpuctrl);*/
1679         cpu_control(0xffffffff, cpuctrl);
1680
1681         /* 
1682          * enable clockswitching, note that this doesn't read or write to r0,
1683          * r0 is just to make it valid asm
1684          */
1685         __asm ("mcr 15, 0, r0, c15, c1, 2");
1686 }
1687 #endif  /* CPU_SA110 */
1688
1689 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Boot-argument option table for the SA-1100/SA-1110, consumed by
 * parse_cpu_options() in sa11x0_setup().  Rows map option names to
 * CP15 control-register bits; BIC/OR/IGN select how the bits are
 * applied for the option's two states (semantics defined by
 * parse_cpu_options — confirm there).
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	/* Legacy (NetBSD 1.2-era) option spellings. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
1705
1706 void
1707 sa11x0_setup(args)
1708         char *args;
1709 {
1710         int cpuctrl, cpuctrlmask;
1711
1712         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1713                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1714                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1715                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1716         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1717                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1718                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1719                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1720                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1721                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1722                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1723
1724 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1725         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1726 #endif
1727
1728
1729         cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
1730
1731 #ifdef __ARMEB__
1732         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1733 #endif
1734
1735         if (vector_page == ARM_VECTORS_HIGH)
1736                 cpuctrl |= CPU_CONTROL_VECRELOC;
1737         /* Clear out the cache */
1738         cpu_idcache_wbinv_all();
1739         /* Set the control register */    
1740         ctrl = cpuctrl;
1741         cpu_control(0xffffffff, cpuctrl);
1742 }
1743 #endif  /* CPU_SA1100 || CPU_SA1110 */
1744
1745 #if defined(CPU_IXP12X0)
/*
 * Boot-argument option table for the IXP12x0, consumed by
 * parse_cpu_options() in ixp12x0_setup().  Rows map option names to
 * CP15 control-register bits; BIC/OR/IGN select how the bits are
 * applied for the option's two states (semantics defined by
 * parse_cpu_options — confirm there).
 */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
1757
1758 void
1759 ixp12x0_setup(args)
1760         char *args;
1761 {
1762         int cpuctrl, cpuctrlmask;
1763
1764
1765         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
1766                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
1767                  | CPU_CONTROL_IC_ENABLE;
1768
1769         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
1770                  | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1771                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
1772                  | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
1773                  | CPU_CONTROL_VECRELOC;
1774
1775 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1776         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1777 #endif
1778
1779         cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
1780
1781 #ifdef __ARMEB__
1782         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1783 #endif
1784
1785         if (vector_page == ARM_VECTORS_HIGH)
1786                 cpuctrl |= CPU_CONTROL_VECRELOC;
1787
1788         /* Clear out the cache */
1789         cpu_idcache_wbinv_all();
1790
1791         /* Set the control register */    
1792         ctrl = cpuctrl;
1793         /* cpu_control(0xffffffff, cpuctrl); */
1794         cpu_control(cpuctrlmask, cpuctrl);
1795 }
1796 #endif /* CPU_IXP12X0 */
1797
1798 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1799   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1800   defined(CPU_XSCALE_80219)
/*
 * Boot-argument option table for XScale cores, consumed by
 * parse_cpu_options() in xscale_setup().  Rows map option names to
 * CP15 control-register bits; BIC/OR/IGN select how the bits are
 * applied for the option's two states (semantics defined by
 * parse_cpu_options — confirm there).  Note there are no writebuf
 * options: the XScale write buffer cannot be disabled (see
 * xscale_setup).
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	/* Legacy (NetBSD 1.2-era) option spellings. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
1815
1816 void
1817 xscale_setup(args)
1818         char *args;
1819 {
1820         uint32_t auxctl;
1821         int cpuctrl, cpuctrlmask;
1822
1823         /*
1824          * The XScale Write Buffer is always enabled.  Our option
1825          * is to enable/disable coalescing.  Note that bits 6:3
1826          * must always be enabled.
1827          */
1828
1829         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1830                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1831                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1832                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1833                  | CPU_CONTROL_BPRD_ENABLE;
1834         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1835                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1836                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1837                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1838                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1839                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1840                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1841
1842 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1843         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1844 #endif
1845
1846         cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1847
1848 #ifdef __ARMEB__
1849         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1850 #endif
1851
1852         if (vector_page == ARM_VECTORS_HIGH)
1853                 cpuctrl |= CPU_CONTROL_VECRELOC;
1854
1855         /* Clear out the cache */
1856         cpu_idcache_wbinv_all();
1857
1858         /*
1859          * Set the control register.  Note that bits 6:3 must always
1860          * be set to 1.
1861          */
1862         ctrl = cpuctrl;
1863 /*      cpu_control(cpuctrlmask, cpuctrl);*/
1864         cpu_control(0xffffffff, cpuctrl);
1865
1866         /* Make sure write coalescing is turned on */
1867         __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1868                 : "=r" (auxctl));
1869 #ifdef XSCALE_NO_COALESCE_WRITES
1870         auxctl |= XSCALE_AUXCTL_K;
1871 #else
1872         auxctl &= ~XSCALE_AUXCTL_K;
1873 #endif
1874         __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1875                 : : "r" (auxctl));
1876 }
1877 #endif  /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 
1878            CPU_XSCALE_80219 */