/*
 * sys/arm/arm/cpufunc.c
 * (repository-browser header removed; original commit note: "This commit
 * was generated by cvs2svn to compensate for changes in r165743")
 */
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *      This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufuncs.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/lock.h>
53 #include <sys/mutex.h>
54 #include <sys/bus.h>
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/disassem.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61
62 #include <machine/cpuconf.h>
63 #include <machine/cpufunc.h>
64 #include <machine/bootconfig.h>
65
66 #ifdef CPU_XSCALE_80200
67 #include <arm/xscale/i80200/i80200reg.h>
68 #include <arm/xscale/i80200/i80200var.h>
69 #endif
70
71 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
72 #include <arm/xscale/i80321/i80321reg.h>
73 #include <arm/xscale/i80321/i80321var.h>
74 #endif
75
76 #if defined(CPU_XSCALE_81342)
77 #include <arm/xscale/i8134x/i81342reg.h>
78 #endif
79
80 #ifdef CPU_XSCALE_IXP425
81 #include <arm/xscale/ixp425/ixp425reg.h>
82 #include <arm/xscale/ixp425/ixp425var.h>
83 #endif
84
85 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
86     defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
87 #include <arm/xscale/xscalereg.h>
88 #endif
89
#if defined(PERFCTRS)
/* Performance-monitor hooks; NOTE(review): presumably installed by
 * platform code — confirm against PERFCTRS users. */
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
/* I-cache geometry, filled in by get_cachetype_cp15()/get_cachetype_table(). */
int     arm_picache_size;
int     arm_picache_line_size;
int     arm_picache_ways;

/* D-cache geometry (also describes a unified cache). */
int     arm_pdcache_size;       /* and unified */
int     arm_pdcache_line_size;
int     arm_pdcache_ways;

/* Cache type (CPU_CT_CTYPE_*) and whether I/D caches are unified. */
int     arm_pcache_type;
int     arm_pcache_unified;

/* D-cache line size and corresponding mask, for aligning cache ranges. */
int     arm_dcache_align;
int     arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
/* NOTE(review): appears to cache a CP15 control register value; it is not
 * written in this chunk — confirm against cpu setup code. */
int ctrl;
#ifdef CPU_ARM7TDMI
/*
 * Operation vector for the ARM7TDMI.  The combined flushID/flushID_SE
 * handlers are reused for the I-only and D-only TLB slots, and the single
 * arm7tdmi_cache_flushID routine backs every non-trivial cache slot.
 * Entries cast through (void *) reuse a handler whose signature differs
 * from the slot's prototype.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        arm7tdmi_setttb,                /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        arm7tdmi_tlb_flushID,           /* tlb_flushID          */
        arm7tdmi_tlb_flushID_SE,        /* tlb_flushID_SE       */
        arm7tdmi_tlb_flushID,           /* tlb_flushI           */
        arm7tdmi_tlb_flushID_SE,        /* tlb_flushI_SE        */
        arm7tdmi_tlb_flushID,           /* tlb_flushD           */
        arm7tdmi_tlb_flushID_SE,        /* tlb_flushD_SE        */

        /* Cache operations */

        cpufunc_nullop,                 /* icache_sync_all      */
        (void *)cpufunc_nullop,         /* icache_sync_range    */

        arm7tdmi_cache_flushID,         /* dcache_wbinv_all     */
        (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range   */
        (void *)arm7tdmi_cache_flushID, /* dcache_inv_range     */
        (void *)cpufunc_nullop,         /* dcache_wb_range      */

        arm7tdmi_cache_flushID,         /* idcache_wbinv_all    */
        (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range  */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        cpufunc_nullop,                 /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        /* Unlike the other vectors here, this core uses the late-abort
         * data-abort fixup rather than the null fixup. */
        late_abort_fixup,               /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        arm7tdmi_context_switch,        /* context_switch       */

        arm7tdmi_setup                  /* cpu setup            */

};
#endif  /* CPU_ARM7TDMI */
170
#ifdef CPU_ARM8
/*
 * Operation vector for the ARM8.  Entries cast through (void *) reuse a
 * handler whose signature differs from the slot's prototype.
 */
struct cpu_functions arm8_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        arm8_setttb,                    /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        arm8_tlb_flushID,               /* tlb_flushID          */
        arm8_tlb_flushID_SE,            /* tlb_flushID_SE       */
        arm8_tlb_flushID,               /* tlb_flushI           */
        arm8_tlb_flushID_SE,            /* tlb_flushI_SE        */
        arm8_tlb_flushID,               /* tlb_flushD           */
        arm8_tlb_flushID_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        cpufunc_nullop,                 /* icache_sync_all      */
        (void *)cpufunc_nullop,         /* icache_sync_range    */

        arm8_cache_purgeID,             /* dcache_wbinv_all     */
        (void *)arm8_cache_purgeID,     /* dcache_wbinv_range   */
/*XXX*/ (void *)arm8_cache_purgeID,     /* dcache_inv_range     */
        (void *)arm8_cache_cleanID,     /* dcache_wb_range      */

        arm8_cache_purgeID,             /* idcache_wbinv_all    */
        (void *)arm8_cache_purgeID,     /* idcache_wbinv_range  */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        cpufunc_nullop,                 /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        arm8_context_switch,            /* context_switch       */

        arm8_setup                      /* cpu setup            */
};
#endif  /* CPU_ARM8 */
227
#ifdef CPU_ARM9
/*
 * Operation vector for the ARM9.  TLB maintenance uses the generic armv4
 * handlers except for the single-entry ID flush; dcache_inv_range falls
 * back to a write-back-invalidate (see XXX below).
 */
struct cpu_functions arm9_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* Domain               */
        arm9_setttb,                    /* Setttb               */
        cpufunc_faultstatus,            /* Faultstatus          */
        cpufunc_faultaddress,           /* Faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        arm9_icache_sync_all,           /* icache_sync_all      */
        arm9_icache_sync_range,         /* icache_sync_range    */

        arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
        arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
/*XXX*/ arm9_dcache_wbinv_range,        /* dcache_inv_range     */
        arm9_dcache_wb_range,           /* dcache_wb_range      */

        arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
        arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        arm9_context_switch,            /* context_switch       */

        arm9_setup                      /* cpu setup            */

};
#endif /* CPU_ARM9 */
285
#ifdef CPU_ARM10
/*
 * Operation vector for the ARM10.  Unlike the ARM9 vector, this one has a
 * dedicated dcache_inv_range and single-entry I-TLB flush handler.
 */
struct cpu_functions arm10_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* Domain               */
        arm10_setttb,                   /* Setttb               */
        cpufunc_faultstatus,            /* Faultstatus          */
        cpufunc_faultaddress,           /* Faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        arm10_icache_sync_all,          /* icache_sync_all      */
        arm10_icache_sync_range,        /* icache_sync_range    */

        arm10_dcache_wbinv_all,         /* dcache_wbinv_all     */
        arm10_dcache_wbinv_range,       /* dcache_wbinv_range   */
        arm10_dcache_inv_range,         /* dcache_inv_range     */
        arm10_dcache_wb_range,          /* dcache_wb_range      */

        arm10_idcache_wbinv_all,        /* idcache_wbinv_all    */
        arm10_idcache_wbinv_range,      /* idcache_wbinv_range  */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        arm10_context_switch,           /* context_switch       */

        arm10_setup                     /* cpu setup            */

};
#endif /* CPU_ARM10 */
343
#ifdef CPU_SA110
/*
 * Operation vector for the StrongARM SA-110, built on the shared sa1_*
 * cache/TLB handlers plus SA-110-specific context switch and setup.
 */
struct cpu_functions sa110_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        sa1_setttb,                     /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        sa1_cache_syncI,                /* icache_sync_all      */
        sa1_cache_syncI_rng,            /* icache_sync_range    */

        sa1_cache_purgeD,               /* dcache_wbinv_all     */
        sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
/*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
        sa1_cache_cleanD_rng,           /* dcache_wb_range      */

        sa1_cache_purgeID,              /* idcache_wbinv_all    */
        sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        sa110_context_switch,           /* context_switch       */

        sa110_setup                     /* cpu setup            */
};
#endif  /* CPU_SA110 */
400
#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Operation vector for the StrongARM SA-1100/SA-1110.  Shares the sa1_*
 * cache/TLB handlers with the SA-110 vector but adds a real read-buffer
 * drain for flush_prefetchbuf and a real sleep handler.
 */
struct cpu_functions sa11x0_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        sa1_setttb,                     /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        sa1_cache_syncI,                /* icache_sync_all      */
        sa1_cache_syncI_rng,            /* icache_sync_range    */

        sa1_cache_purgeD,               /* dcache_wbinv_all     */
        sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
/*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
        sa1_cache_cleanD_rng,           /* dcache_wb_range      */

        sa1_cache_purgeID,              /* idcache_wbinv_all    */
        sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */

        /* Other functions */

        sa11x0_drain_readbuf,           /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        sa11x0_cpu_sleep,               /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        sa11x0_context_switch,          /* context_switch       */

        sa11x0_setup                    /* cpu setup            */
};
#endif  /* CPU_SA1100 || CPU_SA1110 */
457
#ifdef CPU_IXP12X0
/*
 * Operation vector for the Intel IXP12x0.  Reuses the sa1_* cache/TLB
 * handlers and supplies its own read-buffer drain, context switch and
 * setup routines.
 */
struct cpu_functions ixp12x0_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        sa1_setttb,                     /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        sa1_cache_syncI,                /* icache_sync_all      */
        sa1_cache_syncI_rng,            /* icache_sync_range    */

        sa1_cache_purgeD,               /* dcache_wbinv_all     */
        sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
/*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
        sa1_cache_cleanD_rng,           /* dcache_wb_range      */

        sa1_cache_purgeID,              /* idcache_wbinv_all    */
        sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */

        /* Other functions */

        ixp12x0_drain_readbuf,          /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        ixp12x0_context_switch,         /* context_switch       */

        ixp12x0_setup                   /* cpu setup            */
};
#endif  /* CPU_IXP12X0 */
514
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_80219)

/*
 * Operation vector shared by the first-generation XScale cores.  Note the
 * non-null cpwait (coprocessor drain) and control handlers, a dedicated
 * dcache_inv_range, and a real sleep handler.
 */
struct cpu_functions xscale_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        xscale_cpwait,                  /* cpwait               */

        /* MMU functions */

        xscale_control,                 /* control              */
        cpufunc_domains,                /* domain               */
        xscale_setttb,                  /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        xscale_cache_syncI,             /* icache_sync_all      */
        xscale_cache_syncI_rng,         /* icache_sync_range    */

        xscale_cache_purgeD,            /* dcache_wbinv_all     */
        xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
        xscale_cache_flushD_rng,        /* dcache_inv_range     */
        xscale_cache_cleanD_rng,        /* dcache_wb_range      */

        xscale_cache_purgeID,           /* idcache_wbinv_all    */
        xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        xscale_cpu_sleep,               /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        xscale_context_switch,          /* context_switch       */

        xscale_setup                    /* cpu setup            */
};
#endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
   CPU_XSCALE_IXP425 || CPU_XSCALE_80219 */
576
#ifdef CPU_XSCALE_81342
/*
 * Operation vector for the XScale core 3 (81342).  Uses core-3-specific
 * setttb, D-cache and combined-cache handlers (xscalec3_*) while sharing
 * the remaining handlers with the generic XScale vector.
 */
struct cpu_functions xscalec3_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        xscale_cpwait,                  /* cpwait               */

        /* MMU functions */

        xscale_control,                 /* control              */
        cpufunc_domains,                /* domain               */
        xscalec3_setttb,                /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        xscalec3_cache_syncI,           /* icache_sync_all      */
        xscale_cache_syncI_rng,         /* icache_sync_range    */

        xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
        xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
        xscale_cache_flushD_rng,        /* dcache_inv_range     */
        xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */

        xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
        xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        xscale_cpu_sleep,               /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        xscalec3_context_switch,        /* context_switch       */

        xscale_setup                    /* cpu setup            */
};
#endif /* CPU_XSCALE_81342 */
633 /*
634  * Global constants also used by locore.s
635  */
636
637 struct cpu_functions cpufuncs;
638 u_int cputype;
639 u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore.s */
640
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
  defined (CPU_ARM10) ||                                               \
  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||            \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||          \
  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int      arm_dcache_l2_nsets;
static int      arm_dcache_l2_assoc;
static int      arm_dcache_l2_linesize;

/*
 * Read the CP15 Cache Type register and decode it into the global
 * arm_picache_*/arm_pdcache_* variables and the local log2 values above.
 * If the CPU has no Cache Type register the globals are left at their
 * (zero-initialized) defaults; only arm_dcache_align_mask is derived.
 */
static void
get_cachetype_cp15(void)
{
        u_int ctype, isize, dsize;
        u_int multiplier;

        /* Cache Type register: CP15 register 0, opcode2 1. */
        __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
                : "=r" (ctype));

        /*
         * ...and thus spake the ARM ARM:
         *
         * If an <opcode2> value corresponding to an unimplemented or
         * reserved ID register is encountered, the System Control
         * processor returns the value of the main ID register.
         */
        if (ctype == cpufunc_id())
                goto out;

        /* S bit clear => a single unified cache. */
        if ((ctype & CPU_CT_S) == 0)
                arm_pcache_unified = 1;

        /*
         * If you want to know how this code works, go read the ARM ARM.
         */

        arm_pcache_type = CPU_CT_CTYPE(ctype);

        if (arm_pcache_unified == 0) {
                /* Split caches: decode the I-cache geometry field. */
                isize = CPU_CT_ISIZE(ctype);
                /* The M bit selects a size multiplier of 3 instead of 2. */
                multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
                arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
                if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
                        if (isize & CPU_CT_xSIZE_M)
                                arm_picache_line_size = 0; /* not present */
                        else
                                arm_picache_ways = 1;
                } else {
                        arm_picache_ways = multiplier <<
                            (CPU_CT_xSIZE_ASSOC(isize) - 1);
                }
                arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
        }

        /* D-cache (or unified cache) geometry, decoded the same way. */
        dsize = CPU_CT_DSIZE(ctype);
        multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
        arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
        if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
                if (dsize & CPU_CT_xSIZE_M)
                        arm_pdcache_line_size = 0; /* not present */
                else
                        arm_pdcache_ways = 1;
        } else {
                arm_pdcache_ways = multiplier <<
                    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
        }
        arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

        arm_dcache_align = arm_pdcache_line_size;

        /* Log2 forms consumed by assembly cache routines. */
        arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
        arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
        arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
            CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
        arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
724
725 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
726     defined(CPU_IXP12X0)
727 /* Cache information for CPUs without cache type registers. */
728 struct cachetab {
729         u_int32_t ct_cpuid;
730         int     ct_pcache_type;
731         int     ct_pcache_unified;
732         int     ct_pdcache_size;
733         int     ct_pdcache_line_size;
734         int     ct_pdcache_ways;
735         int     ct_picache_size;
736         int     ct_picache_line_size;
737         int     ct_picache_ways;
738 };
739
740 struct cachetab cachetab[] = {
741     /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
742     /* XXX is this type right for SA-1? */
743     { CPU_ID_SA110,     CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
744     { CPU_ID_SA1100,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
745     { CPU_ID_SA1110,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
746     { CPU_ID_IXP1200,   CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
747     { 0, 0, 0, 0, 0, 0, 0, 0}
748 };
749
750 static void get_cachetype_table(void);
751
752 static void
753 get_cachetype_table()
754 {
755         int i;
756         u_int32_t cpuid = cpufunc_id();
757
758         for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
759                 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
760                         arm_pcache_type = cachetab[i].ct_pcache_type;
761                         arm_pcache_unified = cachetab[i].ct_pcache_unified;
762                         arm_pdcache_size = cachetab[i].ct_pdcache_size;
763                         arm_pdcache_line_size =
764                             cachetab[i].ct_pdcache_line_size;
765                         arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
766                         arm_picache_size = cachetab[i].ct_picache_size;
767                         arm_picache_line_size =
768                             cachetab[i].ct_picache_line_size;
769                         arm_picache_ways = cachetab[i].ct_picache_ways;
770                 }
771         }
772         arm_dcache_align = arm_pdcache_line_size;
773
774         arm_dcache_align_mask = arm_dcache_align - 1;
775 }
776
#endif /* SA110 || SA1100 || SA1110 || IXP12X0 */
778
779 /*
780  * Cannot panic here as we may not have a console yet ...
781  */
782
/*
 * set_cpufuncs:
 *
 *	Identify the running CPU from its CP15 id register and install the
 *	matching cpufuncs vector, record its cache geometry, select the
 *	pmap PTE initialization for it, and note whether a reset needs the
 *	ARMv4 MMU-disable sequence.  Returns 0 on success; panics if the
 *	CPU is not supported by this kernel configuration.
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM7TDMI
	/* ARM7TDMI: ARM Ltd. implementor, ARMv4T architecture. */
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	/* ARM9: ARM Ltd. or TI implementor, part number 9xx. */
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/*
		 * Precompute set/index strides for the arm9 D-cache
		 * clean/invalidate loops from the L2-form cache geometry
		 * recorded by get_cachetype_cp15().
		 */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		return 0;
	}
#endif /* CPU_ARM9 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Same precomputed strides as the arm9 case above. */
		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm10_dcache_sets_max = 
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm10_dcache_sets_inc;
		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM10 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it  */
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it  */
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_IXP12X0
	if (cputype == CPU_ID_IXP1200) {
		cpufuncs = ixp12x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif	/* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		return 0;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);
}
1037
1038 /*
1039  * Fixup routines for data and prefetch aborts.
1040  *
1041  * Several compile time symbols are used
1042  *
1043  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1044  * correction of registers after a fault.
1045  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1046  * when defined should use late aborts
1047  */
1048
1049
1050 /*
1051  * Null abort fixup routine.
1052  * For use when no fixup is required.
1053  */
1054 int
1055 cpufunc_null_fixup(arg)
1056         void *arg;
1057 {
1058         return(ABORT_FIXUP_OK);
1059 }
1060
1061
1062 #if defined(CPU_ARM7TDMI)
1063
#ifdef DEBUG_FAULT_CORRECTION
/* With DEBUG_FAULT_CORRECTION, trace abort-fixup decisions to console. */
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
/* Otherwise compile the tracing away entirely. */
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif
1071
1072 /*
1073  * "Early" data abort fixup.
1074  *
1075  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1076  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1077  *
1078  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1079  */
1080 int
1081 early_abort_fixup(arg)
1082         void *arg;
1083 {
1084         trapframe_t *frame = arg;
1085         u_int fault_pc;
1086         u_int fault_instruction;
1087         int saved_lr = 0;
1088
1089         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1090
1091                 /* Ok an abort in SVC mode */
1092
1093                 /*
1094                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1095                  * as the fault happened in svc mode but we need it in the
1096                  * usr slot so we can treat the registers as an array of ints
1097                  * during fixing.
1098                  * NOTE: This PC is in the position but writeback is not
1099                  * allowed on r15.
1100                  * Doing it like this is more efficient than trapping this
1101                  * case in all possible locations in the following fixup code.
1102                  */
1103
1104                 saved_lr = frame->tf_usr_lr;
1105                 frame->tf_usr_lr = frame->tf_svc_lr;
1106
1107                 /*
1108                  * Note the trapframe does not have the SVC r13 so a fault
1109                  * from an instruction with writeback to r13 in SVC mode is
1110                  * not allowed. This should not happen as the kstack is
1111                  * always valid.
1112                  */
1113         }
1114
1115         /* Get fault address and status from the CPU */
1116
1117         fault_pc = frame->tf_pc;
1118         fault_instruction = *((volatile unsigned int *)fault_pc);
1119
1120         /* Decode the fault instruction and fix the registers as needed */
1121
1122         if ((fault_instruction & 0x0e000000) == 0x08000000) {
1123                 int base;
1124                 int loop;
1125                 int count;
1126                 int *registers = &frame->tf_r0;
1127         
1128                 DFC_PRINTF(("LDM/STM\n"));
1129                 DFC_DISASSEMBLE(fault_pc);
1130                 if (fault_instruction & (1 << 21)) {
1131                         DFC_PRINTF(("This instruction must be corrected\n"));
1132                         base = (fault_instruction >> 16) & 0x0f;
1133                         if (base == 15)
1134                                 return ABORT_FIXUP_FAILED;
1135                         /* Count registers transferred */
1136                         count = 0;
1137                         for (loop = 0; loop < 16; ++loop) {
1138                                 if (fault_instruction & (1<<loop))
1139                                         ++count;
1140                         }
1141                         DFC_PRINTF(("%d registers used\n", count));
1142                         DFC_PRINTF(("Corrected r%d by %d bytes ",
1143                                        base, count * 4));
1144                         if (fault_instruction & (1 << 23)) {
1145                                 DFC_PRINTF(("down\n"));
1146                                 registers[base] -= count * 4;
1147                         } else {
1148                                 DFC_PRINTF(("up\n"));
1149                                 registers[base] += count * 4;
1150                         }
1151                 }
1152         } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1153                 int base;
1154                 int offset;
1155                 int *registers = &frame->tf_r0;
1156         
1157                 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1158
1159                 DFC_DISASSEMBLE(fault_pc);
1160
1161                 /* Only need to fix registers if write back is turned on */
1162
1163                 if ((fault_instruction & (1 << 21)) != 0) {
1164                         base = (fault_instruction >> 16) & 0x0f;
1165                         if (base == 13 &&
1166                             (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1167                                 return ABORT_FIXUP_FAILED;
1168                         if (base == 15)
1169                                 return ABORT_FIXUP_FAILED;
1170
1171                         offset = (fault_instruction & 0xff) << 2;
1172                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1173                         if ((fault_instruction & (1 << 23)) != 0)
1174                                 offset = -offset;
1175                         registers[base] += offset;
1176                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1177                 }
1178         } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1179                 return ABORT_FIXUP_FAILED;
1180
1181         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1182
1183                 /* Ok an abort in SVC mode */
1184
1185                 /*
1186                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1187                  * as the fault happened in svc mode but we need it in the
1188                  * usr slot so we can treat the registers as an array of ints
1189                  * during fixing.
1190                  * NOTE: This PC is in the position but writeback is not
1191                  * allowed on r15.
1192                  * Doing it like this is more efficient than trapping this
1193                  * case in all possible locations in the prior fixup code.
1194                  */
1195
1196                 frame->tf_svc_lr = frame->tf_usr_lr;
1197                 frame->tf_usr_lr = saved_lr;
1198
1199                 /*
1200                  * Note the trapframe does not have the SVC r13 so a fault
1201                  * from an instruction with writeback to r13 in SVC mode is
1202                  * not allowed. This should not happen as the kstack is
1203                  * always valid.
1204                  */
1205         }
1206
1207         return(ABORT_FIXUP_OK);
1208 }
1209 #endif  /* CPU_ARM2/250/3/6/7 */
1210
1211
1212 #if defined(CPU_ARM7TDMI)
1213 /*
1214  * "Late" (base updated) data abort fixup
1215  *
1216  * For ARM6 (in late-abort mode) and ARM7.
1217  *
1218  * In this model, all data-transfer instructions need fixing up.  We defer
1219  * LDM, STM, LDC and STC fixup to the early-abort handler.
1220  */
int
late_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;	/* trap frame holding faulting state */
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap (SWP) instruction?  No base update to undo. */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it a ldr/str instruction */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);
		
		/* This is for late abort only */

		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {	
			/*
			 * Post-indexed (P=0) or writeback (W=1): the base
			 * register was updated and must be restored.
			 */

			base = (fault_instruction >> 16) & 0x0f;
			/* Cannot fix r13 in SVC mode (not in trapframe). */
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				/* U=1: base went up; negate to undo. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				/* rm == rn: base already clobbered, bail. */
				if (offset == base)
					return ABORT_FIXUP_FAILED;
		
				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					/* rs == rn: base clobbered, bail. */
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/* Apply the shift type from bits 6:5. */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					/* Not supported; give up. */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				/* U=1: base went up; negate to undo. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Undo the SVC/usr r14 swap performed on entry: move the
		 * (possibly corrected) usr slot back to the SVC r14 and
		 * restore the saved usr r14.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
1390 #endif  /* CPU_ARM7TDMI */
1391
1392 /*
1393  * CPU Setup code
1394  */
1395
1396 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1397   defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||   \
1398   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
1399   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
1400   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
1401
/* Operations applied to the CP15 control word by parse_cpu_options(). */
#define IGN	0	/* ignore: leave the bits alone */
#define OR	1	/* set co_value bits */
#define BIC	2	/* clear co_value bits */
1405
/*
 * One boot-time CPU option: the boolean boot argument co_name either
 * ORs in or BICs out co_value from the control word, depending on its
 * truth value (IGN leaves the word untouched for that truth value).
 */
struct cpu_option {
	char	*co_name;	/* boot option name */
	int	co_falseop;	/* IGN/OR/BIC applied when option is false */
	int	co_trueop;	/* IGN/OR/BIC applied when option is true */
	int	co_value;	/* control-register bit(s) affected */
};
1412
1413 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1414
1415 static u_int
1416 parse_cpu_options(args, optlist, cpuctrl)
1417         char *args;
1418         struct cpu_option *optlist;    
1419         u_int cpuctrl; 
1420 {
1421         int integer;
1422
1423         if (args == NULL)
1424                 return(cpuctrl);
1425
1426         while (optlist->co_name) {
1427                 if (get_bootconf_option(args, optlist->co_name,
1428                     BOOTOPT_TYPE_BOOLEAN, &integer)) {
1429                         if (integer) {
1430                                 if (optlist->co_trueop == OR)
1431                                         cpuctrl |= optlist->co_value;
1432                                 else if (optlist->co_trueop == BIC)
1433                                         cpuctrl &= ~optlist->co_value;
1434                         } else {
1435                                 if (optlist->co_falseop == OR)
1436                                         cpuctrl |= optlist->co_value;
1437                                 else if (optlist->co_falseop == BIC)
1438                                         cpuctrl &= ~optlist->co_value;
1439                         }
1440                 }
1441                 ++optlist;
1442         }
1443         return(cpuctrl);
1444 }
1445 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1446
1447 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/* Boot options shared by the ARM6/7/8 family setup routines. */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	/* Old (1.2-era) spellings, kept for compatibility. */
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1459
1460 #endif  /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1461
1462 #ifdef CPU_ARM7TDMI
/* ARM7TDMI-specific boot options. */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Old (1.2-era) spelling, kept for compatibility. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1474
1475 void
1476 arm7tdmi_setup(args)
1477         char *args;
1478 {
1479         int cpuctrl;
1480
1481         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1482                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1483                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1484
1485         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1486         cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1487
1488 #ifdef __ARMEB__
1489         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1490 #endif
1491
1492         /* Clear out the cache */
1493         cpu_idcache_wbinv_all();
1494
1495         /* Set the control register */
1496         ctrl = cpuctrl;
1497         cpu_control(0xffffffff, cpuctrl);
1498 }
1499 #endif  /* CPU_ARM7TDMI */
1500
1501 #ifdef CPU_ARM8
/* ARM8-specific boot options. */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Old (1.2-era) spelling, kept for compatibility. */
	{ "branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1514
1515 void
1516 arm8_setup(args)
1517         char *args;
1518 {
1519         int integer;
1520         int cpuctrl, cpuctrlmask;
1521         int clocktest;
1522         int setclock = 0;
1523
1524         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1525                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1526                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1527         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1528                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1529                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1530                  | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1531                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1532
1533 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1534         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1535 #endif
1536
1537         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1538         cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1539
1540 #ifdef __ARMEB__
1541         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1542 #endif
1543
1544         /* Get clock configuration */
1545         clocktest = arm8_clock_config(0, 0) & 0x0f;
1546
1547         /* Special ARM8 clock and test configuration */
1548         if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1549                 clocktest = 0;
1550                 setclock = 1;
1551         }
1552         if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1553                 if (integer)
1554                         clocktest |= 0x01;
1555                 else
1556                         clocktest &= ~(0x01);
1557                 setclock = 1;
1558         }
1559         if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1560                 if (integer)
1561                         clocktest |= 0x02;
1562                 else
1563                         clocktest &= ~(0x02);
1564                 setclock = 1;
1565         }
1566         if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1567                 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1568                 setclock = 1;
1569         }
1570         if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1571                 clocktest |= (integer & 7) << 5;
1572                 setclock = 1;
1573         }
1574         
1575         /* Clear out the cache */
1576         cpu_idcache_wbinv_all();
1577
1578         /* Set the control register */
1579         ctrl = cpuctrl;
1580         cpu_control(0xffffffff, cpuctrl);
1581
1582         /* Set the clock/test register */    
1583         if (setclock)
1584                 arm8_clock_config(0x7f, clocktest);
1585 }
1586 #endif  /* CPU_ARM8 */
1587
1588 #ifdef CPU_ARM9
/*
 * Boot-argument options recognized by arm9_setup().  The ARM9 has split
 * I/D caches, hence the separate IC/DC enable bits.  Interpreted by
 * parse_cpu_options().
 */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
1600
1601 void
1602 arm9_setup(args)
1603         char *args;
1604 {
1605         int cpuctrl, cpuctrlmask;
1606
1607         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1608             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1609             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1610             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1611             CPU_CONTROL_ROUNDROBIN;
1612         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1613                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1614                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1615                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1616                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1617                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1618                  | CPU_CONTROL_ROUNDROBIN;
1619
1620 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1621         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1622 #endif
1623
1624         cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1625
1626 #ifdef __ARMEB__
1627         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1628 #endif
1629         if (vector_page == ARM_VECTORS_HIGH)
1630                 cpuctrl |= CPU_CONTROL_VECRELOC;
1631
1632         /* Clear out the cache */
1633         cpu_idcache_wbinv_all();
1634
1635         /* Set the control register */
1636         cpu_control(cpuctrlmask, cpuctrl);
1637         ctrl = cpuctrl;
1638
1639 }
1640 #endif  /* CPU_ARM9 */
1641
1642 #ifdef CPU_ARM10
/*
 * Boot-argument options recognized by arm10_setup().  Interpreted by
 * parse_cpu_options().
 */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
1654
1655 void
1656 arm10_setup(args)
1657         char *args;
1658 {
1659         int cpuctrl, cpuctrlmask;
1660
1661         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1662             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 
1663             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1664         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1665             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1666             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1667             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1668             | CPU_CONTROL_BPRD_ENABLE
1669             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1670
1671 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1672         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1673 #endif
1674
1675         cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1676
1677 #ifdef __ARMEB__
1678         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1679 #endif
1680
1681         /* Clear out the cache */
1682         cpu_idcache_wbinv_all();
1683
1684         /* Now really make sure they are clean.  */
1685         asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1686
1687         /* Set the control register */
1688         ctrl = cpuctrl;
1689         cpu_control(0xffffffff, cpuctrl);
1690
1691         /* And again. */
1692         cpu_idcache_wbinv_all();
1693 }
1694 #endif  /* CPU_ARM10 */
1695
1696 #ifdef CPU_SA110
/*
 * Boot-argument options recognized by sa110_setup().  Interpreted by
 * parse_cpu_options().
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	/* Old, unprefixed option names kept for NetBSD 1.2 compatibility. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
1712
1713 void
1714 sa110_setup(args)
1715         char *args;
1716 {
1717         int cpuctrl, cpuctrlmask;
1718
1719         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1720                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1721                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1722                  | CPU_CONTROL_WBUF_ENABLE;
1723         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1724                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1725                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1726                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1727                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1728                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1729                  | CPU_CONTROL_CPCLK;
1730
1731 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1732         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1733 #endif
1734
1735         cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1736
1737 #ifdef __ARMEB__
1738         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1739 #endif
1740
1741         /* Clear out the cache */
1742         cpu_idcache_wbinv_all();
1743
1744         /* Set the control register */
1745         ctrl = cpuctrl;
1746 /*      cpu_control(cpuctrlmask, cpuctrl);*/
1747         cpu_control(0xffffffff, cpuctrl);
1748
1749         /* 
1750          * enable clockswitching, note that this doesn't read or write to r0,
1751          * r0 is just to make it valid asm
1752          */
1753         __asm ("mcr 15, 0, r0, c15, c1, 2");
1754 }
1755 #endif  /* CPU_SA110 */
1756
1757 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Boot-argument options recognized by sa11x0_setup().  Interpreted by
 * parse_cpu_options().
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	/* Old, unprefixed option names kept for NetBSD 1.2 compatibility. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
1773
1774 void
1775 sa11x0_setup(args)
1776         char *args;
1777 {
1778         int cpuctrl, cpuctrlmask;
1779
1780         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1781                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1782                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1783                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1784         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1785                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1786                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1787                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1788                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1789                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1790                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1791
1792 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1793         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1794 #endif
1795
1796
1797         cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
1798
1799 #ifdef __ARMEB__
1800         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1801 #endif
1802
1803         if (vector_page == ARM_VECTORS_HIGH)
1804                 cpuctrl |= CPU_CONTROL_VECRELOC;
1805         /* Clear out the cache */
1806         cpu_idcache_wbinv_all();
1807         /* Set the control register */    
1808         ctrl = cpuctrl;
1809         cpu_control(0xffffffff, cpuctrl);
1810 }
1811 #endif  /* CPU_SA1100 || CPU_SA1110 */
1812
1813 #if defined(CPU_IXP12X0)
/*
 * Boot-argument options recognized by ixp12x0_setup().  Interpreted by
 * parse_cpu_options().
 */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
1825
1826 void
1827 ixp12x0_setup(args)
1828         char *args;
1829 {
1830         int cpuctrl, cpuctrlmask;
1831
1832
1833         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
1834                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
1835                  | CPU_CONTROL_IC_ENABLE;
1836
1837         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
1838                  | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1839                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
1840                  | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
1841                  | CPU_CONTROL_VECRELOC;
1842
1843 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1844         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1845 #endif
1846
1847         cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
1848
1849 #ifdef __ARMEB__
1850         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1851 #endif
1852
1853         if (vector_page == ARM_VECTORS_HIGH)
1854                 cpuctrl |= CPU_CONTROL_VECRELOC;
1855
1856         /* Clear out the cache */
1857         cpu_idcache_wbinv_all();
1858
1859         /* Set the control register */    
1860         ctrl = cpuctrl;
1861         /* cpu_control(0xffffffff, cpuctrl); */
1862         cpu_control(cpuctrlmask, cpuctrl);
1863 }
1864 #endif /* CPU_IXP12X0 */
1865
1866 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1867   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1868   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * Boot-argument options recognized by xscale_setup().  Interpreted by
 * parse_cpu_options().
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	/* Old, unprefixed option names kept for NetBSD 1.2 compatibility. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
1883
1884 void
1885 xscale_setup(args)
1886         char *args;
1887 {
1888 #ifndef CPU_XSCALE_CORE3
1889         uint32_t auxctl;
1890 #endif
1891         int cpuctrl, cpuctrlmask;
1892
1893         /*
1894          * The XScale Write Buffer is always enabled.  Our option
1895          * is to enable/disable coalescing.  Note that bits 6:3
1896          * must always be enabled.
1897          */
1898
1899         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1900                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1901                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1902                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1903                  | CPU_CONTROL_BPRD_ENABLE;
1904         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1905                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1906                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1907                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1908                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1909                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1910                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1911
1912 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1913         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1914 #endif
1915
1916         cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1917
1918 #ifdef __ARMEB__
1919         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1920 #endif
1921
1922         if (vector_page == ARM_VECTORS_HIGH)
1923                 cpuctrl |= CPU_CONTROL_VECRELOC;
1924
1925         /* Clear out the cache */
1926         cpu_idcache_wbinv_all();
1927
1928         /*
1929          * Set the control register.  Note that bits 6:3 must always
1930          * be set to 1.
1931          */
1932         ctrl = cpuctrl;
1933 /*      cpu_control(cpuctrlmask, cpuctrl);*/
1934         cpu_control(0xffffffff, cpuctrl);
1935
1936 #ifndef CPU_XSCALE_CORE3
1937         /* Make sure write coalescing is turned on */
1938         __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1939                 : "=r" (auxctl));
1940 #ifdef XSCALE_NO_COALESCE_WRITES
1941         auxctl |= XSCALE_AUXCTL_K;
1942 #else
1943         auxctl &= ~XSCALE_AUXCTL_K;
1944 #endif
1945         __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1946                 : : "r" (auxctl));
1947 #endif
1948 }
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
	   CPU_XSCALE_IXP425 || CPU_XSCALE_80219 || CPU_XSCALE_81342 */