/*
 * CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git - sys/arm/arm/cpufunc.c
 * This commit was generated by cvs2svn to compensate for changes in r167802.
 */
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *      This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufuncs.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/lock.h>
53 #include <sys/mutex.h>
54 #include <sys/bus.h>
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/disassem.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #include <vm/uma.h>
62
63 #include <machine/cpuconf.h>
64 #include <machine/cpufunc.h>
65 #include <machine/bootconfig.h>
66
67 #ifdef CPU_XSCALE_80200
68 #include <arm/xscale/i80200/i80200reg.h>
69 #include <arm/xscale/i80200/i80200var.h>
70 #endif
71
72 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73 #include <arm/xscale/i80321/i80321reg.h>
74 #include <arm/xscale/i80321/i80321var.h>
75 #endif
76
77 #if defined(CPU_XSCALE_81342)
78 #include <arm/xscale/i8134x/i81342reg.h>
79 #endif
80
81 #ifdef CPU_XSCALE_IXP425
82 #include <arm/xscale/ixp425/ixp425reg.h>
83 #include <arm/xscale/ixp425/ixp425var.h>
84 #endif
85
86 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
87     defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
88 #include <arm/xscale/xscalereg.h>
89 #endif
90
#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;	/* performance-counter hooks (PERFCTRS kernels only) */
#endif

/*
 * PRIMARY CACHE VARIABLES
 *
 * Filled in at boot by get_cachetype_cp15() (decoded from the CP15
 * cache type register) or get_cachetype_table() (static per-CPU-id
 * table), depending on the CPU family.
 */
int	arm_picache_size;	/* I-cache size */
int	arm_picache_line_size;	/* I-cache line size */
int	arm_picache_ways;	/* I-cache associativity */

int	arm_pdcache_size;	/* D-cache size; and unified */
int	arm_pdcache_line_size;	/* D-cache line size */
int	arm_pdcache_ways;	/* D-cache associativity */

int	arm_pcache_type;	/* cache type (CPU_CT_CTYPE_*) */
int	arm_pcache_unified;	/* non-zero if I and D caches are unified */

int	arm_dcache_align;	/* D-cache line size, used for alignment */
int	arm_dcache_align_mask;	/* arm_dcache_align - 1 */

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
int ctrl;	/* NOTE(review): only declared in this chunk; purpose not visible here */
113
#ifdef CPU_ARM7TDMI
/*
 * Function vector for the ARM7TDMI core.  All ranged cache operations
 * are collapsed onto arm7tdmi_cache_flushID (or are no-ops), and the
 * separate I/D TLB flush slots all reuse the unified flushID routines.
 * Note this is the only vector here using late_abort_fixup.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7TDMI */
171
#ifdef CPU_ARM8
/*
 * Function vector for the ARM8 core.  TLB I/D flush slots all reuse
 * the unified arm8 flushID routines.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range: no pure invalidate; falls back to purge */
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
#endif	/* CPU_ARM8 */
228
#ifdef CPU_ARM9
/*
 * Function vector for the ARM9 core.  TLB maintenance uses the common
 * armv4 routines except for the single-entry ID flush.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm9_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
/*XXX*/	arm9_dcache_wbinv_range,	/* dcache_inv_range: writeback+invalidate used in place of invalidate */
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
286
#ifdef CPU_ARM10
/*
 * Function vector for the ARM10 core.  Unlike ARM9, ARM10 provides a
 * dedicated dcache_inv_range and a single-entry I-TLB flush.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm10_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm10_icache_sync_all,		/* icache_sync_all	*/
	arm10_icache_sync_range,	/* icache_sync_range	*/

	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm10_dcache_inv_range,		/* dcache_inv_range	*/
	arm10_dcache_wb_range,		/* dcache_wb_range	*/

	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
#endif /* CPU_ARM10 */
344
#ifdef CPU_SA110
/*
 * Function vector for the StrongARM SA-110.  Shares the sa1 cache/TLB
 * primitives with the SA-11x0 and IXP12x0 vectors below.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range: purge used in place of invalidate */
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
#endif	/* CPU_SA110 */
401
#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Function vector for the StrongARM SA-1100/SA-1110.  Identical to the
 * SA-110 vector except for the read-buffer drain, sleep, context switch
 * and setup entries.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range: purge used in place of invalidate */
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sa11x0_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa11x0_context_switch,		/* context_switch	*/

	sa11x0_setup			/* cpu setup		*/
};
#endif	/* CPU_SA1100 || CPU_SA1110 */
458
#ifdef CPU_IXP12X0
/*
 * Function vector for the IXP12x0.  Reuses the sa1/armv4 cache and TLB
 * primitives; only read-buffer drain, context switch and setup differ.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range: purge used in place of invalidate */
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/

	/* Other functions */

	ixp12x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	ixp12x0_context_switch,		/* context_switch	*/

	ixp12x0_setup			/* cpu setup		*/
};
#endif	/* CPU_IXP12X0 */
515
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_80219)

/*
 * Function vector shared by the XScale cores (80200, 80321, 80219,
 * PXA2x0, IXP425).  Uses a real cpwait and a dedicated
 * dcache_inv_range, unlike the StrongARM vectors above.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
   CPU_XSCALE_80219 */
577
#ifdef CPU_XSCALE_81342
/*
 * Function vector for the XScale core 3 (81342).  Uses the c3-specific
 * setttb, cache and context-switch routines; the remaining entries are
 * shared with the generic XScale vector above.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_81342 */
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;		/* active vector, copied in by set_cpufuncs() */
u_int cputype;				/* CPU id masked with CPU_ID_CPU_MASK; set by set_cpufuncs() */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
641
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
  defined (CPU_ARM10) ||						\
  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

/*
 * Decode the CP15 cache type register into the arm_p[id]cache_* and
 * arm_dcache_* globals above.  No parameters, no return value; called
 * once from set_cpufuncs() on CPUs that implement the register.
 */
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	/* Read the CP15 cache type register (c0, opcode2 1). */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 *
	 * So if we read back the main ID there is no cache type
	 * register to decode.  NOTE(review): in this path
	 * arm_dcache_align is never set here, so the mask computed at
	 * "out" is arm_dcache_align - 1 with whatever value the global
	 * already holds — confirm callers tolerate that.
	 */
	if (ctype == cpufunc_id())
		goto out;

	/* S bit clear: single unified I/D cache. */
	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache_unified == 0) {
		/* Separate I-cache: decode its geometry field. */
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1;
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	/* D-cache (or unified cache) geometry. */
	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			arm_pdcache_ways = 1;
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

	/* Log2 forms of the D-cache geometry (see static decls above). */
	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM10 || XSCALE */
725
#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
    defined(CPU_IXP12X0)
/*
 * Cache information for CPUs without cache type registers.
 * One row per supported CPU id; consumed by get_cachetype_table().
 */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU id, already masked with CPU_ID_CPU_MASK */
	int	ct_pcache_type;		/* cache type (CPU_CT_CTYPE_*) */
	int	ct_pcache_unified;	/* non-zero if I/D caches are unified */
	int	ct_pdcache_size;	/* D-cache size */
	int	ct_pdcache_line_size;	/* D-cache line size */
	int	ct_pdcache_ways;	/* D-cache associativity */
	int	ct_picache_size;	/* I-cache size */
	int	ct_picache_line_size;	/* I-cache line size */
	int	ct_picache_ways;	/* I-cache associativity */
};

/* Table is terminated by the all-zero sentinel row. */
struct cachetab cachetab[] = {
    /* cpuid,		cache type,	  u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
750
751 static void get_cachetype_table(void);
752
753 static void
754 get_cachetype_table()
755 {
756         int i;
757         u_int32_t cpuid = cpufunc_id();
758
759         for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
760                 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
761                         arm_pcache_type = cachetab[i].ct_pcache_type;
762                         arm_pcache_unified = cachetab[i].ct_pcache_unified;
763                         arm_pdcache_size = cachetab[i].ct_pdcache_size;
764                         arm_pdcache_line_size =
765                             cachetab[i].ct_pdcache_line_size;
766                         arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
767                         arm_picache_size = cachetab[i].ct_picache_size;
768                         arm_picache_line_size =
769                             cachetab[i].ct_picache_line_size;
770                         arm_picache_ways = cachetab[i].ct_picache_ways;
771                 }
772         }
773         arm_dcache_align = arm_pdcache_line_size;
774
775         arm_dcache_align_mask = arm_dcache_align - 1;
776 }
777
#endif /* SA110 || SA1100 || SA1110 || IXP12X0 */
779
/*
 * Probe the CPU id register and install the matching cpufuncs switch
 * table, then perform per-core one-time setup: cache geometry probing,
 * PTE prototype initialization, and errata workarounds.  Called very
 * early in boot; note that despite running before the console is fully
 * up, it does panic() when the CPU type is not supported by this
 * kernel configuration.  Returns 0 on success.
 */

int
set_cpufuncs()
{
        cputype = cpufunc_id();
        cputype &= CPU_ID_CPU_MASK;

        /*
         * NOTE: cpu_do_powersave defaults to off.  If we encounter a
         * CPU type where we want to use it by default, then we set it.
         */

#ifdef CPU_ARM7TDMI
        if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
            CPU_ID_IS7(cputype) &&
            (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
                cpufuncs = arm7tdmi_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 0;
                get_cachetype_cp15();
                pmap_pte_init_generic();
                goto out;
        }
#endif
#ifdef CPU_ARM8
        if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
            (cputype & 0x0000f000) == 0x00008000) {
                cpufuncs = arm8_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 0;     /* XXX correct? */
                get_cachetype_cp15();
                pmap_pte_init_arm8();
                goto out;
        }
#endif  /* CPU_ARM8 */
#ifdef CPU_ARM9
        if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
             (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
            (cputype & 0x0000f000) == 0x00009000) {
                cpufuncs = arm9_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
                get_cachetype_cp15();
                /*
                 * Precompute the set/index iteration constants used by
                 * the arm9 D-cache maintenance routines from the probed
                 * cache geometry.
                 */
                arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
                arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
                    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
                arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
                arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
                pmap_pte_init_arm9();
#else
                pmap_pte_init_generic();
#endif
                goto out;
        }
#endif /* CPU_ARM9 */
#ifdef CPU_ARM10
        if (/* cputype == CPU_ID_ARM1020T || */
            cputype == CPU_ID_ARM1020E) {
                /*
                 * Select write-through cacheing (this isn't really an
                 * option on ARM1020T).
                 */
                cpufuncs = arm10_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
                get_cachetype_cp15();
                arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
                arm10_dcache_sets_max =
                    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
                    arm10_dcache_sets_inc;
                arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
                arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
                pmap_pte_init_generic();
                goto out;
        }
#endif /* CPU_ARM10 */
#ifdef CPU_SA110
        if (cputype == CPU_ID_SA110) {
                cpufuncs = sa110_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it */
                get_cachetype_table();
                pmap_pte_init_sa1();
                goto out;
        }
#endif  /* CPU_SA110 */
#ifdef CPU_SA1100
        if (cputype == CPU_ID_SA1100) {
                cpufuncs = sa11x0_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
                get_cachetype_table();
                pmap_pte_init_sa1();
                /* Use powersave on this CPU. */
                cpu_do_powersave = 1;

                goto out;
        }
#endif  /* CPU_SA1100 */
#ifdef CPU_SA1110
        if (cputype == CPU_ID_SA1110) {
                cpufuncs = sa11x0_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
                get_cachetype_table();
                pmap_pte_init_sa1();
                /* Use powersave on this CPU. */
                cpu_do_powersave = 1;

                goto out;
        }
#endif  /* CPU_SA1110 */
#ifdef CPU_IXP12X0
        if (cputype == CPU_ID_IXP1200) {
                cpufuncs = ixp12x0_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;
                get_cachetype_table();
                pmap_pte_init_sa1();
                goto out;
        }
#endif  /* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
        if (cputype == CPU_ID_80200) {
                int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

                i80200_icu_init();

                /*
                 * Reset the Performance Monitoring Unit to a
                 * pristine state:
                 *      - CCNT, PMN0, PMN1 reset to 0
                 *      - overflow indications cleared
                 *      - all counters disabled
                 */
                __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
                        :
                        : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
                               PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
                /*
                 * Crank CCLKCFG to maximum legal value.
                 */
                __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
                        :
                        : "r" (XSCALE_CCLKCFG));
#endif

                /*
                 * XXX Disable ECC in the Bus Controller Unit; we
                 * don't really support it, yet.  Clear any pending
                 * error indications.
                 */
                __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
                        :
                        : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

                cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
                xscale_pmu_init();
#endif

                /*
                 * i80200 errata: Step-A0 and A1 have a bug where
                 * D$ dirty bits are not cleared on "invalidate by
                 * address".
                 *
                 * Workaround: Clean cache line before invalidating.
                 */
                if (rev == 0 || rev == 1)
                        cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();
                goto out;
        }
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
        if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
            cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
            cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
                /*
                 * Reset the Performance Monitoring Unit to a
                 * pristine state:
                 *      - CCNT, PMN0, PMN1 reset to 0
                 *      - overflow indications cleared
                 *      - all counters disabled
                 */
                __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
                        :
                        : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
                               PMNC_CC_IF));

                cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
                xscale_pmu_init();
#endif

                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();
                goto out;
        }
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
        if (cputype == CPU_ID_81342) {
                cpufuncs = xscalec3_cpufuncs;
#if defined(PERFCTRS)
                xscale_pmu_init();
#endif

                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();
                goto out;
        }
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
        /* ignore core revision to test PXA2xx CPUs */
        if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
            (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

                cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
                xscale_pmu_init();
#endif

                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                /* Use powersave on this CPU. */
                cpu_do_powersave = 1;

                goto out;
        }
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
        if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266) {

                cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
                xscale_pmu_init();
#endif

                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                goto out;
        }
#endif /* CPU_XSCALE_IXP425 */
        /*
         * No match in any compiled-in CPU support above.
         */
        panic("No support for this CPU type (%08x) in kernel", cputype);
        return(ARCHITECTURE_NOT_PRESENT);       /* NOTREACHED: panic() does not return */
out:
        /* Let UMA align allocations to the D-cache line size. */
        uma_set_align(arm_dcache_align_mask);
        return (0);
}
1041
1042 /*
1043  * Fixup routines for data and prefetch aborts.
1044  *
1045  * Several compile time symbols are used
1046  *
1047  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1048  * correction of registers after a fault.
1049  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1050  * when defined should use late aborts
1051  */
1052
1053
1054 /*
1055  * Null abort fixup routine.
1056  * For use when no fixup is required.
1057  */
1058 int
1059 cpufunc_null_fixup(arg)
1060         void *arg;
1061 {
1062         return(ABORT_FIXUP_OK);
1063 }
1064
1065
1066 #if defined(CPU_ARM7TDMI)
1067
/*
 * DFC_PRINTF/DFC_DISASSEMBLE emit fixup diagnostics only when the
 * kernel is built with DEBUG_FAULT_CORRECTION; otherwise they expand
 * to nothing.
 */
#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)           printf x
#define DFC_DISASSEMBLE(x)      disassemble(x)
#else
#define DFC_PRINTF(x)           /* nothing */
#define DFC_DISASSEMBLE(x)      /* nothing */
#endif
1075
1076 /*
1077  * "Early" data abort fixup.
1078  *
1079  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1080  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1081  *
1082  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1083  */
1084 int
1085 early_abort_fixup(arg)
1086         void *arg;
1087 {
1088         trapframe_t *frame = arg;
1089         u_int fault_pc;
1090         u_int fault_instruction;
1091         int saved_lr = 0;
1092
1093         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1094
1095                 /* Ok an abort in SVC mode */
1096
1097                 /*
1098                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1099                  * as the fault happened in svc mode but we need it in the
1100                  * usr slot so we can treat the registers as an array of ints
1101                  * during fixing.
1102                  * NOTE: This PC is in the position but writeback is not
1103                  * allowed on r15.
1104                  * Doing it like this is more efficient than trapping this
1105                  * case in all possible locations in the following fixup code.
1106                  */
1107
1108                 saved_lr = frame->tf_usr_lr;
1109                 frame->tf_usr_lr = frame->tf_svc_lr;
1110
1111                 /*
1112                  * Note the trapframe does not have the SVC r13 so a fault
1113                  * from an instruction with writeback to r13 in SVC mode is
1114                  * not allowed. This should not happen as the kstack is
1115                  * always valid.
1116                  */
1117         }
1118
1119         /* Get fault address and status from the CPU */
1120
1121         fault_pc = frame->tf_pc;
1122         fault_instruction = *((volatile unsigned int *)fault_pc);
1123
1124         /* Decode the fault instruction and fix the registers as needed */
1125
1126         if ((fault_instruction & 0x0e000000) == 0x08000000) {
1127                 int base;
1128                 int loop;
1129                 int count;
1130                 int *registers = &frame->tf_r0;
1131         
1132                 DFC_PRINTF(("LDM/STM\n"));
1133                 DFC_DISASSEMBLE(fault_pc);
1134                 if (fault_instruction & (1 << 21)) {
1135                         DFC_PRINTF(("This instruction must be corrected\n"));
1136                         base = (fault_instruction >> 16) & 0x0f;
1137                         if (base == 15)
1138                                 return ABORT_FIXUP_FAILED;
1139                         /* Count registers transferred */
1140                         count = 0;
1141                         for (loop = 0; loop < 16; ++loop) {
1142                                 if (fault_instruction & (1<<loop))
1143                                         ++count;
1144                         }
1145                         DFC_PRINTF(("%d registers used\n", count));
1146                         DFC_PRINTF(("Corrected r%d by %d bytes ",
1147                                        base, count * 4));
1148                         if (fault_instruction & (1 << 23)) {
1149                                 DFC_PRINTF(("down\n"));
1150                                 registers[base] -= count * 4;
1151                         } else {
1152                                 DFC_PRINTF(("up\n"));
1153                                 registers[base] += count * 4;
1154                         }
1155                 }
1156         } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1157                 int base;
1158                 int offset;
1159                 int *registers = &frame->tf_r0;
1160         
1161                 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1162
1163                 DFC_DISASSEMBLE(fault_pc);
1164
1165                 /* Only need to fix registers if write back is turned on */
1166
1167                 if ((fault_instruction & (1 << 21)) != 0) {
1168                         base = (fault_instruction >> 16) & 0x0f;
1169                         if (base == 13 &&
1170                             (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1171                                 return ABORT_FIXUP_FAILED;
1172                         if (base == 15)
1173                                 return ABORT_FIXUP_FAILED;
1174
1175                         offset = (fault_instruction & 0xff) << 2;
1176                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1177                         if ((fault_instruction & (1 << 23)) != 0)
1178                                 offset = -offset;
1179                         registers[base] += offset;
1180                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1181                 }
1182         } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1183                 return ABORT_FIXUP_FAILED;
1184
1185         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1186
1187                 /* Ok an abort in SVC mode */
1188
1189                 /*
1190                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1191                  * as the fault happened in svc mode but we need it in the
1192                  * usr slot so we can treat the registers as an array of ints
1193                  * during fixing.
1194                  * NOTE: This PC is in the position but writeback is not
1195                  * allowed on r15.
1196                  * Doing it like this is more efficient than trapping this
1197                  * case in all possible locations in the prior fixup code.
1198                  */
1199
1200                 frame->tf_svc_lr = frame->tf_usr_lr;
1201                 frame->tf_usr_lr = saved_lr;
1202
1203                 /*
1204                  * Note the trapframe does not have the SVC r13 so a fault
1205                  * from an instruction with writeback to r13 in SVC mode is
1206                  * not allowed. This should not happen as the kstack is
1207                  * always valid.
1208                  */
1209         }
1210
1211         return(ABORT_FIXUP_OK);
1212 }
1213 #endif  /* CPU_ARM2/250/3/6/7 */
1214
1215
1216 #if defined(CPU_ARM7TDMI)
/*
 * "Late" (base updated) data abort fixup
 *
 * For ARM6 (in late-abort mode) and ARM7.
 *
 * In this model, all data-transfer instructions need fixing up: the
 * base register has already been updated when the abort is taken, so
 * the writeback must be undone here.  We defer LDM, STM, LDC and STC
 * fixup to the early-abort handler (called at the end).
 *
 * Returns ABORT_FIXUP_OK on success, ABORT_FIXUP_FAILED when the
 * instruction cannot be corrected.
 */
int
late_abort_fixup(arg)
        void *arg;
{
        trapframe_t *frame = arg;       /* register state saved at abort time */
        u_int fault_pc;                 /* PC of the faulting instruction */
        u_int fault_instruction;        /* the faulting instruction word */
        int saved_lr = 0;               /* usr r14 stashed while it holds svc r14 */

        if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

                /* Ok an abort in SVC mode */

                /*
                 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
                 * as the fault happened in svc mode but we need it in the
                 * usr slot so we can treat the registers as an array of ints
                 * during fixing.
                 * NOTE: This PC is in the position but writeback is not
                 * allowed on r15.
                 * Doing it like this is more efficient than trapping this
                 * case in all possible locations in the following fixup code.
                 */

                saved_lr = frame->tf_usr_lr;
                frame->tf_usr_lr = frame->tf_svc_lr;

                /*
                 * Note the trapframe does not have the SVC r13 so a fault
                 * from an instruction with writeback to r13 in SVC mode is
                 * not allowed. This should not happen as the kstack is
                 * always valid.
                 */
        }

        /* Get fault address and status from the CPU */

        fault_pc = frame->tf_pc;
        fault_instruction = *((volatile unsigned int *)fault_pc);

        /* Decode the fault instruction and fix the registers as needed */

        /* Was it a swap instruction?  If so, no base register fixup is done. */

        if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
                DFC_DISASSEMBLE(fault_pc);
        } else if ((fault_instruction & 0x0c000000) == 0x04000000) {

                /* Was it a single-word ldr/str instruction? */
                /* This is for late abort only */

                int base;
                int offset;
                int *registers = &frame->tf_r0;

                DFC_DISASSEMBLE(fault_pc);

                /* This is for late abort only */

                if ((fault_instruction & (1 << 24)) == 0
                    || (fault_instruction & (1 << 21)) != 0) {
                        /* postindexed ldr/str with no writeback */

                        base = (fault_instruction >> 16) & 0x0f;
                        /* Writeback to the SVC stack pointer cannot be fixed up. */
                        if (base == 13 &&
                            (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
                                return ABORT_FIXUP_FAILED;
                        /* Nor can writeback to the PC. */
                        if (base == 15)
                                return ABORT_FIXUP_FAILED;
                        DFC_PRINTF(("late abt fix: r%d=%08x : ",
                                       base, registers[base]));
                        if ((fault_instruction & (1 << 25)) == 0) {
                                /* Immediate offset - easy */

                                offset = fault_instruction & 0xfff;
                                if ((fault_instruction & (1 << 23)))
                                        offset = -offset;
                                registers[base] += offset;
                                DFC_PRINTF(("imm=%08x ", offset));
                        } else {
                                /* offset is a shifted register */
                                int shift;

                                offset = fault_instruction & 0x0f;
                                /* Offset register same as base: cannot recover. */
                                if (offset == base)
                                        return ABORT_FIXUP_FAILED;

                                /*
                                 * Register offset - hard we have to
                                 * cope with shifts !
                                 */
                                offset = registers[offset];

                                if ((fault_instruction & (1 << 4)) == 0)
                                        /* shift with amount */
                                        shift = (fault_instruction >> 7) & 0x1f;
                                else {
                                        /* shift with register */
                                        if ((fault_instruction & (1 << 7)) != 0)
                                                /* undefined for now so bail out */
                                                return ABORT_FIXUP_FAILED;
                                        shift = ((fault_instruction >> 8) & 0xf);
                                        if (base == shift)
                                                return ABORT_FIXUP_FAILED;
                                        DFC_PRINTF(("shift reg=%d ", shift));
                                        shift = registers[shift];
                                }
                                DFC_PRINTF(("shift=%08x ", shift));
                                /* Bits 5-6 select the shift type. */
                                switch (((fault_instruction >> 5) & 0x3)) {
                                case 0 : /* Logical left */
                                        offset = (int)(((u_int)offset) << shift);
                                        break;
                                case 1 : /* Logical Right */
                                        if (shift == 0) shift = 32;
                                        offset = (int)(((u_int)offset) >> shift);
                                        break;
                                case 2 : /* Arithmetic Right */
                                        if (shift == 0) shift = 32;
                                        offset = (int)(((int)offset) >> shift);
                                        break;
                                case 3 : /* Rotate right (rol or rxx) */
                                        /* Not supported: give up on the fixup. */
                                        return ABORT_FIXUP_FAILED;
                                        break;
                                }

                                DFC_PRINTF(("abt: fixed LDR/STR with "
                                               "register offset\n"));
                                /* Negate when bit 23 is set to undo the base update. */
                                if ((fault_instruction & (1 << 23)))
                                        offset = -offset;
                                DFC_PRINTF(("offset=%08x ", offset));
                                registers[base] += offset;
                        }
                        DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
                }
        }

        if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

                /* Ok an abort in SVC mode */

                /*
                 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
                 * as the fault happened in svc mode but we need it in the
                 * usr slot so we can treat the registers as an array of ints
                 * during fixing.
                 * NOTE: This PC is in the position but writeback is not
                 * allowed on r15.
                 * Doing it like this is more efficient than trapping this
                 * case in all possible locations in the prior fixup code.
                 */

                frame->tf_svc_lr = frame->tf_usr_lr;
                frame->tf_usr_lr = saved_lr;

                /*
                 * Note the trapframe does not have the SVC r13 so a fault
                 * from an instruction with writeback to r13 in SVC mode is
                 * not allowed. This should not happen as the kstack is
                 * always valid.
                 */
        }

        /*
         * Now let the early-abort fixup routine have a go, in case it
         * was an LDM, STM, LDC or STC that faulted.
         */

        return early_abort_fixup(arg);
}
1394 #endif  /* CPU_ARM7TDMI */
1395
1396 /*
1397  * CPU Setup code
1398  */
1399
1400 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1401   defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||   \
1402   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
1403   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
1404   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
1405
/* Operations parse_cpu_options() can apply to control-register bits. */
#define IGN     0       /* leave the bits alone */
#define OR      1       /* set the bits */
#define BIC     2       /* clear the bits */

/*
 * Maps a boolean boot option onto CPU control register bits:
 * co_trueop/co_falseop (IGN, OR or BIC) is applied to co_value
 * depending on the option's value.
 */
struct cpu_option {
        char    *co_name;       /* boot option name */
        int     co_falseop;     /* operation when option is false */
        int     co_trueop;      /* operation when option is true */
        int     co_value;       /* affected control register bits */
};
1416
1417 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1418
1419 static u_int
1420 parse_cpu_options(args, optlist, cpuctrl)
1421         char *args;
1422         struct cpu_option *optlist;    
1423         u_int cpuctrl; 
1424 {
1425         int integer;
1426
1427         if (args == NULL)
1428                 return(cpuctrl);
1429
1430         while (optlist->co_name) {
1431                 if (get_bootconf_option(args, optlist->co_name,
1432                     BOOTOPT_TYPE_BOOLEAN, &integer)) {
1433                         if (integer) {
1434                                 if (optlist->co_trueop == OR)
1435                                         cpuctrl |= optlist->co_value;
1436                                 else if (optlist->co_trueop == BIC)
1437                                         cpuctrl &= ~optlist->co_value;
1438                         } else {
1439                                 if (optlist->co_falseop == OR)
1440                                         cpuctrl |= optlist->co_value;
1441                                 else if (optlist->co_falseop == BIC)
1442                                         cpuctrl &= ~optlist->co_value;
1443                         }
1444                 }
1445                 ++optlist;
1446         }
1447         return(cpuctrl);
1448 }
1449 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1450
1451 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot options shared by the ARM6/7/8 family: toggle the combined
 * I/D cache enable and the write buffer enable bits.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
        { "nocache",            IGN, BIC, CPU_CONTROL_IDC_ENABLE },
        { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif  /* COMPAT_12 */
        { "cpu.cache",          BIC, OR,  CPU_CONTROL_IDC_ENABLE },
        { "cpu.nocache",        OR,  BIC, CPU_CONTROL_IDC_ENABLE },
        { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
        { NULL,                 IGN, IGN, 0 }   /* terminator */
};
1463
1464 #endif  /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1465
1466 #ifdef CPU_ARM7TDMI
/* ARM7TDMI-specific boot options: cache, write buffer and FPA clock. */
struct cpu_option arm7tdmi_options[] = {
        { "arm7.cache",         BIC, OR,  CPU_CONTROL_IDC_ENABLE },
        { "arm7.nocache",       OR,  BIC, CPU_CONTROL_IDC_ENABLE },
        { "arm7.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { "arm7.nowritebuf",    OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
        { "fpaclk2",            BIC, OR,  CPU_CONTROL_CPCLK },
#endif  /* COMPAT_12 */
        { "arm700.fpaclk",      BIC, OR,  CPU_CONTROL_CPCLK },
        { NULL,                 IGN, IGN, 0 }   /* terminator */
};
1478
1479 void
1480 arm7tdmi_setup(args)
1481         char *args;
1482 {
1483         int cpuctrl;
1484
1485         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1486                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1487                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1488
1489         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1490         cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1491
1492 #ifdef __ARMEB__
1493         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1494 #endif
1495
1496         /* Clear out the cache */
1497         cpu_idcache_wbinv_all();
1498
1499         /* Set the control register */
1500         ctrl = cpuctrl;
1501         cpu_control(0xffffffff, cpuctrl);
1502 }
1503 #endif  /* CPU_ARM7TDMI */
1504
1505 #ifdef CPU_ARM8
/* ARM8-specific boot options: cache, write buffer and branch prediction. */
struct cpu_option arm8_options[] = {
        { "arm8.cache",         BIC, OR,  CPU_CONTROL_IDC_ENABLE },
        { "arm8.nocache",       OR,  BIC, CPU_CONTROL_IDC_ENABLE },
        { "arm8.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { "arm8.nowritebuf",    OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
        { "branchpredict",      BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif  /* COMPAT_12 */
        { "cpu.branchpredict",  BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
        { "arm8.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
        { NULL,                 IGN, IGN, 0 }   /* terminator */
};
1518
1519 void
1520 arm8_setup(args)
1521         char *args;
1522 {
1523         int integer;
1524         int cpuctrl, cpuctrlmask;
1525         int clocktest;
1526         int setclock = 0;
1527
1528         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1529                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1530                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1531         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1532                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1533                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1534                  | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1535                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1536
1537 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1538         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1539 #endif
1540
1541         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1542         cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1543
1544 #ifdef __ARMEB__
1545         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1546 #endif
1547
1548         /* Get clock configuration */
1549         clocktest = arm8_clock_config(0, 0) & 0x0f;
1550
1551         /* Special ARM8 clock and test configuration */
1552         if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1553                 clocktest = 0;
1554                 setclock = 1;
1555         }
1556         if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1557                 if (integer)
1558                         clocktest |= 0x01;
1559                 else
1560                         clocktest &= ~(0x01);
1561                 setclock = 1;
1562         }
1563         if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1564                 if (integer)
1565                         clocktest |= 0x02;
1566                 else
1567                         clocktest &= ~(0x02);
1568                 setclock = 1;
1569         }
1570         if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1571                 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1572                 setclock = 1;
1573         }
1574         if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1575                 clocktest |= (integer & 7) << 5;
1576                 setclock = 1;
1577         }
1578         
1579         /* Clear out the cache */
1580         cpu_idcache_wbinv_all();
1581
1582         /* Set the control register */
1583         ctrl = cpuctrl;
1584         cpu_control(0xffffffff, cpuctrl);
1585
1586         /* Set the clock/test register */    
1587         if (setclock)
1588                 arm8_clock_config(0x7f, clocktest);
1589 }
1590 #endif  /* CPU_ARM8 */
1591
1592 #ifdef CPU_ARM9
/*
 * Boot-argument option table for the ARM9: split I/D cache and write
 * buffer control-register bits (generic "cpu." and chip-specific names).
 */
struct cpu_option arm9_options[] = {
        { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "arm9.cache", BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "arm9.icache",        BIC, OR,  CPU_CONTROL_IC_ENABLE },
        { "arm9.dcache",        BIC, OR,  CPU_CONTROL_DC_ENABLE },
        { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
        { "arm9.writebuf",      BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { NULL,                 IGN, IGN, 0 }     /* terminator */
};
1604
1605 void
1606 arm9_setup(args)
1607         char *args;
1608 {
1609         int cpuctrl, cpuctrlmask;
1610
1611         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1612             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1613             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1614             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1615             CPU_CONTROL_ROUNDROBIN;
1616         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1617                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1618                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1619                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1620                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1621                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1622                  | CPU_CONTROL_ROUNDROBIN;
1623
1624 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1625         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1626 #endif
1627
1628         cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1629
1630 #ifdef __ARMEB__
1631         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1632 #endif
1633         if (vector_page == ARM_VECTORS_HIGH)
1634                 cpuctrl |= CPU_CONTROL_VECRELOC;
1635
1636         /* Clear out the cache */
1637         cpu_idcache_wbinv_all();
1638
1639         /* Set the control register */
1640         cpu_control(cpuctrlmask, cpuctrl);
1641         ctrl = cpuctrl;
1642
1643 }
1644 #endif  /* CPU_ARM9 */
1645
1646 #ifdef CPU_ARM10
/*
 * Boot-argument option table for the ARM10: split I/D cache and write
 * buffer control-register bits.
 */
struct cpu_option arm10_options[] = {
        { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "arm10.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "arm10.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
        { "arm10.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
        { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
        { "arm10.writebuf",     BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { NULL,                 IGN, IGN, 0 }     /* terminator */
};
1658
1659 void
1660 arm10_setup(args)
1661         char *args;
1662 {
1663         int cpuctrl, cpuctrlmask;
1664
1665         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1666             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 
1667             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1668         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1669             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1670             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1671             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1672             | CPU_CONTROL_BPRD_ENABLE
1673             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1674
1675 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1676         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1677 #endif
1678
1679         cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1680
1681 #ifdef __ARMEB__
1682         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1683 #endif
1684
1685         /* Clear out the cache */
1686         cpu_idcache_wbinv_all();
1687
1688         /* Now really make sure they are clean.  */
1689         asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1690
1691         /* Set the control register */
1692         ctrl = cpuctrl;
1693         cpu_control(0xffffffff, cpuctrl);
1694
1695         /* And again. */
1696         cpu_idcache_wbinv_all();
1697 }
1698 #endif  /* CPU_ARM10 */
1699
1700 #ifdef CPU_SA110
/*
 * Boot-argument option table for the SA-110 (StrongARM): split I/D
 * cache and write buffer control-register bits.
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
        /* Legacy option spellings, kept only under COMPAT_12. */
        { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif  /* COMPAT_12 */
        { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "sa110.cache",        BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "sa110.icache",       BIC, OR,  CPU_CONTROL_IC_ENABLE },
        { "sa110.dcache",       BIC, OR,  CPU_CONTROL_DC_ENABLE },
        { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
        { "sa110.writebuf",     BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { NULL,                 IGN, IGN, 0 }     /* terminator */
};
1716
1717 void
1718 sa110_setup(args)
1719         char *args;
1720 {
1721         int cpuctrl, cpuctrlmask;
1722
1723         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1724                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1725                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1726                  | CPU_CONTROL_WBUF_ENABLE;
1727         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1728                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1729                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1730                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1731                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1732                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1733                  | CPU_CONTROL_CPCLK;
1734
1735 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1736         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1737 #endif
1738
1739         cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1740
1741 #ifdef __ARMEB__
1742         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1743 #endif
1744
1745         /* Clear out the cache */
1746         cpu_idcache_wbinv_all();
1747
1748         /* Set the control register */
1749         ctrl = cpuctrl;
1750 /*      cpu_control(cpuctrlmask, cpuctrl);*/
1751         cpu_control(0xffffffff, cpuctrl);
1752
1753         /* 
1754          * enable clockswitching, note that this doesn't read or write to r0,
1755          * r0 is just to make it valid asm
1756          */
1757         __asm ("mcr 15, 0, r0, c15, c1, 2");
1758 }
1759 #endif  /* CPU_SA110 */
1760
1761 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Boot-argument option table for the SA-1100/SA-1110: split I/D cache
 * and write buffer control-register bits.
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
        /* Legacy option spellings, kept only under COMPAT_12. */
        { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "nowritebuf",         IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif  /* COMPAT_12 */
        { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "sa11x0.cache",       BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "sa11x0.icache",      BIC, OR,  CPU_CONTROL_IC_ENABLE },
        { "sa11x0.dcache",      BIC, OR,  CPU_CONTROL_DC_ENABLE },
        { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
        { "sa11x0.writebuf",    BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { NULL,                 IGN, IGN, 0 }     /* terminator */
};
1777
1778 void
1779 sa11x0_setup(args)
1780         char *args;
1781 {
1782         int cpuctrl, cpuctrlmask;
1783
1784         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1785                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1786                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1787                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1788         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1789                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1790                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1791                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1792                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1793                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1794                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1795
1796 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1797         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1798 #endif
1799
1800
1801         cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
1802
1803 #ifdef __ARMEB__
1804         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1805 #endif
1806
1807         if (vector_page == ARM_VECTORS_HIGH)
1808                 cpuctrl |= CPU_CONTROL_VECRELOC;
1809         /* Clear out the cache */
1810         cpu_idcache_wbinv_all();
1811         /* Set the control register */    
1812         ctrl = cpuctrl;
1813         cpu_control(0xffffffff, cpuctrl);
1814 }
1815 #endif  /* CPU_SA1100 || CPU_SA1110 */
1816
1817 #if defined(CPU_IXP12X0)
/*
 * Boot-argument option table for the IXP12x0: split I/D cache and
 * write buffer control-register bits.
 */
struct cpu_option ixp12x0_options[] = {
        { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "ixp12x0.cache",      BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "ixp12x0.icache",     BIC, OR,  CPU_CONTROL_IC_ENABLE },
        { "ixp12x0.dcache",     BIC, OR,  CPU_CONTROL_DC_ENABLE },
        { "cpu.writebuf",       BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { "cpu.nowritebuf",     OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
        { "ixp12x0.writebuf",   BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
        { NULL,                 IGN, IGN, 0 }     /* terminator */
};
1829
1830 void
1831 ixp12x0_setup(args)
1832         char *args;
1833 {
1834         int cpuctrl, cpuctrlmask;
1835
1836
1837         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
1838                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
1839                  | CPU_CONTROL_IC_ENABLE;
1840
1841         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
1842                  | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1843                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
1844                  | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
1845                  | CPU_CONTROL_VECRELOC;
1846
1847 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1848         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1849 #endif
1850
1851         cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
1852
1853 #ifdef __ARMEB__
1854         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1855 #endif
1856
1857         if (vector_page == ARM_VECTORS_HIGH)
1858                 cpuctrl |= CPU_CONTROL_VECRELOC;
1859
1860         /* Clear out the cache */
1861         cpu_idcache_wbinv_all();
1862
1863         /* Set the control register */    
1864         ctrl = cpuctrl;
1865         /* cpu_control(0xffffffff, cpuctrl); */
1866         cpu_control(cpuctrlmask, cpuctrl);
1867 }
1868 #endif /* CPU_IXP12X0 */
1869
1870 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1871   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1872   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * Boot-argument option table shared by all supported XScale cores:
 * branch prediction and split I/D cache control-register bits.
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
        /* Legacy option spellings, kept only under COMPAT_12. */
        { "branchpredict",      BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
        { "nocache",            IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif  /* COMPAT_12 */
        { "cpu.branchpredict",  BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
        { "cpu.cache",          BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "cpu.nocache",        OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
        { "xscale.cache",       BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
        { "xscale.icache",      BIC, OR,  CPU_CONTROL_IC_ENABLE },
        { "xscale.dcache",      BIC, OR,  CPU_CONTROL_DC_ENABLE },
        { NULL,                 IGN, IGN, 0 }     /* terminator */
};
1887
1888 void
1889 xscale_setup(args)
1890         char *args;
1891 {
1892 #ifndef CPU_XSCALE_CORE3
1893         uint32_t auxctl;
1894 #endif
1895         int cpuctrl, cpuctrlmask;
1896
1897         /*
1898          * The XScale Write Buffer is always enabled.  Our option
1899          * is to enable/disable coalescing.  Note that bits 6:3
1900          * must always be enabled.
1901          */
1902
1903         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1904                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1905                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1906                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1907                  | CPU_CONTROL_BPRD_ENABLE;
1908         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1909                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1910                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1911                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1912                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1913                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1914                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1915
1916 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1917         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1918 #endif
1919
1920         cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1921
1922 #ifdef __ARMEB__
1923         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1924 #endif
1925
1926         if (vector_page == ARM_VECTORS_HIGH)
1927                 cpuctrl |= CPU_CONTROL_VECRELOC;
1928
1929         /* Clear out the cache */
1930         cpu_idcache_wbinv_all();
1931
1932         /*
1933          * Set the control register.  Note that bits 6:3 must always
1934          * be set to 1.
1935          */
1936         ctrl = cpuctrl;
1937 /*      cpu_control(cpuctrlmask, cpuctrl);*/
1938         cpu_control(0xffffffff, cpuctrl);
1939
1940 #ifndef CPU_XSCALE_CORE3
1941         /* Make sure write coalescing is turned on */
1942         __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1943                 : "=r" (auxctl));
1944 #ifdef XSCALE_NO_COALESCE_WRITES
1945         auxctl |= XSCALE_AUXCTL_K;
1946 #else
1947         auxctl &= ~XSCALE_AUXCTL_K;
1948 #endif
1949         __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1950                 : : "r" (auxctl));
1951 #endif
1952 }
#endif  /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
           CPU_XSCALE_IXP425 || CPU_XSCALE_80219 || CPU_XSCALE_81342 */