]> CyberLeo.Net >> Repos - FreeBSD/releng/8.1.git/blob - sys/arm/arm/cpufunc.c
Copy stable/8 to releng/8.1 in preparation for 8.1-RC1.
[FreeBSD/releng/8.1.git] / sys / arm / arm / cpufunc.c
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *      This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufuncs.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/lock.h>
53 #include <sys/mutex.h>
54 #include <sys/bus.h>
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/disassem.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #include <vm/uma.h>
62
63 #include <machine/cpuconf.h>
64 #include <machine/cpufunc.h>
65 #include <machine/bootconfig.h>
66
67 #ifdef CPU_XSCALE_80200
68 #include <arm/xscale/i80200/i80200reg.h>
69 #include <arm/xscale/i80200/i80200var.h>
70 #endif
71
72 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73 #include <arm/xscale/i80321/i80321reg.h>
74 #include <arm/xscale/i80321/i80321var.h>
75 #endif
76
77 #if defined(CPU_XSCALE_81342)
78 #include <arm/xscale/i8134x/i81342reg.h>
79 #endif
80
81 #ifdef CPU_XSCALE_IXP425
82 #include <arm/xscale/ixp425/ixp425reg.h>
83 #include <arm/xscale/ixp425/ixp425var.h>
84 #endif
85
86 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
87     defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
88 #include <arm/xscale/xscalereg.h>
89 #endif
90
91 #if defined(PERFCTRS)
92 struct arm_pmc_funcs *arm_pmc;  /* performance-monitor hooks; NOTE(review): set elsewhere — confirm owner */
93 #endif
94
95 /* PRIMARY CACHE VARIABLES */
/*
 * Geometry of the primary (L1) caches.  Presumably filled in by the
 * per-CPU setup routine selected below — TODO confirm; nothing in this
 * file initializes them directly.
 */
96 int     arm_picache_size;
97 int     arm_picache_line_size;
98 int     arm_picache_ways;
99
100 int     arm_pdcache_size;       /* and unified */
101 int     arm_pdcache_line_size;
102 int     arm_pdcache_ways;
103
104 int     arm_pcache_type;
105 int     arm_pcache_unified;
106
107 int     arm_dcache_align;
108 int     arm_dcache_align_mask;
109
110 /* 1 == use cpu_sleep(), 0 == don't */
111 int cpu_do_powersave;
112 int ctrl;  /* NOTE(review): looks like a cached copy of the CP15 control register value — confirm against the *_setup routines */
113
114 #ifdef CPU_ARM7TDMI
/*
 * Dispatch table for ARM7TDMI cores.  Positional initializer for
 * struct cpu_functions (machine/cpufunc.h): entries must stay in
 * declaration order; the (void *) casts adapt routines whose
 * prototypes differ from the slot's signature.  All six TLB slots and
 * all cache slots funnel into the combined arm7tdmi_tlb_flushID*/
/* / arm7tdmi_cache_flushID routines, and this is the only table here
 * that installs late_abort_fixup for data aborts.
 */
115 struct cpu_functions arm7tdmi_cpufuncs = {
116         /* CPU functions */
117         
118         cpufunc_id,                     /* id                   */
119         cpufunc_nullop,                 /* cpwait               */
120
121         /* MMU functions */
122
123         cpufunc_control,                /* control              */
124         cpufunc_domains,                /* domain               */
125         arm7tdmi_setttb,                /* setttb               */
126         cpufunc_faultstatus,            /* faultstatus          */
127         cpufunc_faultaddress,           /* faultaddress         */
128
129         /* TLB functions */
130
131         arm7tdmi_tlb_flushID,           /* tlb_flushID          */
132         arm7tdmi_tlb_flushID_SE,        /* tlb_flushID_SE       */
133         arm7tdmi_tlb_flushID,           /* tlb_flushI           */
134         arm7tdmi_tlb_flushID_SE,        /* tlb_flushI_SE        */
135         arm7tdmi_tlb_flushID,           /* tlb_flushD           */
136         arm7tdmi_tlb_flushID_SE,        /* tlb_flushD_SE        */
137
138         /* Cache operations */
139
140         cpufunc_nullop,                 /* icache_sync_all      */
141         (void *)cpufunc_nullop,         /* icache_sync_range    */
142
143         arm7tdmi_cache_flushID,         /* dcache_wbinv_all     */
144         (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range   */
145         (void *)arm7tdmi_cache_flushID, /* dcache_inv_range     */
146         (void *)cpufunc_nullop,         /* dcache_wb_range      */
147
148         arm7tdmi_cache_flushID,         /* idcache_wbinv_all    */
149         (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range  */
150         cpufunc_nullop,                 /* l2cache_wbinv_all    */
151         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
152         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
153         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
154
155         /* Other functions */
156
157         cpufunc_nullop,                 /* flush_prefetchbuf    */
158         cpufunc_nullop,                 /* drain_writebuf       */
159         cpufunc_nullop,                 /* flush_brnchtgt_C     */
160         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
161
162         (void *)cpufunc_nullop,         /* sleep                */
163
164         /* Soft functions */
165
166         late_abort_fixup,               /* dataabt_fixup        */
167         cpufunc_null_fixup,             /* prefetchabt_fixup    */
168
169         arm7tdmi_context_switch,        /* context_switch       */
170
171         arm7tdmi_setup                  /* cpu setup            */
172
173 };
174 #endif  /* CPU_ARM7TDMI */
175
176 #ifdef CPU_ARM8
/*
 * Dispatch table for ARM8 cores.  Positional initializer for
 * struct cpu_functions (machine/cpufunc.h): keep entries in
 * declaration order.  The XXX'd dcache_inv_range slot reuses the
 * purge (write-back + invalidate) routine because the table provides
 * no invalidate-only primitive.  No L2 cache: those slots are no-ops.
 */
177 struct cpu_functions arm8_cpufuncs = {
178         /* CPU functions */
179         
180         cpufunc_id,                     /* id                   */
181         cpufunc_nullop,                 /* cpwait               */
182
183         /* MMU functions */
184
185         cpufunc_control,                /* control              */
186         cpufunc_domains,                /* domain               */
187         arm8_setttb,                    /* setttb               */
188         cpufunc_faultstatus,            /* faultstatus          */
189         cpufunc_faultaddress,           /* faultaddress         */
190
191         /* TLB functions */
192
193         arm8_tlb_flushID,               /* tlb_flushID          */
194         arm8_tlb_flushID_SE,            /* tlb_flushID_SE       */
195         arm8_tlb_flushID,               /* tlb_flushI           */
196         arm8_tlb_flushID_SE,            /* tlb_flushI_SE        */
197         arm8_tlb_flushID,               /* tlb_flushD           */
198         arm8_tlb_flushID_SE,            /* tlb_flushD_SE        */
199
200         /* Cache operations */
201
202         cpufunc_nullop,                 /* icache_sync_all      */
203         (void *)cpufunc_nullop,         /* icache_sync_range    */
204
205         arm8_cache_purgeID,             /* dcache_wbinv_all     */
206         (void *)arm8_cache_purgeID,     /* dcache_wbinv_range   */
207 /*XXX*/ (void *)arm8_cache_purgeID,     /* dcache_inv_range     */
208         (void *)arm8_cache_cleanID,     /* dcache_wb_range      */
209
210         arm8_cache_purgeID,             /* idcache_wbinv_all    */
211         (void *)arm8_cache_purgeID,     /* idcache_wbinv_range  */
212         cpufunc_nullop,                 /* l2cache_wbinv_all    */
213         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
214         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
215         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
216
217         /* Other functions */
218
219         cpufunc_nullop,                 /* flush_prefetchbuf    */
220         cpufunc_nullop,                 /* drain_writebuf       */
221         cpufunc_nullop,                 /* flush_brnchtgt_C     */
222         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
223
224         (void *)cpufunc_nullop,         /* sleep                */
225
226         /* Soft functions */
227
228         cpufunc_null_fixup,             /* dataabt_fixup        */
229         cpufunc_null_fixup,             /* prefetchabt_fixup    */
230
231         arm8_context_switch,            /* context_switch       */
232
233         arm8_setup                      /* cpu setup            */
234 };          
235 #endif  /* CPU_ARM8 */
236
237 #ifdef CPU_ARM9
/*
 * Dispatch table for ARM9 cores.  Positional initializer for
 * struct cpu_functions (machine/cpufunc.h): keep entries in
 * declaration order.  Uses the generic armv4 TLB-flush and
 * write-buffer-drain routines with ARM9-specific cache handlers;
 * no L2 cache, so those slots are no-ops.
 */
238 struct cpu_functions arm9_cpufuncs = {
239         /* CPU functions */
240
241         cpufunc_id,                     /* id                   */
242         cpufunc_nullop,                 /* cpwait               */
243
244         /* MMU functions */
245
246         cpufunc_control,                /* control              */
247         cpufunc_domains,                /* domain               */
248         arm9_setttb,                    /* setttb               */
249         cpufunc_faultstatus,            /* faultstatus          */
250         cpufunc_faultaddress,           /* faultaddress         */
251
252         /* TLB functions */
253
254         armv4_tlb_flushID,              /* tlb_flushID          */
255         arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
256         armv4_tlb_flushI,               /* tlb_flushI           */
257         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
258         armv4_tlb_flushD,               /* tlb_flushD           */
259         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
260
261         /* Cache operations */
262
263         arm9_icache_sync_all,           /* icache_sync_all      */
264         arm9_icache_sync_range,         /* icache_sync_range    */
265
266         arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
267         arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
268         arm9_dcache_inv_range,          /* dcache_inv_range     */
269         arm9_dcache_wb_range,           /* dcache_wb_range      */
270
271         arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
272         arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
273         cpufunc_nullop,                 /* l2cache_wbinv_all    */
274         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
275         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
276         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
277
278         /* Other functions */
279
280         cpufunc_nullop,                 /* flush_prefetchbuf    */
281         armv4_drain_writebuf,           /* drain_writebuf       */
282         cpufunc_nullop,                 /* flush_brnchtgt_C     */
283         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
284
285         (void *)cpufunc_nullop,         /* sleep                */
286
287         /* Soft functions */
288
289         cpufunc_null_fixup,             /* dataabt_fixup        */
290         cpufunc_null_fixup,             /* prefetchabt_fixup    */
291
292         arm9_context_switch,            /* context_switch       */
293
294         arm9_setup                      /* cpu setup            */
295
296 };
297 #endif /* CPU_ARM9 */
298
299 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Dispatch table shared by ARM9E and ARM10 configurations (see the
 * enclosing #if).  Positional initializer for struct cpu_functions
 * (machine/cpufunc.h): keep entries in declaration order.  Combines
 * armv4 TLB/drain routines, armv5_ec cache handlers, and arm10
 * context-switch/setup; no L2 cache, so those slots are no-ops.
 */
300 struct cpu_functions armv5_ec_cpufuncs = {
301         /* CPU functions */
302
303         cpufunc_id,                     /* id                   */
304         cpufunc_nullop,                 /* cpwait               */
305
306         /* MMU functions */
307
308         cpufunc_control,                /* control              */
309         cpufunc_domains,                /* domain               */
310         armv5_ec_setttb,                /* setttb               */
311         cpufunc_faultstatus,            /* faultstatus          */
312         cpufunc_faultaddress,           /* faultaddress         */
313
314         /* TLB functions */
315
316         armv4_tlb_flushID,              /* tlb_flushID          */
317         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
318         armv4_tlb_flushI,               /* tlb_flushI           */
319         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
320         armv4_tlb_flushD,               /* tlb_flushD           */
321         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
322
323         /* Cache operations */
324
325         armv5_ec_icache_sync_all,       /* icache_sync_all      */
326         armv5_ec_icache_sync_range,     /* icache_sync_range    */
327
328         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
329         armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
330         armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
331         armv5_ec_dcache_wb_range,       /* dcache_wb_range      */
332
333         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
334         armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */
335
336         cpufunc_nullop,                 /* l2cache_wbinv_all    */
337         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
338         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
339         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
340                                  
341         /* Other functions */
342
343         cpufunc_nullop,                 /* flush_prefetchbuf    */
344         armv4_drain_writebuf,           /* drain_writebuf       */
345         cpufunc_nullop,                 /* flush_brnchtgt_C     */
346         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
347
348         (void *)cpufunc_nullop,         /* sleep                */
349
350         /* Soft functions */
351
352         cpufunc_null_fixup,             /* dataabt_fixup        */
353         cpufunc_null_fixup,             /* prefetchabt_fixup    */
354
355         arm10_context_switch,           /* context_switch       */
356
357         arm10_setup                     /* cpu setup            */
358
359 };
360
/*
 * Dispatch table for Marvell Sheeva cores (built under the ARM9E/ARM10
 * #if).  Positional initializer for struct cpu_functions
 * (machine/cpufunc.h): keep entries in declaration order.  This is the
 * only table in this file whose l2cache_* slots point at real handlers
 * (sheeva_l2cache_*) rather than no-ops; ranged d-cache ops also use
 * sheeva-specific routines instead of the shared armv5_ec ones.
 */
361 struct cpu_functions sheeva_cpufuncs = {
362         /* CPU functions */
363
364         cpufunc_id,                     /* id                   */
365         cpufunc_nullop,                 /* cpwait               */
366
367         /* MMU functions */
368
369         cpufunc_control,                /* control              */
370         cpufunc_domains,                /* domain               */
371         sheeva_setttb,                  /* setttb               */
372         cpufunc_faultstatus,            /* faultstatus          */
373         cpufunc_faultaddress,           /* faultaddress         */
374
375         /* TLB functions */
376
377         armv4_tlb_flushID,              /* tlb_flushID          */
378         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
379         armv4_tlb_flushI,               /* tlb_flushI           */
380         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
381         armv4_tlb_flushD,               /* tlb_flushD           */
382         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
383
384         /* Cache operations */
385
386         armv5_ec_icache_sync_all,       /* icache_sync_all      */
387         armv5_ec_icache_sync_range,     /* icache_sync_range    */
388
389         armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
390         sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
391         sheeva_dcache_inv_range,        /* dcache_inv_range     */
392         sheeva_dcache_wb_range,         /* dcache_wb_range      */
393
394         armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
395         sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */
396
397         sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
398         sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
399         sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
400         sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
401
402         /* Other functions */
403
404         cpufunc_nullop,                 /* flush_prefetchbuf    */
405         armv4_drain_writebuf,           /* drain_writebuf       */
406         cpufunc_nullop,                 /* flush_brnchtgt_C     */
407         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
408
409         (void *)cpufunc_nullop,         /* sleep                */
410
411         /* Soft functions */
412
413         cpufunc_null_fixup,             /* dataabt_fixup        */
414         cpufunc_null_fixup,             /* prefetchabt_fixup    */
415
416         arm10_context_switch,           /* context_switch       */
417
418         arm10_setup                     /* cpu setup            */
419 };
420 #endif /* CPU_ARM9E || CPU_ARM10 */
421
422 #ifdef CPU_ARM10
/*
 * Dispatch table for ARM10 cores.  Positional initializer for
 * struct cpu_functions (machine/cpufunc.h): keep entries in
 * declaration order.  Same shape as armv5_ec_cpufuncs but with
 * arm10-specific setttb and cache handlers; no L2 cache, so those
 * slots are no-ops.
 */
423 struct cpu_functions arm10_cpufuncs = {
424         /* CPU functions */
425
426         cpufunc_id,                     /* id                   */
427         cpufunc_nullop,                 /* cpwait               */
428
429         /* MMU functions */
430
431         cpufunc_control,                /* control              */
432         cpufunc_domains,                /* domain               */
433         arm10_setttb,                   /* setttb               */
434         cpufunc_faultstatus,            /* faultstatus          */
435         cpufunc_faultaddress,           /* faultaddress         */
436
437         /* TLB functions */
438
439         armv4_tlb_flushID,              /* tlb_flushID          */
440         arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
441         armv4_tlb_flushI,               /* tlb_flushI           */
442         arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
443         armv4_tlb_flushD,               /* tlb_flushD           */
444         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
445
446         /* Cache operations */
447
448         arm10_icache_sync_all,          /* icache_sync_all      */
449         arm10_icache_sync_range,        /* icache_sync_range    */
450
451         arm10_dcache_wbinv_all,         /* dcache_wbinv_all     */
452         arm10_dcache_wbinv_range,       /* dcache_wbinv_range   */
453         arm10_dcache_inv_range,         /* dcache_inv_range     */
454         arm10_dcache_wb_range,          /* dcache_wb_range      */
455
456         arm10_idcache_wbinv_all,        /* idcache_wbinv_all    */
457         arm10_idcache_wbinv_range,      /* idcache_wbinv_range  */
458         cpufunc_nullop,                 /* l2cache_wbinv_all    */
459         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
460         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
461         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
462
463         /* Other functions */
464
465         cpufunc_nullop,                 /* flush_prefetchbuf    */
466         armv4_drain_writebuf,           /* drain_writebuf       */
467         cpufunc_nullop,                 /* flush_brnchtgt_C     */
468         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
469
470         (void *)cpufunc_nullop,         /* sleep                */
471
472         /* Soft functions */
473
474         cpufunc_null_fixup,             /* dataabt_fixup        */
475         cpufunc_null_fixup,             /* prefetchabt_fixup    */
476
477         arm10_context_switch,           /* context_switch       */
478
479         arm10_setup                     /* cpu setup            */
480
481 };
482 #endif /* CPU_ARM10 */
483
484 #ifdef CPU_SA110
/*
 * Dispatch table for StrongARM SA-110 cores.  Positional initializer
 * for struct cpu_functions (machine/cpufunc.h): keep entries in
 * declaration order.  Uses the shared sa1_* cache routines; the XXX'd
 * dcache_inv_range slot reuses the purge (write-back + invalidate)
 * routine because no invalidate-only primitive is provided.
 */
485 struct cpu_functions sa110_cpufuncs = {
486         /* CPU functions */
487         
488         cpufunc_id,                     /* id                   */
489         cpufunc_nullop,                 /* cpwait               */
490
491         /* MMU functions */
492
493         cpufunc_control,                /* control              */
494         cpufunc_domains,                /* domain               */
495         sa1_setttb,                     /* setttb               */
496         cpufunc_faultstatus,            /* faultstatus          */
497         cpufunc_faultaddress,           /* faultaddress         */
498
499         /* TLB functions */
500
501         armv4_tlb_flushID,              /* tlb_flushID          */
502         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
503         armv4_tlb_flushI,               /* tlb_flushI           */
504         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
505         armv4_tlb_flushD,               /* tlb_flushD           */
506         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
507
508         /* Cache operations */
509
510         sa1_cache_syncI,                /* icache_sync_all      */
511         sa1_cache_syncI_rng,            /* icache_sync_range    */
512
513         sa1_cache_purgeD,               /* dcache_wbinv_all     */
514         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
515 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
516         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
517
518         sa1_cache_purgeID,              /* idcache_wbinv_all    */
519         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
520         cpufunc_nullop,                 /* l2cache_wbinv_all    */
521         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
522         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
523         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
524
525         /* Other functions */
526
527         cpufunc_nullop,                 /* flush_prefetchbuf    */
528         armv4_drain_writebuf,           /* drain_writebuf       */
529         cpufunc_nullop,                 /* flush_brnchtgt_C     */
530         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
531
532         (void *)cpufunc_nullop,         /* sleep                */
533
534         /* Soft functions */
535
536         cpufunc_null_fixup,             /* dataabt_fixup        */
537         cpufunc_null_fixup,             /* prefetchabt_fixup    */
538
539         sa110_context_switch,           /* context_switch       */
540
541         sa110_setup                     /* cpu setup            */
542 };          
543 #endif  /* CPU_SA110 */
544
545 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Dispatch table for StrongARM SA-1100/SA-1110 cores (see the enclosing
 * #if).  Positional initializer for struct cpu_functions
 * (machine/cpufunc.h): keep entries in declaration order.  Same sa1_*
 * cache routines as SA-110, but with a real sleep handler
 * (sa11x0_cpu_sleep) and a read-buffer drain in the flush_prefetchbuf
 * slot.
 */
546 struct cpu_functions sa11x0_cpufuncs = {
547         /* CPU functions */
548         
549         cpufunc_id,                     /* id                   */
550         cpufunc_nullop,                 /* cpwait               */
551
552         /* MMU functions */
553
554         cpufunc_control,                /* control              */
555         cpufunc_domains,                /* domain               */
556         sa1_setttb,                     /* setttb               */
557         cpufunc_faultstatus,            /* faultstatus          */
558         cpufunc_faultaddress,           /* faultaddress         */
559
560         /* TLB functions */
561
562         armv4_tlb_flushID,              /* tlb_flushID          */
563         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
564         armv4_tlb_flushI,               /* tlb_flushI           */
565         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
566         armv4_tlb_flushD,               /* tlb_flushD           */
567         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
568
569         /* Cache operations */
570
571         sa1_cache_syncI,                /* icache_sync_all      */
572         sa1_cache_syncI_rng,            /* icache_sync_range    */
573
574         sa1_cache_purgeD,               /* dcache_wbinv_all     */
575         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
576 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
577         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
578
579         sa1_cache_purgeID,              /* idcache_wbinv_all    */
580         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
581         cpufunc_nullop,                 /* l2cache_wbinv_all    */
582         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
583         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
584         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
585
586         /* Other functions */
587
588         sa11x0_drain_readbuf,           /* flush_prefetchbuf    */
589         armv4_drain_writebuf,           /* drain_writebuf       */
590         cpufunc_nullop,                 /* flush_brnchtgt_C     */
591         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
592
593         sa11x0_cpu_sleep,               /* sleep                */
594
595         /* Soft functions */
596
597         cpufunc_null_fixup,             /* dataabt_fixup        */
598         cpufunc_null_fixup,             /* prefetchabt_fixup    */
599
600         sa11x0_context_switch,          /* context_switch       */
601
602         sa11x0_setup                    /* cpu setup            */
603 };          
604 #endif  /* CPU_SA1100 || CPU_SA1110 */
605
606 #ifdef CPU_IXP12X0
/*
 * Dispatch table for Intel IXP12x0 cores.  Positional initializer for
 * struct cpu_functions (machine/cpufunc.h): keep entries in
 * declaration order.  Shares the sa1_* cache routines (including the
 * XXX purge-for-invalidate fallback) and installs the IXP12x0
 * read-buffer drain in the flush_prefetchbuf slot.
 */
607 struct cpu_functions ixp12x0_cpufuncs = {
608         /* CPU functions */
609         
610         cpufunc_id,                     /* id                   */
611         cpufunc_nullop,                 /* cpwait               */
612
613         /* MMU functions */
614
615         cpufunc_control,                /* control              */
616         cpufunc_domains,                /* domain               */
617         sa1_setttb,                     /* setttb               */
618         cpufunc_faultstatus,            /* faultstatus          */
619         cpufunc_faultaddress,           /* faultaddress         */
620
621         /* TLB functions */
622
623         armv4_tlb_flushID,              /* tlb_flushID          */
624         sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
625         armv4_tlb_flushI,               /* tlb_flushI           */
626         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
627         armv4_tlb_flushD,               /* tlb_flushD           */
628         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
629
630         /* Cache operations */
631
632         sa1_cache_syncI,                /* icache_sync_all      */
633         sa1_cache_syncI_rng,            /* icache_sync_range    */
634
635         sa1_cache_purgeD,               /* dcache_wbinv_all     */
636         sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
637 /*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
638         sa1_cache_cleanD_rng,           /* dcache_wb_range      */
639
640         sa1_cache_purgeID,              /* idcache_wbinv_all    */
641         sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
642         cpufunc_nullop,                 /* l2cache_wbinv_all    */
643         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
644         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
645         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
646
647         /* Other functions */
648
649         ixp12x0_drain_readbuf,                  /* flush_prefetchbuf    */
650         armv4_drain_writebuf,           /* drain_writebuf       */
651         cpufunc_nullop,                 /* flush_brnchtgt_C     */
652         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
653
654         (void *)cpufunc_nullop,         /* sleep                */
655
656         /* Soft functions */
657
658         cpufunc_null_fixup,             /* dataabt_fixup        */
659         cpufunc_null_fixup,             /* prefetchabt_fixup    */
660
661         ixp12x0_context_switch,         /* context_switch       */
662
663         ixp12x0_setup                   /* cpu setup            */
664 };          
665 #endif  /* CPU_IXP12X0 */
666
667 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
668   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
669   defined(CPU_XSCALE_80219)
670
/*
 * Dispatch table for Intel XScale cores (80200/80321/80219/PXA2x0/
 * IXP425 — see the enclosing #if).  Positional initializer for
 * struct cpu_functions (machine/cpufunc.h): keep entries in
 * declaration order.  Unlike the tables above, this one installs a
 * real cpwait (xscale_cpwait), an XScale-specific control routine,
 * and a real sleep handler (xscale_cpu_sleep).
 */
671 struct cpu_functions xscale_cpufuncs = {
672         /* CPU functions */
673         
674         cpufunc_id,                     /* id                   */
675         xscale_cpwait,                  /* cpwait               */
676
677         /* MMU functions */
678
679         xscale_control,                 /* control              */
680         cpufunc_domains,                /* domain               */
681         xscale_setttb,                  /* setttb               */
682         cpufunc_faultstatus,            /* faultstatus          */
683         cpufunc_faultaddress,           /* faultaddress         */
684
685         /* TLB functions */
686
687         armv4_tlb_flushID,              /* tlb_flushID          */
688         xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
689         armv4_tlb_flushI,               /* tlb_flushI           */
690         (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
691         armv4_tlb_flushD,               /* tlb_flushD           */
692         armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */
693
694         /* Cache operations */
695
696         xscale_cache_syncI,             /* icache_sync_all      */
697         xscale_cache_syncI_rng,         /* icache_sync_range    */
698
699         xscale_cache_purgeD,            /* dcache_wbinv_all     */
700         xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
701         xscale_cache_flushD_rng,        /* dcache_inv_range     */
702         xscale_cache_cleanD_rng,        /* dcache_wb_range      */
703
704         xscale_cache_purgeID,           /* idcache_wbinv_all    */
705         xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
706         cpufunc_nullop,                 /* l2cache_wbinv_all    */
707         (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
708         (void *)cpufunc_nullop,         /* l2cache_inv_range    */
709         (void *)cpufunc_nullop,         /* l2cache_wb_range     */
710
711         /* Other functions */
712
713         cpufunc_nullop,                 /* flush_prefetchbuf    */
714         armv4_drain_writebuf,           /* drain_writebuf       */
715         cpufunc_nullop,                 /* flush_brnchtgt_C     */
716         (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
717
718         xscale_cpu_sleep,               /* sleep                */
719
720         /* Soft functions */
721
722         cpufunc_null_fixup,             /* dataabt_fixup        */
723         cpufunc_null_fixup,             /* prefetchabt_fixup    */
724
725         xscale_context_switch,          /* context_switch       */
726
727         xscale_setup                    /* cpu setup            */
728 };
729 #endif
730 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
731    CPU_XSCALE_80219 */
732
#ifdef CPU_XSCALE_81342
/*
 * Function dispatch table for the XScale core 3 (81342).  Entries must
 * stay in exactly the order declared by struct cpu_functions; each
 * trailing comment names the member being initialized.  Unlike the
 * other XScale variants this core has an L2 cache, so the l2cache_*
 * slots point at real implementations instead of cpufunc_nullop.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	/*
	 * NOTE(review): dcache_inv_range uses the generic XScale
	 * routine, not a c3-specific one — presumably invalidate-by-MVA
	 * is identical on core 3; confirm against the 81342 manual.
	 */
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_81342 */
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;	/* dispatch table installed by set_cpufuncs() */
u_int cputype;			/* CPU id register, masked with CPU_ID_CPU_MASK */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
800
801 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
802   defined (CPU_ARM9E) || defined (CPU_ARM10) ||                        \
803   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||            \
804   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||          \
805   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
806
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;	/* log2(number of D-cache sets) */
static int	arm_dcache_l2_assoc;	/* log2(D-cache associativity) */
static int	arm_dcache_l2_linesize;	/* log2(D-cache line size in bytes) */
814
/*
 * Decode the CP15 Cache Type register (c0, opcode2 1) into the global
 * arm_p{i,d}cache_* geometry variables and the file-local
 * arm_dcache_l2_* log2 values used by the set/way cache loop setup.
 */
static void
get_cachetype_cp15()
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	/* MRC p15, 0, <Rd>, c0, c0, 1: read the Cache Type register. */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpufunc_id())
		goto out;		/* no cache type register present */

	/* S bit clear: unified cache rather than split (Harvard) caches. */
	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		/* M bit selects the 3*2^n (vs. 2*2^n) size multiplier. */
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1;
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	/* Same decoding again for the D-cache (or unified cache) field. */
	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			arm_pdcache_ways = 1;
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

	/*
	 * Log2 forms of the D-cache geometry, consumed by the ARM9 and
	 * ARM10 set/way iterator setup in set_cpufuncs().
	 */
	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	/*
	 * NOTE(review): on the early-out path arm_dcache_align keeps
	 * its prior value (normally 0 at boot), so the mask becomes
	 * all-ones — presumably no such CPU reaches the cache code;
	 * confirm.
	 */
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || XSCALE */
884
885 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
886     defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU id, masked with CPU_ID_CPU_MASK */
	int	ct_pcache_type;		/* primary cache type (write-back etc.) */
	int	ct_pcache_unified;	/* non-zero if I- and D-cache are unified */
	int	ct_pdcache_size;	/* D-cache size, bytes */
	int	ct_pdcache_line_size;	/* D-cache line size, bytes */
	int	ct_pdcache_ways;	/* D-cache associativity */
	int	ct_picache_size;	/* I-cache size, bytes */
	int	ct_picache_line_size;	/* I-cache line size, bytes */
	int	ct_picache_ways;	/* I-cache associativity */
};

struct cachetab cachetab[] = {
    /* cpuid,		cache type,	  u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}	/* sentinel: ct_cpuid == 0 ends the scan */
};
909
910 static void get_cachetype_table(void);
911
912 static void
913 get_cachetype_table()
914 {
915         int i;
916         u_int32_t cpuid = cpufunc_id();
917
918         for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
919                 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
920                         arm_pcache_type = cachetab[i].ct_pcache_type;
921                         arm_pcache_unified = cachetab[i].ct_pcache_unified;
922                         arm_pdcache_size = cachetab[i].ct_pdcache_size;
923                         arm_pdcache_line_size =
924                             cachetab[i].ct_pdcache_line_size;
925                         arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
926                         arm_picache_size = cachetab[i].ct_picache_size;
927                         arm_picache_line_size =
928                             cachetab[i].ct_picache_line_size;
929                         arm_picache_ways = cachetab[i].ct_picache_ways;
930                 }
931         }
932         arm_dcache_align = arm_pdcache_line_size;
933
934         arm_dcache_align_mask = arm_dcache_align - 1;
935 }
936
#endif /* SA110 || SA1100 || SA1110 || IXP12X0 */
938
/*
 * Probe the CPU identification register and install the matching
 * cpu_functions dispatch table, cache geometry, and pmap PTE
 * prototypes for this CPU.  Returns 0 on success.
 *
 * Cannot panic here as we may not have a console yet ...
 * (in practice an unrecognized CPU does panic below; there is nothing
 * sensible to continue with.)
 */

int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		goto out;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/*
		 * Precompute the set/index iterators for the ARM9
		 * set/way D-cache loops from the geometry decoded by
		 * get_cachetype_cp15() above.
		 */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS ||
	    cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		if (cputype == CPU_ID_MV88FR131 ||
		    cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41) {

			cpufuncs = sheeva_cpufuncs;
			/*
			 * Workaround for Marvell MV78100 CPU: Cache prefetch
			 * mechanism may affect the cache coherency validity,
			 * so it needs to be disabled.
			 *
			 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
			 * L2 Prefetching Mechanism) for details.
			 */
			if (cputype == CPU_ID_MV88FR571_VD ||
			    cputype == CPU_ID_MV88FR571_41) {
				sheeva_control_ext(0xffffffff,
				    FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
				    FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN |
				    FC_L2_PREF_DIS);
			} else {
				sheeva_control_ext(0xffffffff,
				    FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
				    FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN);
			}
		} else
			cpufuncs = armv5_ec_cpufuncs;

		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Same set/way iterator precompute as the ARM9 case. */
		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm10_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm10_dcache_sets_inc;
		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM10 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		goto out;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it  */
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it  */
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_IXP12X0
	if (cputype == CPU_ID_IXP1200) {
		cpufuncs = ixp12x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;
		get_cachetype_table();
		pmap_pte_init_sa1();
		goto out;
	}
#endif	/* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);	/* NOTREACHED: panic() does not return */
out:
	/*
	 * Propagate the D-cache line mask to UMA — presumably so
	 * allocations are cache-line aligned for DMA/cache coherency;
	 * confirm against uma_set_align().
	 */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
1238
1239 /*
1240  * Fixup routines for data and prefetch aborts.
1241  *
1242  * Several compile time symbols are used
1243  *
1244  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1245  * correction of registers after a fault.
1246  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1247  * when defined should use late aborts
1248  */
1249
1250
1251 /*
1252  * Null abort fixup routine.
1253  * For use when no fixup is required.
1254  */
1255 int
1256 cpufunc_null_fixup(arg)
1257         void *arg;
1258 {
1259         return(ABORT_FIXUP_OK);
1260 }
1261
1262
1263 #if defined(CPU_ARM7TDMI)
1264
/*
 * Debug hooks for the abort-fixup code: with DEBUG_FAULT_CORRECTION
 * defined, DFC_PRINTF expands to printf and DFC_DISASSEMBLE to
 * disassemble(); otherwise both compile away to nothing.
 */
#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif
1272
/*
 * "Early" data abort fixup.
 *
 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
 *
 * In early aborts, we may have to fix up LDM, STM, LDC and STC — i.e.
 * undo any base-register write-back the aborted instruction performed
 * so it can be restarted.  Returns ABORT_FIXUP_OK on success,
 * ABORT_FIXUP_FAILED when the instruction cannot be corrected.
 */
int
early_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		/* LDM/STM (block data transfer) */
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		/* Bit 21 (W): base write-back happened and must be undone. */
		if (fault_instruction & (1 << 21)) {
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			/* Bit 23 (U) gives the direction of the correction. */
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		/* LDC/STC (coprocessor data transfer) */
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			/* 8-bit word offset, scaled to bytes. */
			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/*
		 * NOTE(review): this condition is identical to the branch
		 * above, so this return is unreachable dead code
		 * (inherited from the NetBSD original).  Presumably a
		 * different mask or opcode value was intended; left
		 * as-is pending confirmation.
		 */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Undo the r14 shuffle performed on entry: propagate the
		 * (possibly corrected) value from the usr slot back into
		 * the SVC r14, and restore the saved usr r14.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
1410 #endif  /* CPU_ARM2/250/3/6/7 */
1411
1412
1413 #if defined(CPU_ARM7TDMI)
1414 /*
1415  * "Late" (base updated) data abort fixup
1416  *
1417  * For ARM6 (in late-abort mode) and ARM7.
1418  *
1419  * In this model, all data-transfer instructions need fixing up.  We defer
1420  * LDM, STM, LDC and STC fixup to the early-abort handler.
1421  */
1422 int
1423 late_abort_fixup(arg)
1424         void *arg;
1425 {
1426         trapframe_t *frame = arg;
1427         u_int fault_pc;
1428         u_int fault_instruction;
1429         int saved_lr = 0;
1430
1431         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1432
1433                 /* Ok an abort in SVC mode */
1434
1435                 /*
1436                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1437                  * as the fault happened in svc mode but we need it in the
1438                  * usr slot so we can treat the registers as an array of ints
1439                  * during fixing.
1440                  * NOTE: This PC is in the position but writeback is not
1441                  * allowed on r15.
1442                  * Doing it like this is more efficient than trapping this
1443                  * case in all possible locations in the following fixup code.
1444                  */
1445
1446                 saved_lr = frame->tf_usr_lr;
1447                 frame->tf_usr_lr = frame->tf_svc_lr;
1448
1449                 /*
1450                  * Note the trapframe does not have the SVC r13 so a fault
1451                  * from an instruction with writeback to r13 in SVC mode is
1452                  * not allowed. This should not happen as the kstack is
1453                  * always valid.
1454                  */
1455         }
1456
1457         /* Get fault address and status from the CPU */
1458
1459         fault_pc = frame->tf_pc;
1460         fault_instruction = *((volatile unsigned int *)fault_pc);
1461
1462         /* Decode the fault instruction and fix the registers as needed */
1463
	/* Was it a swap instruction? */
1465
1466         if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1467                 DFC_DISASSEMBLE(fault_pc);
1468         } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1469
		/* Was it a ldr/str instruction */
1471                 /* This is for late abort only */
1472
1473                 int base;
1474                 int offset;
1475                 int *registers = &frame->tf_r0;
1476
1477                 DFC_DISASSEMBLE(fault_pc);
1478                 
1479                 /* This is for late abort only */
1480
1481                 if ((fault_instruction & (1 << 24)) == 0
1482                     || (fault_instruction & (1 << 21)) != 0) {  
1483                         /* postindexed ldr/str with no writeback */
1484
1485                         base = (fault_instruction >> 16) & 0x0f;
1486                         if (base == 13 &&
1487                             (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1488                                 return ABORT_FIXUP_FAILED;
1489                         if (base == 15)
1490                                 return ABORT_FIXUP_FAILED;
1491                         DFC_PRINTF(("late abt fix: r%d=%08x : ",
1492                                        base, registers[base]));
1493                         if ((fault_instruction & (1 << 25)) == 0) {
1494                                 /* Immediate offset - easy */
1495
1496                                 offset = fault_instruction & 0xfff;
1497                                 if ((fault_instruction & (1 << 23)))
1498                                         offset = -offset;
1499                                 registers[base] += offset;
1500                                 DFC_PRINTF(("imm=%08x ", offset));
1501                         } else {
1502                                 /* offset is a shifted register */
1503                                 int shift;
1504
1505                                 offset = fault_instruction & 0x0f;
1506                                 if (offset == base)
1507                                         return ABORT_FIXUP_FAILED;
1508                 
1509                                 /*
1510                                  * Register offset - hard we have to
1511                                  * cope with shifts !
1512                                  */
1513                                 offset = registers[offset];
1514
1515                                 if ((fault_instruction & (1 << 4)) == 0)
1516                                         /* shift with amount */
1517                                         shift = (fault_instruction >> 7) & 0x1f;
1518                                 else {
1519                                         /* shift with register */
1520                                         if ((fault_instruction & (1 << 7)) != 0)
1521                                                 /* undefined for now so bail out */
1522                                                 return ABORT_FIXUP_FAILED;
1523                                         shift = ((fault_instruction >> 8) & 0xf);
1524                                         if (base == shift)
1525                                                 return ABORT_FIXUP_FAILED;
1526                                         DFC_PRINTF(("shift reg=%d ", shift));
1527                                         shift = registers[shift];
1528                                 }
1529                                 DFC_PRINTF(("shift=%08x ", shift));
1530                                 switch (((fault_instruction >> 5) & 0x3)) {
1531                                 case 0 : /* Logical left */
1532                                         offset = (int)(((u_int)offset) << shift);
1533                                         break;
1534                                 case 1 : /* Logical Right */
1535                                         if (shift == 0) shift = 32;
1536                                         offset = (int)(((u_int)offset) >> shift);
1537                                         break;
1538                                 case 2 : /* Arithmetic Right */
1539                                         if (shift == 0) shift = 32;
1540                                         offset = (int)(((int)offset) >> shift);
1541                                         break;
1542                                 case 3 : /* Rotate right (rol or rxx) */
1543                                         return ABORT_FIXUP_FAILED;
1544                                         break;
1545                                 }
1546
1547                                 DFC_PRINTF(("abt: fixed LDR/STR with "
1548                                                "register offset\n"));
1549                                 if ((fault_instruction & (1 << 23)))
1550                                         offset = -offset;
1551                                 DFC_PRINTF(("offset=%08x ", offset));
1552                                 registers[base] += offset;
1553                         }
1554                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1555                 }
1556         }
1557
1558         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1559
1560                 /* Ok an abort in SVC mode */
1561
1562                 /*
1563                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1564                  * as the fault happened in svc mode but we need it in the
1565                  * usr slot so we can treat the registers as an array of ints
1566                  * during fixing.
1567                  * NOTE: This PC is in the position but writeback is not
1568                  * allowed on r15.
1569                  * Doing it like this is more efficient than trapping this
1570                  * case in all possible locations in the prior fixup code.
1571                  */
1572
1573                 frame->tf_svc_lr = frame->tf_usr_lr;
1574                 frame->tf_usr_lr = saved_lr;
1575
1576                 /*
1577                  * Note the trapframe does not have the SVC r13 so a fault
1578                  * from an instruction with writeback to r13 in SVC mode is
1579                  * not allowed. This should not happen as the kstack is
1580                  * always valid.
1581                  */
1582         }
1583
1584         /*
1585          * Now let the early-abort fixup routine have a go, in case it
1586          * was an LDM, STM, LDC or STC that faulted.
1587          */
1588
1589         return early_abort_fixup(arg);
1590 }
1591 #endif  /* CPU_ARM7TDMI */
1592
1593 /*
1594  * CPU Setup code
1595  */
1596
1597 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1598   defined(CPU_ARM9E) || \
1599   defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||   \
1600   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
1601   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
1602   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1603   defined(CPU_ARM10) ||  defined(CPU_ARM11)
1604
/* Opcodes for the co_falseop/co_trueop fields below. */
#define IGN	0		/* leave the control word untouched */
#define OR	1		/* set the bits in co_value */
#define BIC	2		/* clear the bits in co_value */

/*
 * One boolean boot option and how its true/false settings are folded
 * into the CPU control register value by parse_cpu_options().
 */
struct cpu_option {
	char	*co_name;	/* boot argument name to look up */
	int	co_falseop;	/* op applied when the option is false */
	int	co_trueop;	/* op applied when the option is true */
	int	co_value;	/* CPU_CONTROL_* bits the ops act on */
};
1615
1616 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1617
1618 static u_int
1619 parse_cpu_options(args, optlist, cpuctrl)
1620         char *args;
1621         struct cpu_option *optlist;    
1622         u_int cpuctrl; 
1623 {
1624         int integer;
1625
1626         if (args == NULL)
1627                 return(cpuctrl);
1628
1629         while (optlist->co_name) {
1630                 if (get_bootconf_option(args, optlist->co_name,
1631                     BOOTOPT_TYPE_BOOLEAN, &integer)) {
1632                         if (integer) {
1633                                 if (optlist->co_trueop == OR)
1634                                         cpuctrl |= optlist->co_value;
1635                                 else if (optlist->co_trueop == BIC)
1636                                         cpuctrl &= ~optlist->co_value;
1637                         } else {
1638                                 if (optlist->co_falseop == OR)
1639                                         cpuctrl |= optlist->co_value;
1640                                 else if (optlist->co_falseop == BIC)
1641                                         cpuctrl &= ~optlist->co_value;
1642                         }
1643                 }
1644                 ++optlist;
1645         }
1646         return(cpuctrl);
1647 }
#endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_ARM9 || CPU_ARM9E || CPU_SA110 ||
	  CPU_SA1100 || CPU_SA1110 || XSCALE || CPU_ARM10 || CPU_ARM11 */
1649
1650 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot options shared by the ARM7TDMI and ARM8 setup routines:
 * unified ID-cache and write-buffer enable/disable knobs.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	/* Legacy option names; these can only disable a feature. */
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1662
#endif	/* CPU_ARM7TDMI || CPU_ARM8 */
1664
1665 #ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI-specific boot options: cache, write buffer and FPA
 * coprocessor clock controls.
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Legacy spelling of arm700.fpaclk. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1677
1678 void
1679 arm7tdmi_setup(args)
1680         char *args;
1681 {
1682         int cpuctrl;
1683
1684         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1685                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1686                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1687
1688         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1689         cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1690
1691 #ifdef __ARMEB__
1692         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1693 #endif
1694
1695         /* Clear out the cache */
1696         cpu_idcache_wbinv_all();
1697
1698         /* Set the control register */
1699         ctrl = cpuctrl;
1700         cpu_control(0xffffffff, cpuctrl);
1701 }
1702 #endif  /* CPU_ARM7TDMI */
1703
1704 #ifdef CPU_ARM8
/*
 * ARM8-specific boot options: cache, write buffer and branch
 * prediction controls.
 */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Legacy spelling of cpu.branchpredict. */
	{ "branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1717
1718 void
1719 arm8_setup(args)
1720         char *args;
1721 {
1722         int integer;
1723         int cpuctrl, cpuctrlmask;
1724         int clocktest;
1725         int setclock = 0;
1726
1727         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1728                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1729                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1730         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1731                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1732                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1733                  | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1734                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1735
1736 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1737         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1738 #endif
1739
1740         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1741         cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1742
1743 #ifdef __ARMEB__
1744         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1745 #endif
1746
1747         /* Get clock configuration */
1748         clocktest = arm8_clock_config(0, 0) & 0x0f;
1749
1750         /* Special ARM8 clock and test configuration */
1751         if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1752                 clocktest = 0;
1753                 setclock = 1;
1754         }
1755         if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1756                 if (integer)
1757                         clocktest |= 0x01;
1758                 else
1759                         clocktest &= ~(0x01);
1760                 setclock = 1;
1761         }
1762         if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1763                 if (integer)
1764                         clocktest |= 0x02;
1765                 else
1766                         clocktest &= ~(0x02);
1767                 setclock = 1;
1768         }
1769         if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1770                 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1771                 setclock = 1;
1772         }
1773         if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1774                 clocktest |= (integer & 7) << 5;
1775                 setclock = 1;
1776         }
1777         
1778         /* Clear out the cache */
1779         cpu_idcache_wbinv_all();
1780
1781         /* Set the control register */
1782         ctrl = cpuctrl;
1783         cpu_control(0xffffffff, cpuctrl);
1784
1785         /* Set the clock/test register */    
1786         if (setclock)
1787                 arm8_clock_config(0x7f, clocktest);
1788 }
1789 #endif  /* CPU_ARM8 */
1790
1791 #ifdef CPU_ARM9
/*
 * ARM9 boot options: split I/D cache and write-buffer controls, in
 * both generic "cpu." and CPU-specific "arm9." spellings.
 */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1803
/*
 * Configure an ARM9 core: build the CP15 control-register value,
 * apply boot options, and program the hardware.
 */
void
arm9_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Defaults: MMU, 32-bit spaces, system protection, split I/D
	 * caches, write buffer, late aborts, round-robin replacement. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
	    CPU_CONTROL_ROUNDROBIN;
	/* Bits this routine is prepared to modify via cpu_control(). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
		 | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	/* Trap unaligned accesses unless the kernel disables them. */
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);

#ifdef __ARMEB__
	/* A big-endian kernel runs the core big-endian as well. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
	/* Relocate the exception vectors to 0xffff0000 if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(cpuctrlmask, cpuctrl);
	ctrl = cpuctrl;

}
1843 #endif  /* CPU_ARM9 */
1844
1845 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARM9E/ARM10 boot options: split I/D cache and write-buffer
 * controls, in generic "cpu." and CPU-specific "arm10." spellings.
 */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1857
/*
 * Configure an ARM9E/ARM10 core: build the CP15 control-register
 * value, apply boot options, and program the hardware.
 */
void
arm10_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Defaults: MMU, system protection, split I/D caches, write
	 * buffer and branch prediction. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	/* NOTE(review): cpuctrlmask is computed but unused here; the
	 * cpu_control() call below writes with a full mask instead. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	/* Trap unaligned accesses unless the kernel disables them. */
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);

#ifdef __ARMEB__
	/* A big-endian kernel runs the core big-endian as well. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	/* CP15 c7, c7, 0: invalidate both I and D caches. */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Relocate the exception vectors to 0xffff0000 if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
1900 #endif  /* CPU_ARM9E || CPU_ARM10 */
1901
1902 #ifdef CPU_ARM11
/*
 * ARM11 boot options: split I/D cache controls in generic "cpu."
 * and CPU-specific "arm11." spellings.
 */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1911
1912 void
1913 arm11_setup(args)
1914         char *args;
1915 {
1916         int cpuctrl, cpuctrlmask;
1917
1918         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1919             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1920             /* | CPU_CONTROL_BPRD_ENABLE */;
1921         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1922             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1923             | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
1924             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1925             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1926
1927 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1928         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1929 #endif
1930
1931         cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1932
1933 #ifdef __ARMEB__
1934         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1935 #endif
1936
1937         /* Clear out the cache */
1938         cpu_idcache_wbinv_all();
1939
1940         /* Now really make sure they are clean.  */
1941         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1942
1943         /* Set the control register */
1944         curcpu()->ci_ctrl = cpuctrl;
1945         cpu_control(0xffffffff, cpuctrl);
1946
1947         /* And again. */
1948         cpu_idcache_wbinv_all();
1949 }
1950 #endif  /* CPU_ARM11 */
1951
1952 #ifdef CPU_SA110
/*
 * SA-110 boot options: split I/D cache and write-buffer controls.
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	/* Legacy option names; these can only disable a feature. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1968
/*
 * Configure a StrongARM SA-110: build the CP15 control-register
 * value, apply boot options, program the hardware and enable
 * clock switching.
 */
void
sa110_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Defaults: MMU, 32-bit spaces, system protection, split I/D
	 * caches and write buffer. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
	/* NOTE(review): cpuctrlmask is unused since the masked
	 * cpu_control() call below is commented out. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	/* Trap unaligned accesses unless the kernel disables them. */
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

#ifdef __ARMEB__
	/* A big-endian kernel runs the core big-endian as well. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* 
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm ("mcr 15, 0, r0, c15, c1, 2");
}
2011 #endif  /* CPU_SA110 */
2012
2013 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 boot options: split I/D cache and write-buffer
 * controls.
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	/* Legacy option names; these can only disable a feature. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2029
/*
 * Configure a StrongARM SA-1100/SA-1110: build the CP15 control
 * register value, apply boot options and program the hardware.
 */
void
sa11x0_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Defaults: MMU, 32-bit spaces, system protection, split I/D
	 * caches, write buffer and late aborts. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
	/* NOTE(review): cpuctrlmask is computed but unused; the
	 * cpu_control() call below writes with a full mask. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	/* Trap unaligned accesses unless the kernel disables them. */
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif


	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);

#ifdef __ARMEB__
	/* A big-endian kernel runs the core big-endian as well. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the exception vectors to 0xffff0000 if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
	/* Clear out the cache */
	cpu_idcache_wbinv_all();
	/* Set the control register */    
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
2067 #endif  /* CPU_SA1100 || CPU_SA1110 */
2068
2069 #if defined(CPU_IXP12X0)
/*
 * IXP12x0 boot options: split I/D cache and write-buffer controls.
 */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2081
/*
 * Configure an Intel IXP12x0: build the CP15 control-register value,
 * apply boot options and program the hardware.  Unlike most of its
 * siblings, this routine writes only the bits in cpuctrlmask.
 */
void
ixp12x0_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;


	/* Defaults: MMU, split I/D caches, write buffer and system
	 * protection. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE;

	/* Bits this routine is prepared to modify via cpu_control(). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
		 | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	/* Trap unaligned accesses unless the kernel disables them. */
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);

#ifdef __ARMEB__
	/* A big-endian kernel runs the core big-endian as well. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the exception vectors to 0xffff0000 if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */    
	ctrl = cpuctrl;
	/* cpu_control(0xffffffff, cpuctrl); */
	cpu_control(cpuctrlmask, cpuctrl);
}
2120 #endif /* CPU_IXP12X0 */
2121
2122 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2123   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2124   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * XScale boot options: branch prediction and split I/D cache
 * controls.
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	/* Legacy option names. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2139
/*
 * Configure an Intel XScale core: build the CP15 control-register
 * value, apply boot options, program the control register and then
 * adjust the auxiliary control register (write coalescing, and on
 * core-3 parts the L2/low-locality settings).
 */
void
xscale_setup(args)
	char *args;
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	/* NOTE(review): cpuctrlmask is unused since the masked
	 * cpu_control() call below is commented out. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
		 CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	/* Trap unaligned accesses unless the kernel disables them. */
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

#ifdef __ARMEB__
	/* A big-endian kernel runs the core big-endian as well. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the exception vectors to 0xffff0000 if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	/* Third-generation cores have an L2 cache to enable. */
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	/* The K bit set disables write-buffer coalescing. */
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	/* Core-3 parts: enable low-locality references and set the
	 * full mini-data cache mode mask. */
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	/* Write the updated auxiliary control register back. */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
	   CPU_XSCALE_IXP425 || CPU_XSCALE_80219 || CPU_XSCALE_81342 */