/* sys/arm/arm/cpufunc.c */
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm7tdmi support code Copyright (c) 2001 John Fremlin
5  * arm8 support code Copyright (c) 1997 ARM Limited
6  * arm8 support code Copyright (c) 1997 Causality Limited
7  * arm9 support code Copyright (C) 2001 ARM Ltd
8  * Copyright (c) 1997 Mark Brinicombe.
9  * Copyright (c) 1997 Causality Limited
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *      This product includes software developed by Causality Limited.
23  * 4. The name of Causality Limited may not be used to endorse or promote
24  *    products derived from this software without specific prior written
25  *    permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37  * SUCH DAMAGE.
38  *
39  * RiscBSD kernel project
40  *
41  * cpufuncs.c
42  *
43  * C functions for supporting CPU / MMU / TLB specific operations.
44  *
45  * Created      : 30/01/97
46  */
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/lock.h>
53 #include <sys/mutex.h>
54 #include <sys/bus.h>
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/disassem.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61 #include <vm/uma.h>
62
63 #include <machine/cpuconf.h>
64 #include <machine/cpufunc.h>
65 #include <machine/bootconfig.h>
66
67 #ifdef CPU_XSCALE_80200
68 #include <arm/xscale/i80200/i80200reg.h>
69 #include <arm/xscale/i80200/i80200var.h>
70 #endif
71
72 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73 #include <arm/xscale/i80321/i80321reg.h>
74 #include <arm/xscale/i80321/i80321var.h>
75 #endif
76
77 #if defined(CPU_XSCALE_81342)
78 #include <arm/xscale/i8134x/i81342reg.h>
79 #endif
80
81 #ifdef CPU_XSCALE_IXP425
82 #include <arm/xscale/ixp425/ixp425reg.h>
83 #include <arm/xscale/ixp425/ixp425var.h>
84 #endif
85
86 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
87     defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
88 #include <arm/xscale/xscalereg.h>
89 #endif
90
#if defined(PERFCTRS)
/* Performance-monitor hook table; presumably installed by a platform PMC driver — confirm at the definition site of struct arm_pmc_funcs. */
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
/* Instruction-cache geometry (size in bytes, line size, associativity). */
int     arm_picache_size;
int     arm_picache_line_size;
int     arm_picache_ways;

/* Data-cache geometry; also describes a unified cache when present. */
int     arm_pdcache_size;       /* and unified */
int     arm_pdcache_line_size;
int     arm_pdcache_ways;

/* Cache type/organization; nonzero arm_pcache_unified means I/D caches are one. */
int     arm_pcache_type;
int     arm_pcache_unified;

/* Data-cache alignment; mask is presumably arm_dcache_align - 1 — confirm in the per-CPU setup code. */
int     arm_dcache_align;
int     arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
/* NOTE(review): appears to hold a cached CP15 control-register value — confirm against the *_setup() routines. */
int ctrl;
113
114 #ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI dispatch table.  Entries are positional and must stay in
 * struct cpu_functions field order.  The combined flushID routines back
 * every TLB slot, and cache maintenance is largely a no-op on this core.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        arm7tdmi_setttb,                /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        arm7tdmi_tlb_flushID,           /* tlb_flushID          */
        arm7tdmi_tlb_flushID_SE,        /* tlb_flushID_SE       */
        arm7tdmi_tlb_flushID,           /* tlb_flushI           */
        arm7tdmi_tlb_flushID_SE,        /* tlb_flushI_SE        */
        arm7tdmi_tlb_flushID,           /* tlb_flushD           */
        arm7tdmi_tlb_flushID_SE,        /* tlb_flushD_SE        */

        /* Cache operations */

        cpufunc_nullop,                 /* icache_sync_all      */
        (void *)cpufunc_nullop,         /* icache_sync_range    */

        arm7tdmi_cache_flushID,         /* dcache_wbinv_all     */
        (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range   */
        (void *)arm7tdmi_cache_flushID, /* dcache_inv_range     */
        (void *)cpufunc_nullop,         /* dcache_wb_range      */

        arm7tdmi_cache_flushID,         /* idcache_wbinv_all    */
        (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        cpufunc_nullop,                 /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        late_abort_fixup,               /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        arm7tdmi_context_switch,        /* context_switch       */

        arm7tdmi_setup                  /* cpu setup            */

};
174 #endif  /* CPU_ARM7TDMI */
175
176 #ifdef CPU_ARM8
/*
 * ARM8 dispatch table.  Entries are positional and must stay in
 * struct cpu_functions field order.  dcache_inv_range has no dedicated
 * implementation and falls back to the purge (wbinv) routine — see XXX.
 */
struct cpu_functions arm8_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        arm8_setttb,                    /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        arm8_tlb_flushID,               /* tlb_flushID          */
        arm8_tlb_flushID_SE,            /* tlb_flushID_SE       */
        arm8_tlb_flushID,               /* tlb_flushI           */
        arm8_tlb_flushID_SE,            /* tlb_flushI_SE        */
        arm8_tlb_flushID,               /* tlb_flushD           */
        arm8_tlb_flushID_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        cpufunc_nullop,                 /* icache_sync_all      */
        (void *)cpufunc_nullop,         /* icache_sync_range    */

        arm8_cache_purgeID,             /* dcache_wbinv_all     */
        (void *)arm8_cache_purgeID,     /* dcache_wbinv_range   */
/*XXX*/ (void *)arm8_cache_purgeID,     /* dcache_inv_range     */
        (void *)arm8_cache_cleanID,     /* dcache_wb_range      */

        arm8_cache_purgeID,             /* idcache_wbinv_all    */
        (void *)arm8_cache_purgeID,     /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        cpufunc_nullop,                 /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        arm8_context_switch,            /* context_switch       */

        arm8_setup                      /* cpu setup            */
};
235 #endif  /* CPU_ARM8 */
236
237 #ifdef CPU_ARM9
/*
 * ARM9 dispatch table.  Entries are positional and must stay in
 * struct cpu_functions field order.  dcache_inv_range falls back to the
 * writeback-invalidate routine — see XXX.
 */
struct cpu_functions arm9_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        arm9_setttb,                    /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        arm9_icache_sync_all,           /* icache_sync_all      */
        arm9_icache_sync_range,         /* icache_sync_range    */

        arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
        arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
/*XXX*/ arm9_dcache_wbinv_range,        /* dcache_inv_range     */
        arm9_dcache_wb_range,           /* dcache_wb_range      */

        arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
        arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        arm9_context_switch,            /* context_switch       */

        arm9_setup                      /* cpu setup            */

};
297 #endif /* CPU_ARM9 */
298
299 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARMv5 "EC" (ARM9E/ARM10 family) dispatch table.  Entries are positional
 * and must stay in struct cpu_functions field order.  dcache_inv_range
 * falls back to the writeback-invalidate routine — see XXX.
 */
struct cpu_functions armv5_ec_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        armv5_ec_setttb,                /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_all,       /* icache_sync_all      */
        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
/*XXX*/ armv5_ec_dcache_wbinv_range,    /* dcache_inv_range     */
        armv5_ec_dcache_wb_range,       /* dcache_wb_range      */

        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */

        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        arm10_context_switch,           /* context_switch       */

        arm10_setup                     /* cpu setup            */

};
360
/*
 * Marvell Feroceon dispatch table.  Entries are positional and must stay
 * in struct cpu_functions field order.  Unlike the generic ARMv5-EC table,
 * Feroceon supplies real range-based D-cache ops and a working L2 cache.
 */
struct cpu_functions feroceon_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        feroceon_setttb,                /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_all,       /* icache_sync_all      */
        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        feroceon_dcache_wbinv_range,    /* dcache_wbinv_range   */
        feroceon_dcache_inv_range,      /* dcache_inv_range     */
        feroceon_dcache_wb_range,       /* dcache_wb_range      */

        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        feroceon_idcache_wbinv_range,   /* idcache_wbinv_range  */

        feroceon_l2cache_wbinv_all,     /* l2cache_wbinv_all    */
        feroceon_l2cache_wbinv_range,   /* l2cache_wbinv_range  */
        feroceon_l2cache_inv_range,     /* l2cache_inv_range    */
        feroceon_l2cache_wb_range,      /* l2cache_wb_range     */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        arm10_context_switch,           /* context_switch       */

        arm10_setup                     /* cpu setup            */
};
420 #endif /* CPU_ARM9E || CPU_ARM10 */
421
422 #ifdef CPU_ARM10
/*
 * ARM10 dispatch table.  Entries are positional and must stay in
 * struct cpu_functions field order.  ARM10 has dedicated range-based
 * cache operations, so no wbinv fallbacks are needed here.
 */
struct cpu_functions arm10_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        arm10_setttb,                   /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm10_tlb_flushID_SE,           /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        arm10_tlb_flushI_SE,            /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        arm10_icache_sync_all,          /* icache_sync_all      */
        arm10_icache_sync_range,        /* icache_sync_range    */

        arm10_dcache_wbinv_all,         /* dcache_wbinv_all     */
        arm10_dcache_wbinv_range,       /* dcache_wbinv_range   */
        arm10_dcache_inv_range,         /* dcache_inv_range     */
        arm10_dcache_wb_range,          /* dcache_wb_range      */

        arm10_idcache_wbinv_all,        /* idcache_wbinv_all    */
        arm10_idcache_wbinv_range,      /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        arm10_context_switch,           /* context_switch       */

        arm10_setup                     /* cpu setup            */

};
482 #endif /* CPU_ARM10 */
483
484 #ifdef CPU_SA110
/*
 * StrongARM SA-110 dispatch table.  Entries are positional and must stay
 * in struct cpu_functions field order.  dcache_inv_range falls back to
 * the purge (wbinv) routine — see XXX.
 */
struct cpu_functions sa110_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        sa1_setttb,                     /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        sa1_cache_syncI,                /* icache_sync_all      */
        sa1_cache_syncI_rng,            /* icache_sync_range    */

        sa1_cache_purgeD,               /* dcache_wbinv_all     */
        sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
/*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
        sa1_cache_cleanD_rng,           /* dcache_wb_range      */

        sa1_cache_purgeID,              /* idcache_wbinv_all    */
        sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        sa110_context_switch,           /* context_switch       */

        sa110_setup                     /* cpu setup            */
};
543 #endif  /* CPU_SA110 */
544
545 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * StrongARM SA-1100/SA-1110 dispatch table.  Entries are positional and
 * must stay in struct cpu_functions field order.  Differs from SA-110 by
 * a real read-buffer drain (flush_prefetchbuf) and a real sleep hook.
 */
struct cpu_functions sa11x0_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        sa1_setttb,                     /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        sa1_cache_syncI,                /* icache_sync_all      */
        sa1_cache_syncI_rng,            /* icache_sync_range    */

        sa1_cache_purgeD,               /* dcache_wbinv_all     */
        sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
/*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
        sa1_cache_cleanD_rng,           /* dcache_wb_range      */

        sa1_cache_purgeID,              /* idcache_wbinv_all    */
        sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */

        /* Other functions */

        sa11x0_drain_readbuf,           /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        sa11x0_cpu_sleep,               /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        sa11x0_context_switch,          /* context_switch       */

        sa11x0_setup                    /* cpu setup            */
};
604 #endif  /* CPU_SA1100 || CPU_SA1110 */
605
606 #ifdef CPU_IXP12X0
/*
 * Intel IXP12x0 dispatch table.  Entries are positional and must stay in
 * struct cpu_functions field order.  Shares SA-1 cache/TLB primitives;
 * dcache_inv_range falls back to the purge (wbinv) routine — see XXX.
 */
struct cpu_functions ixp12x0_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        cpufunc_domains,                /* domain               */
        sa1_setttb,                     /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        sa1_tlb_flushID_SE,             /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        sa1_cache_syncI,                /* icache_sync_all      */
        sa1_cache_syncI_rng,            /* icache_sync_range    */

        sa1_cache_purgeD,               /* dcache_wbinv_all     */
        sa1_cache_purgeD_rng,           /* dcache_wbinv_range   */
/*XXX*/ sa1_cache_purgeD_rng,           /* dcache_inv_range     */
        sa1_cache_cleanD_rng,           /* dcache_wb_range      */

        sa1_cache_purgeID,              /* idcache_wbinv_all    */
        sa1_cache_purgeID_rng,          /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */

        /* Other functions */

        ixp12x0_drain_readbuf,          /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        ixp12x0_context_switch,         /* context_switch       */

        ixp12x0_setup                   /* cpu setup            */
};
665 #endif  /* CPU_IXP12X0 */
666
667 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
668   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
669   defined(CPU_XSCALE_80219)
670
/*
 * Intel XScale (80200/80321/80219/PXA2x0/IXP425) dispatch table.  Entries
 * are positional and must stay in struct cpu_functions field order.
 * Note the non-null cpwait: XScale requires a CP15 drain after coprocessor
 * writes before the effect is guaranteed visible.
 */
struct cpu_functions xscale_cpufuncs = {
        /* CPU functions */

        cpufunc_id,                     /* id                   */
        xscale_cpwait,                  /* cpwait               */

        /* MMU functions */

        xscale_control,                 /* control              */
        cpufunc_domains,                /* domain               */
        xscale_setttb,                  /* setttb               */
        cpufunc_faultstatus,            /* faultstatus          */
        cpufunc_faultaddress,           /* faultaddress         */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
        armv4_tlb_flushI,               /* tlb_flushI           */
        (void *)armv4_tlb_flushI,       /* tlb_flushI_SE        */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        xscale_cache_syncI,             /* icache_sync_all      */
        xscale_cache_syncI_rng,         /* icache_sync_range    */

        xscale_cache_purgeD,            /* dcache_wbinv_all     */
        xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
        xscale_cache_flushD_rng,        /* dcache_inv_range     */
        xscale_cache_cleanD_rng,        /* dcache_wb_range      */

        xscale_cache_purgeID,           /* idcache_wbinv_all    */
        xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */

        /* Other functions */

        cpufunc_nullop,                 /* flush_prefetchbuf    */
        armv4_drain_writebuf,           /* drain_writebuf       */
        cpufunc_nullop,                 /* flush_brnchtgt_C     */
        (void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

        xscale_cpu_sleep,               /* sleep                */

        /* Soft functions */

        cpufunc_null_fixup,             /* dataabt_fixup        */
        cpufunc_null_fixup,             /* prefetchabt_fixup    */

        xscale_context_switch,          /* context_switch       */

        xscale_setup                    /* cpu setup            */
};
729 #endif
730 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
731    CPU_XSCALE_80219 */
732
733 #ifdef CPU_XSCALE_81342
/*
 * Function vector for the third-generation XScale core (81342).
 * Uses the xscalec3 cache/setttb/context-switch routines, including
 * real L2 cache maintenance (unlike the other XScale vectors, which
 * stub out the l2cache entries); TLB handling is shared with the
 * generic ARMv4 implementations.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
792 #endif /* CPU_XSCALE_81342 */
793 /*
794  * Global constants also used by locore.s
795  */
796
/* Active CPU function vector, installed by set_cpufuncs() at boot. */
struct cpu_functions cpufuncs;
/* Main ID register value, masked with CPU_ID_CPU_MASK in set_cpufuncs(). */
u_int cputype;
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
800
801 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
802   defined (CPU_ARM9E) || defined (CPU_ARM10) ||                        \
803   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||            \
804   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||          \
805   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
806
static void get_cachetype_cp15(void);

/*
 * Additional cache information local to this file.  Log2 of some of the
 * above numbers; computed by get_cachetype_cp15() and consumed by the
 * ARM9/ARM10 dcache set/index loop setup in set_cpufuncs().
 */
static int	arm_dcache_l2_nsets;	/* log2(number of sets) */
static int	arm_dcache_l2_assoc;	/* log2(associativity) */
static int	arm_dcache_l2_linesize;	/* log2(line size in bytes) */
814
815 static void
816 get_cachetype_cp15()
817 {
818         u_int ctype, isize, dsize;
819         u_int multiplier;
820
821         __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
822                 : "=r" (ctype));
823
824         /*
825          * ...and thus spake the ARM ARM:
826          *
827          * If an <opcode2> value corresponding to an unimplemented or
828          * reserved ID register is encountered, the System Control
829          * processor returns the value of the main ID register.
830          */
831         if (ctype == cpufunc_id())
832                 goto out;
833
834         if ((ctype & CPU_CT_S) == 0)
835                 arm_pcache_unified = 1;
836
837         /*
838          * If you want to know how this code works, go read the ARM ARM.
839          */
840
841         arm_pcache_type = CPU_CT_CTYPE(ctype);
842
843         if (arm_pcache_unified == 0) {
844                 isize = CPU_CT_ISIZE(ctype);
845                 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
846                 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
847                 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
848                         if (isize & CPU_CT_xSIZE_M)
849                                 arm_picache_line_size = 0; /* not present */
850                         else
851                                 arm_picache_ways = 1;
852                 } else {
853                         arm_picache_ways = multiplier <<
854                             (CPU_CT_xSIZE_ASSOC(isize) - 1);
855                 }
856                 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
857         }
858
859         dsize = CPU_CT_DSIZE(ctype);
860         multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
861         arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
862         if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
863                 if (dsize & CPU_CT_xSIZE_M)
864                         arm_pdcache_line_size = 0; /* not present */
865                 else
866                         arm_pdcache_ways = 1;
867         } else {
868                 arm_pdcache_ways = multiplier <<
869                     (CPU_CT_xSIZE_ASSOC(dsize) - 1);
870         }
871         arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
872
873         arm_dcache_align = arm_pdcache_line_size;
874
875         arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
876         arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
877         arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
878             CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
879
880  out:
881         arm_dcache_align_mask = arm_dcache_align - 1;
882 }
883 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
884
885 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
886     defined(CPU_IXP12X0)
887 /* Cache information for CPUs without cache type registers. */
/* One table entry per CPU model; see the column legend on the table below. */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU ID (after CPU_ID_CPU_MASK) */
	int	ct_pcache_type;		/* primary cache type */
	int	ct_pcache_unified;	/* non-zero: unified I/D cache */
	int	ct_pdcache_size;	/* D-cache size */
	int	ct_pdcache_line_size;	/* D-cache line size */
	int	ct_pdcache_ways;	/* D-cache ways */
	int	ct_picache_size;	/* I-cache size */
	int	ct_picache_line_size;	/* I-cache line size */
	int	ct_picache_ways;	/* I-cache ways */
};
899
struct cachetab cachetab[] = {
    /* cpuid,		cache type,	  u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}	/* terminator: ct_cpuid == 0 ends the scan */
};
909
910 static void get_cachetype_table(void);
911
912 static void
913 get_cachetype_table()
914 {
915         int i;
916         u_int32_t cpuid = cpufunc_id();
917
918         for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
919                 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
920                         arm_pcache_type = cachetab[i].ct_pcache_type;
921                         arm_pcache_unified = cachetab[i].ct_pcache_unified;
922                         arm_pdcache_size = cachetab[i].ct_pdcache_size;
923                         arm_pdcache_line_size =
924                             cachetab[i].ct_pdcache_line_size;
925                         arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
926                         arm_picache_size = cachetab[i].ct_picache_size;
927                         arm_picache_line_size =
928                             cachetab[i].ct_picache_line_size;
929                         arm_picache_ways = cachetab[i].ct_picache_ways;
930                 }
931         }
932         arm_dcache_align = arm_pdcache_line_size;
933
934         arm_dcache_align_mask = arm_dcache_align - 1;
935 }
936
937 #endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
938
939 /*
940  * Cannot panic here as we may not have a console yet ...
941  */
942
/*
 * Identify the running CPU from the main ID register, install the
 * matching cpu_functions vector into the global 'cpufuncs', probe the
 * cache geometry (get_cachetype_cp15()/get_cachetype_table()) and
 * select the pmap PTE initialization for that core.  Returns 0 on
 * success and panics when the CPU type was not compiled into the
 * kernel (despite the "cannot panic" caveat above — the fallthrough
 * return is kept for safety; see NOTREACHED below).
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		goto out;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Precompute set/index loop bounds for the arm9 cache ops. */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS ||
	    cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		if (cputype == CPU_ID_MV88FR131 ||
		    cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41) {

			cpufuncs = feroceon_cpufuncs;
			/*
			 * Workaround for Marvell MV78100 CPU: Cache prefetch
			 * mechanism may affect the cache coherency validity,
			 * so it needs to be disabled.
			 *
			 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
			 * L2 Prefetching Mechanism) for details.
			 */
			if (cputype == CPU_ID_MV88FR571_VD ||
			    cputype == CPU_ID_MV88FR571_41) {
				feroceon_control_ext(0xffffffff,
				    FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
				    FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN |
				    FC_L2_PREF_DIS);
			} else {
				feroceon_control_ext(0xffffffff,
				    FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
				    FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN);
			}
		} else
			cpufuncs = armv5_ec_cpufuncs;

		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Precompute set/index loop bounds for the arm10 cache ops. */
		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm10_dcache_sets_max = 
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm10_dcache_sets_inc;
		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM10 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		goto out;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it  */
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it  */
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_IXP12X0
	if (cputype == CPU_ID_IXP1200) {
		cpufuncs = ixp12x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;
		get_cachetype_table();
		pmap_pte_init_sa1();
		goto out;
	}
#endif	/* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	/* NOTREACHED */
	return(ARCHITECTURE_NOT_PRESENT);
out:
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
1237
1238 /*
1239  * Fixup routines for data and prefetch aborts.
1240  *
1241  * Several compile time symbols are used
1242  *
1243  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1244  * correction of registers after a fault.
1245  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1246  * when defined should use late aborts
1247  */
1248
1249
1250 /*
1251  * Null abort fixup routine.
1252  * For use when no fixup is required.
1253  */
1254 int
1255 cpufunc_null_fixup(arg)
1256         void *arg;
1257 {
1258         return(ABORT_FIXUP_OK);
1259 }
1260
1261
1262 #if defined(CPU_ARM7TDMI)
1263
/*
 * Debug helpers for the abort-fixup code.  With DEBUG_FAULT_CORRECTION
 * defined they print / disassemble the faulting instruction; otherwise
 * they expand to nothing.  Note DFC_PRINTF takes a parenthesized printf
 * argument list: DFC_PRINTF(("fmt", ...)).
 */
#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif
1271
1272 /*
1273  * "Early" data abort fixup.
1274  *
1275  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1276  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1277  *
1278  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1279  */
1280 int
1281 early_abort_fixup(arg)
1282         void *arg;
1283 {
1284         trapframe_t *frame = arg;
1285         u_int fault_pc;
1286         u_int fault_instruction;
1287         int saved_lr = 0;
1288
1289         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1290
1291                 /* Ok an abort in SVC mode */
1292
1293                 /*
1294                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1295                  * as the fault happened in svc mode but we need it in the
1296                  * usr slot so we can treat the registers as an array of ints
1297                  * during fixing.
1298                  * NOTE: This PC is in the position but writeback is not
1299                  * allowed on r15.
1300                  * Doing it like this is more efficient than trapping this
1301                  * case in all possible locations in the following fixup code.
1302                  */
1303
1304                 saved_lr = frame->tf_usr_lr;
1305                 frame->tf_usr_lr = frame->tf_svc_lr;
1306
1307                 /*
1308                  * Note the trapframe does not have the SVC r13 so a fault
1309                  * from an instruction with writeback to r13 in SVC mode is
1310                  * not allowed. This should not happen as the kstack is
1311                  * always valid.
1312                  */
1313         }
1314
1315         /* Get fault address and status from the CPU */
1316
1317         fault_pc = frame->tf_pc;
1318         fault_instruction = *((volatile unsigned int *)fault_pc);
1319
1320         /* Decode the fault instruction and fix the registers as needed */
1321
1322         if ((fault_instruction & 0x0e000000) == 0x08000000) {
1323                 int base;
1324                 int loop;
1325                 int count;
1326                 int *registers = &frame->tf_r0;
1327         
1328                 DFC_PRINTF(("LDM/STM\n"));
1329                 DFC_DISASSEMBLE(fault_pc);
1330                 if (fault_instruction & (1 << 21)) {
1331                         DFC_PRINTF(("This instruction must be corrected\n"));
1332                         base = (fault_instruction >> 16) & 0x0f;
1333                         if (base == 15)
1334                                 return ABORT_FIXUP_FAILED;
1335                         /* Count registers transferred */
1336                         count = 0;
1337                         for (loop = 0; loop < 16; ++loop) {
1338                                 if (fault_instruction & (1<<loop))
1339                                         ++count;
1340                         }
1341                         DFC_PRINTF(("%d registers used\n", count));
1342                         DFC_PRINTF(("Corrected r%d by %d bytes ",
1343                                        base, count * 4));
1344                         if (fault_instruction & (1 << 23)) {
1345                                 DFC_PRINTF(("down\n"));
1346                                 registers[base] -= count * 4;
1347                         } else {
1348                                 DFC_PRINTF(("up\n"));
1349                                 registers[base] += count * 4;
1350                         }
1351                 }
1352         } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1353                 int base;
1354                 int offset;
1355                 int *registers = &frame->tf_r0;
1356         
1357                 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1358
1359                 DFC_DISASSEMBLE(fault_pc);
1360
1361                 /* Only need to fix registers if write back is turned on */
1362
1363                 if ((fault_instruction & (1 << 21)) != 0) {
1364                         base = (fault_instruction >> 16) & 0x0f;
1365                         if (base == 13 &&
1366                             (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1367                                 return ABORT_FIXUP_FAILED;
1368                         if (base == 15)
1369                                 return ABORT_FIXUP_FAILED;
1370
1371                         offset = (fault_instruction & 0xff) << 2;
1372                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1373                         if ((fault_instruction & (1 << 23)) != 0)
1374                                 offset = -offset;
1375                         registers[base] += offset;
1376                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1377                 }
1378         } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1379                 return ABORT_FIXUP_FAILED;
1380
1381         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1382
1383                 /* Ok an abort in SVC mode */
1384
1385                 /*
1386                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1387                  * as the fault happened in svc mode but we need it in the
1388                  * usr slot so we can treat the registers as an array of ints
1389                  * during fixing.
1390                  * NOTE: This PC is in the position but writeback is not
1391                  * allowed on r15.
1392                  * Doing it like this is more efficient than trapping this
1393                  * case in all possible locations in the prior fixup code.
1394                  */
1395
1396                 frame->tf_svc_lr = frame->tf_usr_lr;
1397                 frame->tf_usr_lr = saved_lr;
1398
1399                 /*
1400                  * Note the trapframe does not have the SVC r13 so a fault
1401                  * from an instruction with writeback to r13 in SVC mode is
1402                  * not allowed. This should not happen as the kstack is
1403                  * always valid.
1404                  */
1405         }
1406
1407         return(ABORT_FIXUP_OK);
1408 }
1409 #endif  /* CPU_ARM2/250/3/6/7 */
1410
1411
1412 #if defined(CPU_ARM7TDMI)
1413 /*
1414  * "Late" (base updated) data abort fixup
1415  *
1416  * For ARM6 (in late-abort mode) and ARM7.
1417  *
1418  * In this model, all data-transfer instructions need fixing up.  We defer
1419  * LDM, STM, LDC and STC fixup to the early-abort handler.
1420  */
1421 int
1422 late_abort_fixup(arg)
1423         void *arg;
1424 {
1425         trapframe_t *frame = arg;
1426         u_int fault_pc;
1427         u_int fault_instruction;
1428         int saved_lr = 0;
1429
1430         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1431
1432                 /* Ok an abort in SVC mode */
1433
1434                 /*
1435                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1436                  * as the fault happened in svc mode but we need it in the
1437                  * usr slot so we can treat the registers as an array of ints
1438                  * during fixing.
1439                  * NOTE: This PC is in the position but writeback is not
1440                  * allowed on r15.
1441                  * Doing it like this is more efficient than trapping this
1442                  * case in all possible locations in the following fixup code.
1443                  */
1444
1445                 saved_lr = frame->tf_usr_lr;
1446                 frame->tf_usr_lr = frame->tf_svc_lr;
1447
1448                 /*
1449                  * Note the trapframe does not have the SVC r13 so a fault
1450                  * from an instruction with writeback to r13 in SVC mode is
1451                  * not allowed. This should not happen as the kstack is
1452                  * always valid.
1453                  */
1454         }
1455
1456         /* Get fault address and status from the CPU */
1457
1458         fault_pc = frame->tf_pc;
1459         fault_instruction = *((volatile unsigned int *)fault_pc);
1460
1461         /* Decode the fault instruction and fix the registers as needed */
1462
1463         /* Was is a swap instruction ? */
1464
1465         if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1466                 DFC_DISASSEMBLE(fault_pc);
1467         } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1468
1469                 /* Was is a ldr/str instruction */
1470                 /* This is for late abort only */
1471
1472                 int base;
1473                 int offset;
1474                 int *registers = &frame->tf_r0;
1475
1476                 DFC_DISASSEMBLE(fault_pc);
1477                 
1478                 /* This is for late abort only */
1479
1480                 if ((fault_instruction & (1 << 24)) == 0
1481                     || (fault_instruction & (1 << 21)) != 0) {  
1482                         /* postindexed ldr/str with no writeback */
1483
1484                         base = (fault_instruction >> 16) & 0x0f;
1485                         if (base == 13 &&
1486                             (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1487                                 return ABORT_FIXUP_FAILED;
1488                         if (base == 15)
1489                                 return ABORT_FIXUP_FAILED;
1490                         DFC_PRINTF(("late abt fix: r%d=%08x : ",
1491                                        base, registers[base]));
1492                         if ((fault_instruction & (1 << 25)) == 0) {
1493                                 /* Immediate offset - easy */
1494
1495                                 offset = fault_instruction & 0xfff;
1496                                 if ((fault_instruction & (1 << 23)))
1497                                         offset = -offset;
1498                                 registers[base] += offset;
1499                                 DFC_PRINTF(("imm=%08x ", offset));
1500                         } else {
1501                                 /* offset is a shifted register */
1502                                 int shift;
1503
1504                                 offset = fault_instruction & 0x0f;
1505                                 if (offset == base)
1506                                         return ABORT_FIXUP_FAILED;
1507                 
1508                                 /*
1509                                  * Register offset - hard we have to
1510                                  * cope with shifts !
1511                                  */
1512                                 offset = registers[offset];
1513
1514                                 if ((fault_instruction & (1 << 4)) == 0)
1515                                         /* shift with amount */
1516                                         shift = (fault_instruction >> 7) & 0x1f;
1517                                 else {
1518                                         /* shift with register */
1519                                         if ((fault_instruction & (1 << 7)) != 0)
1520                                                 /* undefined for now so bail out */
1521                                                 return ABORT_FIXUP_FAILED;
1522                                         shift = ((fault_instruction >> 8) & 0xf);
1523                                         if (base == shift)
1524                                                 return ABORT_FIXUP_FAILED;
1525                                         DFC_PRINTF(("shift reg=%d ", shift));
1526                                         shift = registers[shift];
1527                                 }
1528                                 DFC_PRINTF(("shift=%08x ", shift));
1529                                 switch (((fault_instruction >> 5) & 0x3)) {
1530                                 case 0 : /* Logical left */
1531                                         offset = (int)(((u_int)offset) << shift);
1532                                         break;
1533                                 case 1 : /* Logical Right */
1534                                         if (shift == 0) shift = 32;
1535                                         offset = (int)(((u_int)offset) >> shift);
1536                                         break;
1537                                 case 2 : /* Arithmetic Right */
1538                                         if (shift == 0) shift = 32;
1539                                         offset = (int)(((int)offset) >> shift);
1540                                         break;
1541                                 case 3 : /* Rotate right (rol or rxx) */
1542                                         return ABORT_FIXUP_FAILED;
1543                                         break;
1544                                 }
1545
1546                                 DFC_PRINTF(("abt: fixed LDR/STR with "
1547                                                "register offset\n"));
1548                                 if ((fault_instruction & (1 << 23)))
1549                                         offset = -offset;
1550                                 DFC_PRINTF(("offset=%08x ", offset));
1551                                 registers[base] += offset;
1552                         }
1553                         DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1554                 }
1555         }
1556
1557         if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1558
1559                 /* Ok an abort in SVC mode */
1560
1561                 /*
1562                  * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1563                  * as the fault happened in svc mode but we need it in the
1564                  * usr slot so we can treat the registers as an array of ints
1565                  * during fixing.
1566                  * NOTE: This PC is in the position but writeback is not
1567                  * allowed on r15.
1568                  * Doing it like this is more efficient than trapping this
1569                  * case in all possible locations in the prior fixup code.
1570                  */
1571
1572                 frame->tf_svc_lr = frame->tf_usr_lr;
1573                 frame->tf_usr_lr = saved_lr;
1574
1575                 /*
1576                  * Note the trapframe does not have the SVC r13 so a fault
1577                  * from an instruction with writeback to r13 in SVC mode is
1578                  * not allowed. This should not happen as the kstack is
1579                  * always valid.
1580                  */
1581         }
1582
1583         /*
1584          * Now let the early-abort fixup routine have a go, in case it
1585          * was an LDM, STM, LDC or STC that faulted.
1586          */
1587
1588         return early_abort_fixup(arg);
1589 }
1590 #endif  /* CPU_ARM7TDMI */
1591
1592 /*
1593  * CPU Setup code
1594  */
1595
1596 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1597   defined(CPU_ARM9E) || \
1598   defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||   \
1599   defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||             \
1600   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
1601   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1602   defined(CPU_ARM10) ||  defined(CPU_ARM11)
1603
/*
 * Actions applied by parse_cpu_options() to the control-register value
 * when a boot option is absent/false (co_falseop) or true (co_trueop).
 */
#define IGN	0	/* leave cpuctrl unchanged */
#define OR	1	/* OR co_value into cpuctrl */
#define BIC	2	/* clear co_value bits from cpuctrl */

/* One boot-option-name -> control-register-bits mapping. */
struct cpu_option {
	char	*co_name;	/* boot option name, e.g. "cpu.cache" */
	int	co_falseop;	/* action when option is false */
	int	co_trueop;	/* action when option is true */
	int	co_value;	/* CPU_CONTROL_* bit(s) affected */
};
1614
1615 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1616
1617 static u_int
1618 parse_cpu_options(args, optlist, cpuctrl)
1619         char *args;
1620         struct cpu_option *optlist;    
1621         u_int cpuctrl; 
1622 {
1623         int integer;
1624
1625         if (args == NULL)
1626                 return(cpuctrl);
1627
1628         while (optlist->co_name) {
1629                 if (get_bootconf_option(args, optlist->co_name,
1630                     BOOTOPT_TYPE_BOOLEAN, &integer)) {
1631                         if (integer) {
1632                                 if (optlist->co_trueop == OR)
1633                                         cpuctrl |= optlist->co_value;
1634                                 else if (optlist->co_trueop == BIC)
1635                                         cpuctrl &= ~optlist->co_value;
1636                         } else {
1637                                 if (optlist->co_falseop == OR)
1638                                         cpuctrl |= optlist->co_value;
1639                                 else if (optlist->co_falseop == BIC)
1640                                         cpuctrl &= ~optlist->co_value;
1641                         }
1642                 }
1643                 ++optlist;
1644         }
1645         return(cpuctrl);
1646 }
1647 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1648
1649 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot options shared by the ARM7TDMI and ARM8 cores: combined I/D cache
 * enable and write buffer enable.  Processed by parse_cpu_options().
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	/* Legacy (NetBSD 1.2 era) option names kept for compatibility. */
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1661
1662 #endif  /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1663
1664 #ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI-specific boot options: cache/write-buffer enables and the
 * FPA coprocessor clock control.  Processed by parse_cpu_options().
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Legacy option name kept for compatibility. */
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1676
1677 void
1678 arm7tdmi_setup(args)
1679         char *args;
1680 {
1681         int cpuctrl;
1682
1683         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1684                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1685                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1686
1687         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1688         cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1689
1690 #ifdef __ARMEB__
1691         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1692 #endif
1693
1694         /* Clear out the cache */
1695         cpu_idcache_wbinv_all();
1696
1697         /* Set the control register */
1698         ctrl = cpuctrl;
1699         cpu_control(0xffffffff, cpuctrl);
1700 }
1701 #endif  /* CPU_ARM7TDMI */
1702
1703 #ifdef CPU_ARM8
/*
 * ARM8-specific boot options: cache/write-buffer enables plus branch
 * prediction.  Processed by parse_cpu_options().
 */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	/* Legacy option name kept for compatibility. */
	{ "branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1716
1717 void
1718 arm8_setup(args)
1719         char *args;
1720 {
1721         int integer;
1722         int cpuctrl, cpuctrlmask;
1723         int clocktest;
1724         int setclock = 0;
1725
1726         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1727                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1728                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1729         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1730                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1731                  | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1732                  | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1733                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1734
1735 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1736         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1737 #endif
1738
1739         cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1740         cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1741
1742 #ifdef __ARMEB__
1743         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1744 #endif
1745
1746         /* Get clock configuration */
1747         clocktest = arm8_clock_config(0, 0) & 0x0f;
1748
1749         /* Special ARM8 clock and test configuration */
1750         if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1751                 clocktest = 0;
1752                 setclock = 1;
1753         }
1754         if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1755                 if (integer)
1756                         clocktest |= 0x01;
1757                 else
1758                         clocktest &= ~(0x01);
1759                 setclock = 1;
1760         }
1761         if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1762                 if (integer)
1763                         clocktest |= 0x02;
1764                 else
1765                         clocktest &= ~(0x02);
1766                 setclock = 1;
1767         }
1768         if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1769                 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1770                 setclock = 1;
1771         }
1772         if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1773                 clocktest |= (integer & 7) << 5;
1774                 setclock = 1;
1775         }
1776         
1777         /* Clear out the cache */
1778         cpu_idcache_wbinv_all();
1779
1780         /* Set the control register */
1781         ctrl = cpuctrl;
1782         cpu_control(0xffffffff, cpuctrl);
1783
1784         /* Set the clock/test register */    
1785         if (setclock)
1786                 arm8_clock_config(0x7f, clocktest);
1787 }
1788 #endif  /* CPU_ARM8 */
1789
1790 #ifdef CPU_ARM9
/*
 * ARM9 boot options: separate I-cache/D-cache enables and the write
 * buffer.  Processed by parse_cpu_options().
 */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1802
1803 void
1804 arm9_setup(args)
1805         char *args;
1806 {
1807         int cpuctrl, cpuctrlmask;
1808
1809         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1810             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1811             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1812             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1813             CPU_CONTROL_ROUNDROBIN;
1814         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1815                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1816                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1817                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1818                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1819                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1820                  | CPU_CONTROL_ROUNDROBIN;
1821
1822 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1823         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1824 #endif
1825
1826         cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1827
1828 #ifdef __ARMEB__
1829         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1830 #endif
1831         if (vector_page == ARM_VECTORS_HIGH)
1832                 cpuctrl |= CPU_CONTROL_VECRELOC;
1833
1834         /* Clear out the cache */
1835         cpu_idcache_wbinv_all();
1836
1837         /* Set the control register */
1838         cpu_control(cpuctrlmask, cpuctrl);
1839         ctrl = cpuctrl;
1840
1841 }
1842 #endif  /* CPU_ARM9 */
1843
1844 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARM9E/ARM10 boot options: separate I-cache/D-cache enables and the
 * write buffer.  Processed by parse_cpu_options().
 */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1856
1857 void
1858 arm10_setup(args)
1859         char *args;
1860 {
1861         int cpuctrl, cpuctrlmask;
1862
1863         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1864             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 
1865             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1866         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1867             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1868             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1869             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1870             | CPU_CONTROL_BPRD_ENABLE
1871             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1872
1873 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1874         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1875 #endif
1876
1877         cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1878
1879 #ifdef __ARMEB__
1880         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1881 #endif
1882
1883         /* Clear out the cache */
1884         cpu_idcache_wbinv_all();
1885
1886         /* Now really make sure they are clean.  */
1887         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1888
1889         if (vector_page == ARM_VECTORS_HIGH)
1890                 cpuctrl |= CPU_CONTROL_VECRELOC;
1891
1892         /* Set the control register */
1893         ctrl = cpuctrl;
1894         cpu_control(0xffffffff, cpuctrl);
1895
1896         /* And again. */
1897         cpu_idcache_wbinv_all();
1898 }
1899 #endif  /* CPU_ARM9E || CPU_ARM10 */
1900
1901 #ifdef CPU_ARM11
/*
 * ARM11 boot options: separate I-cache/D-cache enables.  Processed by
 * parse_cpu_options().
 */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1910
1911 void
1912 arm11_setup(args)
1913         char *args;
1914 {
1915         int cpuctrl, cpuctrlmask;
1916
1917         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1918             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1919             /* | CPU_CONTROL_BPRD_ENABLE */;
1920         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1921             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1922             | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
1923             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1924             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1925
1926 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1927         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1928 #endif
1929
1930         cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1931
1932 #ifdef __ARMEB__
1933         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1934 #endif
1935
1936         /* Clear out the cache */
1937         cpu_idcache_wbinv_all();
1938
1939         /* Now really make sure they are clean.  */
1940         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1941
1942         /* Set the control register */
1943         curcpu()->ci_ctrl = cpuctrl;
1944         cpu_control(0xffffffff, cpuctrl);
1945
1946         /* And again. */
1947         cpu_idcache_wbinv_all();
1948 }
1949 #endif  /* CPU_ARM11 */
1950
1951 #ifdef CPU_SA110
/*
 * SA-110 (StrongARM) boot options: separate I-cache/D-cache enables and
 * the write buffer.  Processed by parse_cpu_options().
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	/* Legacy (NetBSD 1.2 era) option names kept for compatibility. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
1967
1968 void
1969 sa110_setup(args)
1970         char *args;
1971 {
1972         int cpuctrl, cpuctrlmask;
1973
1974         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1975                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1976                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1977                  | CPU_CONTROL_WBUF_ENABLE;
1978         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1979                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1980                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1981                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1982                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1983                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1984                  | CPU_CONTROL_CPCLK;
1985
1986 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1987         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1988 #endif
1989
1990         cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1991
1992 #ifdef __ARMEB__
1993         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1994 #endif
1995
1996         /* Clear out the cache */
1997         cpu_idcache_wbinv_all();
1998
1999         /* Set the control register */
2000         ctrl = cpuctrl;
2001 /*      cpu_control(cpuctrlmask, cpuctrl);*/
2002         cpu_control(0xffffffff, cpuctrl);
2003
2004         /* 
2005          * enable clockswitching, note that this doesn't read or write to r0,
2006          * r0 is just to make it valid asm
2007          */
2008         __asm ("mcr 15, 0, r0, c15, c1, 2");
2009 }
2010 #endif  /* CPU_SA110 */
2011
2012 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 boot options: separate I-cache/D-cache enables and the
 * write buffer.  Processed by parse_cpu_options().
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	/* Legacy (NetBSD 1.2 era) option names kept for compatibility. */
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2028
2029 void
2030 sa11x0_setup(args)
2031         char *args;
2032 {
2033         int cpuctrl, cpuctrlmask;
2034
2035         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2036                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2037                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2038                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2039         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2040                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2041                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2042                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2043                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2044                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2045                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2046
2047 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2048         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2049 #endif
2050
2051
2052         cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2053
2054 #ifdef __ARMEB__
2055         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2056 #endif
2057
2058         if (vector_page == ARM_VECTORS_HIGH)
2059                 cpuctrl |= CPU_CONTROL_VECRELOC;
2060         /* Clear out the cache */
2061         cpu_idcache_wbinv_all();
2062         /* Set the control register */    
2063         ctrl = cpuctrl;
2064         cpu_control(0xffffffff, cpuctrl);
2065 }
2066 #endif  /* CPU_SA1100 || CPU_SA1110 */
2067
2068 #if defined(CPU_IXP12X0)
/*
 * IXP12x0 boot options: separate I-cache/D-cache enables and the write
 * buffer.  Processed by parse_cpu_options().
 */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2080
2081 void
2082 ixp12x0_setup(args)
2083         char *args;
2084 {
2085         int cpuctrl, cpuctrlmask;
2086
2087
2088         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2089                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2090                  | CPU_CONTROL_IC_ENABLE;
2091
2092         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2093                  | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2094                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2095                  | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2096                  | CPU_CONTROL_VECRELOC;
2097
2098 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2099         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2100 #endif
2101
2102         cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2103
2104 #ifdef __ARMEB__
2105         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2106 #endif
2107
2108         if (vector_page == ARM_VECTORS_HIGH)
2109                 cpuctrl |= CPU_CONTROL_VECRELOC;
2110
2111         /* Clear out the cache */
2112         cpu_idcache_wbinv_all();
2113
2114         /* Set the control register */    
2115         ctrl = cpuctrl;
2116         /* cpu_control(0xffffffff, cpuctrl); */
2117         cpu_control(cpuctrlmask, cpuctrl);
2118 }
2119 #endif /* CPU_IXP12X0 */
2120
2121 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2122   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2123   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * XScale boot options: branch prediction and separate I-cache/D-cache
 * enables.  Processed by parse_cpu_options().
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	/* Legacy (NetBSD 1.2 era) option names kept for compatibility. */
	{ "branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2138
2139 void
2140 xscale_setup(args)
2141         char *args;
2142 {
2143         uint32_t auxctl;
2144         int cpuctrl, cpuctrlmask;
2145
2146         /*
2147          * The XScale Write Buffer is always enabled.  Our option
2148          * is to enable/disable coalescing.  Note that bits 6:3
2149          * must always be enabled.
2150          */
2151
2152         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2153                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2154                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2155                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2156                  | CPU_CONTROL_BPRD_ENABLE;
2157         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2158                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2159                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2160                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2161                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2162                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2163                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
2164                  CPU_CONTROL_L2_ENABLE;
2165
2166 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2167         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2168 #endif
2169
2170         cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2171
2172 #ifdef __ARMEB__
2173         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2174 #endif
2175
2176         if (vector_page == ARM_VECTORS_HIGH)
2177                 cpuctrl |= CPU_CONTROL_VECRELOC;
2178 #ifdef CPU_XSCALE_CORE3
2179         cpuctrl |= CPU_CONTROL_L2_ENABLE;
2180 #endif
2181
2182         /* Clear out the cache */
2183         cpu_idcache_wbinv_all();
2184
2185         /*
2186          * Set the control register.  Note that bits 6:3 must always
2187          * be set to 1.
2188          */
2189         ctrl = cpuctrl;
2190 /*      cpu_control(cpuctrlmask, cpuctrl);*/
2191         cpu_control(0xffffffff, cpuctrl);
2192
2193         /* Make sure write coalescing is turned on */
2194         __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2195                 : "=r" (auxctl));
2196 #ifdef XSCALE_NO_COALESCE_WRITES
2197         auxctl |= XSCALE_AUXCTL_K;
2198 #else
2199         auxctl &= ~XSCALE_AUXCTL_K;
2200 #endif
2201 #ifdef CPU_XSCALE_CORE3
2202         auxctl |= XSCALE_AUXCTL_LLR;
2203         auxctl |= XSCALE_AUXCTL_MD_MASK;
2204 #endif
2205         __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2206                 : : "r" (auxctl));
2207 }
2208 #endif  /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 
2209            CPU_XSCALE_80219 */