]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/arm/arm/cpufunc.c
ARM: All remaining functions in cpufunc_asm_arm10.S are identical with
[FreeBSD/FreeBSD.git] / sys / arm / arm / cpufunc.c
1 /*      $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
2
3 /*-
4  * arm9 support code Copyright (C) 2001 ARM Ltd
5  * Copyright (c) 1997 Mark Brinicombe.
6  * Copyright (c) 1997 Causality Limited
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *      This product includes software developed by Causality Limited.
20  * 4. The name of Causality Limited may not be used to endorse or promote
21  *    products derived from this software without specific prior written
22  *    permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  * RiscBSD kernel project
37  *
38  * cpufuncs.c
39  *
40  * C functions for supporting CPU / MMU / TLB specific operations.
41  *
42  * Created      : 30/01/97
43  */
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/bus.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
55
56 #include <vm/vm.h>
57 #include <vm/pmap.h>
58 #include <vm/uma.h>
59
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
62
63 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
64 #include <arm/xscale/i80321/i80321reg.h>
65 #include <arm/xscale/i80321/i80321var.h>
66 #endif
67
68 /*
69  * Some definitions in i81342reg.h clash with i80321reg.h.
70  * This only happens for the LINT kernel. As it happens,
71  * we don't need anything from i81342reg.h that we already
72  * got from somewhere else during a LINT compile.
73  */
74 #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
75 #include <arm/xscale/i8134x/i81342reg.h>
76 #endif
77
78 #ifdef CPU_XSCALE_IXP425
79 #include <arm/xscale/ixp425/ixp425reg.h>
80 #include <arm/xscale/ixp425/ixp425var.h>
81 #endif
82
/* PRIMARY CACHE VARIABLES */

/* L1 instruction cache geometry (bytes / bytes / ways). */
int     arm_picache_size;
int     arm_picache_line_size;
int     arm_picache_ways;

/* L1 data cache geometry; also used for a unified L1 cache. */
int     arm_pdcache_size;       /* and unified */
int     arm_pdcache_line_size;
int     arm_pdcache_ways;

int     arm_pcache_type;        /* CPU_CT_CTYPE() field of the cache type reg */
int     arm_pcache_unified;     /* non-zero if L1 is unified (no split I/D) */

/* Data cache line size and its low-bits mask, for aligning ranges. */
int     arm_dcache_align;
int     arm_dcache_align_mask;

u_int   arm_cache_level;        /* ARMv7 CLIDR value */
/*
 * ARMv7 CCSIDR per level: 2 entries per level (even = data/unified,
 * odd = instruction), 7 levels => 14 slots; indexed by the CSSELR
 * selector value used in get_cachetype_cp15().
 */
u_int   arm_cache_type[14];
u_int   arm_cache_loc;          /* level of coherency from CLIDR */

/* NOTE(review): purpose not evident from this file — apparently a scratch
 * copy of the CP15 control register; confirm against users elsewhere. */
int ctrl;
103
#ifdef CPU_ARM9
/*
 * Dispatch table of CPU/MMU/TLB/cache primitives for ARM9 cores.
 * Initializers are positional; the trailing comment on each line names
 * the struct cpu_functions member being set.  ARM9 has no L2 cache, so
 * the l2cache_* slots are no-ops.
 */
struct cpu_functions arm9_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        arm9_setttb,                    /* Setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        arm9_icache_sync_all,           /* icache_sync_all      */
        arm9_icache_sync_range,         /* icache_sync_range    */

        arm9_dcache_wbinv_all,          /* dcache_wbinv_all     */
        arm9_dcache_wbinv_range,        /* dcache_wbinv_range   */
        arm9_dcache_inv_range,          /* dcache_inv_range     */
        arm9_dcache_wb_range,           /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        arm9_idcache_wbinv_all,         /* idcache_wbinv_all    */
        arm9_idcache_wbinv_range,       /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm9_setup                      /* cpu setup            */

};
#endif /* CPU_ARM9 */
155
#if defined(CPU_ARM9E)
/*
 * Dispatch table for ARM9E-class cores with the ARMv5 "EC" cache
 * routines (e.g. ARM926EJ-S).  Initializers are positional; the
 * trailing comment on each line names the struct cpu_functions member.
 */
struct cpu_functions armv5_ec_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        armv5_ec_setttb,                /* Setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_all,       /* icache_sync_all      */
        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        armv5_ec_dcache_wbinv_range,    /* dcache_wbinv_range   */
        armv5_ec_dcache_inv_range,      /* dcache_inv_range     */
        armv5_ec_dcache_wb_range,       /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        armv5_ec_idcache_wbinv_range,   /* idcache_wbinv_range  */

        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm10_setup                     /* cpu setup            */

};

/*
 * Dispatch table for Marvell Sheeva (Feroceon) cores: ARMv5 "EC" cache
 * routines plus Sheeva-specific range ops and a real L2 cache.
 * Initializers are positional; the trailing comment names the member.
 */
struct cpu_functions sheeva_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        sheeva_setttb,                  /* Setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        arm9_tlb_flushID_SE,            /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        armv5_ec_icache_sync_all,       /* icache_sync_all      */
        armv5_ec_icache_sync_range,     /* icache_sync_range    */

        armv5_ec_dcache_wbinv_all,      /* dcache_wbinv_all     */
        sheeva_dcache_wbinv_range,      /* dcache_wbinv_range   */
        sheeva_dcache_inv_range,        /* dcache_inv_range     */
        sheeva_dcache_wb_range,         /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        armv5_ec_idcache_wbinv_all,     /* idcache_wbinv_all    */
        sheeva_idcache_wbinv_range,     /* idcache_wbinv_range  */

        sheeva_l2cache_wbinv_all,       /* l2cache_wbinv_all    */
        sheeva_l2cache_wbinv_range,     /* l2cache_wbinv_range  */
        sheeva_l2cache_inv_range,       /* l2cache_inv_range    */
        sheeva_l2cache_wb_range,        /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        sheeva_cpu_sleep,               /* sleep                */

        /* Soft functions */

        arm9_context_switch,            /* context_switch       */

        arm10_setup                     /* cpu setup            */
};
#endif /* CPU_ARM9E */
258
#ifdef CPU_MV_PJ4B
/*
 * Dispatch table for Marvell PJ4B (ARMv7) cores, built on the generic
 * armv7 primitives.  Initializers are positional; the trailing comment
 * names the struct cpu_functions member.
 */
struct cpu_functions pj4bv7_cpufuncs = {
        /* CPU functions */

        armv7_drain_writebuf,           /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        armv7_setttb,                   /* Setttb               */

        /* TLB functions */

        armv7_tlb_flushID,              /* tlb_flushID          */
        armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
        armv7_tlb_flushID,              /* tlb_flushD           */
        armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */

        /* Cache operations */
        armv7_idcache_wbinv_all,        /* icache_sync_all      */
        armv7_icache_sync_range,        /* icache_sync_range    */

        armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
        armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
        armv7_dcache_inv_range,         /* dcache_inv_range     */
        armv7_dcache_wb_range,          /* dcache_wb_range      */

        armv7_idcache_inv_all,          /* idcache_inv_all      */
        armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
        armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

        (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv7_drain_writebuf,           /* drain_writebuf       */

        (void *)cpufunc_nullop,         /* sleep                */

        /* Soft functions */
        armv7_context_switch,           /* context_switch       */

        pj4bv7_setup                    /* cpu setup            */
};
#endif /* CPU_MV_PJ4B */
308
#if defined(CPU_XSCALE_80321) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_80219)

/*
 * Dispatch table shared by the first/second generation Intel XScale
 * cores (i80321/i80219, PXA2x0, IXP425).  Initializers are positional;
 * the trailing comment names the struct cpu_functions member.
 */
struct cpu_functions xscale_cpufuncs = {
        /* CPU functions */

        xscale_cpwait,                  /* cpwait               */

        /* MMU functions */

        xscale_control,                 /* control              */
        xscale_setttb,                  /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        xscale_cache_syncI,             /* icache_sync_all      */
        xscale_cache_syncI_rng,         /* icache_sync_range    */

        xscale_cache_purgeD,            /* dcache_wbinv_all     */
        xscale_cache_purgeD_rng,        /* dcache_wbinv_range   */
        xscale_cache_flushD_rng,        /* dcache_inv_range     */
        xscale_cache_cleanD_rng,        /* dcache_wb_range      */

        xscale_cache_flushID,           /* idcache_inv_all      */
        xscale_cache_purgeID,           /* idcache_wbinv_all    */
        xscale_cache_purgeID_rng,       /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        xscale_cpu_sleep,               /* sleep                */

        /* Soft functions */

        xscale_context_switch,          /* context_switch       */

        xscale_setup                    /* cpu setup            */
};
#endif
/* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
   CPU_XSCALE_80219 */
364
#ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the third generation XScale core (i81342),
 * which has its own cache routines and a real L2 cache.
 * Initializers are positional; the trailing comment names the member.
 */
struct cpu_functions xscalec3_cpufuncs = {
        /* CPU functions */

        xscale_cpwait,                  /* cpwait               */

        /* MMU functions */

        xscale_control,                 /* control              */
        xscalec3_setttb,                /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        xscale_tlb_flushID_SE,          /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        xscalec3_cache_syncI,           /* icache_sync_all      */
        xscalec3_cache_syncI_rng,       /* icache_sync_range    */

        xscalec3_cache_purgeD,          /* dcache_wbinv_all     */
        xscalec3_cache_purgeD_rng,      /* dcache_wbinv_range   */
        xscale_cache_flushD_rng,        /* dcache_inv_range     */
        xscalec3_cache_cleanD_rng,      /* dcache_wb_range      */

        xscale_cache_flushID,           /* idcache_inv_all      */
        xscalec3_cache_purgeID,         /* idcache_wbinv_all    */
        xscalec3_cache_purgeID_rng,     /* idcache_wbinv_range  */
        xscalec3_l2cache_purge,         /* l2cache_wbinv_all    */
        xscalec3_l2cache_purge_rng,     /* l2cache_wbinv_range  */
        xscalec3_l2cache_flush_rng,     /* l2cache_inv_range    */
        xscalec3_l2cache_clean_rng,     /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        xscale_cpu_sleep,               /* sleep                */

        /* Soft functions */

        xscalec3_context_switch,        /* context_switch       */

        xscale_setup                    /* cpu setup            */
};
#endif /* CPU_XSCALE_81342 */
415
416
#if defined(CPU_FA526)
/*
 * Dispatch table for the Faraday FA526 core (ARMv4).  Initializers are
 * positional; the trailing comment names the struct cpu_functions
 * member.  No L2 cache, so the l2cache_* slots are no-ops.
 */
struct cpu_functions fa526_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        fa526_setttb,                   /* setttb               */

        /* TLB functions */

        armv4_tlb_flushID,              /* tlb_flushID          */
        fa526_tlb_flushID_SE,           /* tlb_flushID_SE       */
        armv4_tlb_flushD,               /* tlb_flushD           */
        armv4_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        fa526_icache_sync_all,          /* icache_sync_all      */
        fa526_icache_sync_range,        /* icache_sync_range    */

        fa526_dcache_wbinv_all,         /* dcache_wbinv_all     */
        fa526_dcache_wbinv_range,       /* dcache_wbinv_range   */
        fa526_dcache_inv_range,         /* dcache_inv_range     */
        fa526_dcache_wb_range,          /* dcache_wb_range      */

        armv4_idcache_inv_all,          /* idcache_inv_all      */
        fa526_idcache_wbinv_all,        /* idcache_wbinv_all    */
        fa526_idcache_wbinv_range,      /* idcache_wbinv_range  */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv4_drain_writebuf,           /* drain_writebuf       */

        fa526_cpu_sleep,                /* sleep                */

        /* Soft functions */


        fa526_context_switch,           /* context_switch       */

        fa526_setup                     /* cpu setup            */
};
#endif  /* CPU_FA526 */
468
#if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 (ARMv6) cores, using the arm11/arm11x6
 * and generic armv6 primitives.  Initializers are positional; the
 * trailing comment names the struct cpu_functions member.
 */
struct cpu_functions arm1176_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        arm11x6_setttb,                 /* Setttb               */

        /* TLB functions */

        arm11_tlb_flushID,              /* tlb_flushID          */
        arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
        arm11_tlb_flushD,               /* tlb_flushD           */
        arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

        /* Cache operations */

        arm11x6_icache_sync_all,        /* icache_sync_all      */
        arm11x6_icache_sync_range,      /* icache_sync_range    */

        arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
        armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
        armv6_dcache_inv_range,         /* dcache_inv_range     */
        armv6_dcache_wb_range,          /* dcache_wb_range      */

        armv6_idcache_inv_all,          /* idcache_inv_all      */
        arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
        arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

        (void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        arm11_drain_writebuf,           /* drain_writebuf       */

        arm11x6_sleep,                  /* sleep                */

        /* Soft functions */

        arm11_context_switch,           /* context_switch       */

        arm11x6_setup                   /* cpu setup            */
};
#endif /*CPU_ARM1176 */
520
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A and Qualcomm Krait (ARMv7) cores, built
 * on the generic armv7 primitives.  Initializers are positional; the
 * trailing comment names the struct cpu_functions member.
 */
struct cpu_functions cortexa_cpufuncs = {
        /* CPU functions */

        cpufunc_nullop,                 /* cpwait               */

        /* MMU functions */

        cpufunc_control,                /* control              */
        armv7_setttb,                   /* Setttb               */

        /*
         * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
         * whether the hardware implements separate I+D or not, so we use the
         * same 'ID' functions for all 3 variations.
         */

        armv7_tlb_flushID,              /* tlb_flushID          */
        armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
        armv7_tlb_flushID,              /* tlb_flushD           */
        armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */

        /* Cache operations */

        armv7_icache_sync_all,          /* icache_sync_all      */
        armv7_icache_sync_range,        /* icache_sync_range    */

        armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
        armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
        armv7_dcache_inv_range,         /* dcache_inv_range     */
        armv7_dcache_wb_range,          /* dcache_wb_range      */

        armv7_idcache_inv_all,          /* idcache_inv_all      */
        armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
        armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

        /*
         * Note: For CPUs using the PL310 the L2 ops are filled in when the
         * L2 cache controller is actually enabled.
         */
        cpufunc_nullop,                 /* l2cache_wbinv_all    */
        (void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
        (void *)cpufunc_nullop,         /* l2cache_inv_range    */
        (void *)cpufunc_nullop,         /* l2cache_wb_range     */
        (void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

        /* Other functions */

        armv7_drain_writebuf,           /* drain_writebuf       */

        armv7_cpu_sleep,                /* sleep                */

        /* Soft functions */

        armv7_context_switch,           /* context_switch       */

        cortexa_setup                     /* cpu setup            */
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
580
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;          /* active table, chosen by set_cpufuncs() */
u_int cputype;                          /* masked CPU id, set by set_cpufuncs() */
u_int cpu_reset_needs_v4_MMU_disable;   /* flag used in locore.s */
588
#if defined(CPU_ARM9) ||        \
  defined (CPU_ARM9E) ||        \
  defined(CPU_ARM1176) || defined(CPU_XSCALE_80321) ||          \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||           \
  defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||                 \
  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

/* Global cache line sizes, use 32 as default */
/* Overridden from the cache type register on ARMv7 in get_cachetype_cp15(). */
int     arm_dcache_min_line_size = 32;
int     arm_icache_min_line_size = 32;
int     arm_idcache_min_line_size = 32;

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int      arm_dcache_l2_nsets;
static int      arm_dcache_l2_assoc;
static int      arm_dcache_l2_linesize;
609
610 static void
611 get_cachetype_cp15()
612 {
613         u_int ctype, isize, dsize, cpuid;
614         u_int clevel, csize, i, sel;
615         u_int multiplier;
616         u_char type;
617
618         __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
619                 : "=r" (ctype));
620
621         cpuid = cpu_ident();
622         /*
623          * ...and thus spake the ARM ARM:
624          *
625          * If an <opcode2> value corresponding to an unimplemented or
626          * reserved ID register is encountered, the System Control
627          * processor returns the value of the main ID register.
628          */
629         if (ctype == cpuid)
630                 goto out;
631
632         if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
633                 /* Resolve minimal cache line sizes */
634                 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
635                 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
636                 arm_idcache_min_line_size =
637                     min(arm_icache_min_line_size, arm_dcache_min_line_size);
638
639                 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
640                     : "=r" (clevel));
641                 arm_cache_level = clevel;
642                 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
643                 i = 0;
644                 while ((type = (clevel & 0x7)) && i < 7) {
645                         if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
646                             type == CACHE_SEP_CACHE) {
647                                 sel = i << 1;
648                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
649                                     : : "r" (sel));
650                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
651                                     : "=r" (csize));
652                                 arm_cache_type[sel] = csize;
653                                 arm_dcache_align = 1 <<
654                                     (CPUV7_CT_xSIZE_LEN(csize) + 4);
655                                 arm_dcache_align_mask = arm_dcache_align - 1;
656                         }
657                         if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
658                                 sel = (i << 1) | 1;
659                                 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
660                                     : : "r" (sel));
661                                 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
662                                     : "=r" (csize));
663                                 arm_cache_type[sel] = csize;
664                         }
665                         i++;
666                         clevel >>= 3;
667                 }
668         } else {
669                 if ((ctype & CPU_CT_S) == 0)
670                         arm_pcache_unified = 1;
671
672                 /*
673                  * If you want to know how this code works, go read the ARM ARM.
674                  */
675
676                 arm_pcache_type = CPU_CT_CTYPE(ctype);
677
678                 if (arm_pcache_unified == 0) {
679                         isize = CPU_CT_ISIZE(ctype);
680                         multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
681                         arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
682                         if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
683                                 if (isize & CPU_CT_xSIZE_M)
684                                         arm_picache_line_size = 0; /* not present */
685                                 else
686                                         arm_picache_ways = 1;
687                         } else {
688                                 arm_picache_ways = multiplier <<
689                                     (CPU_CT_xSIZE_ASSOC(isize) - 1);
690                         }
691                         arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
692                 }
693
694                 dsize = CPU_CT_DSIZE(ctype);
695                 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
696                 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
697                 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
698                         if (dsize & CPU_CT_xSIZE_M)
699                                 arm_pdcache_line_size = 0; /* not present */
700                         else
701                                 arm_pdcache_ways = 1;
702                 } else {
703                         arm_pdcache_ways = multiplier <<
704                             (CPU_CT_xSIZE_ASSOC(dsize) - 1);
705                 }
706                 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
707
708                 arm_dcache_align = arm_pdcache_line_size;
709
710                 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
711                 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
712                 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
713                     CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
714
715         out:
716                 arm_dcache_align_mask = arm_dcache_align - 1;
717         }
718 }
719 #endif /* ARM9 || XSCALE */
720
721 /*
722  * Cannot panic here as we may not have a console yet ...
723  */
724
/*
 * Identify the CPU via the CP15 main ID register and install the matching
 * cpufuncs vector and pmap PTE initializer for it.  On success the UMA
 * allocator alignment is set from the probed dcache geometry and 0 is
 * returned; an unrecognized CPU panics.
 */
int
set_cpufuncs()
{
        cputype = cpu_ident();
        cputype &= CPU_ID_CPU_MASK;     /* discard revision-level bits */

#ifdef CPU_ARM9
        /* ARM Ltd. or TI implementor with an ARM9xx primary part number. */
        if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
             (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
            (cputype & 0x0000f000) == 0x00009000) {
                cpufuncs = arm9_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
                get_cachetype_cp15();
                /*
                 * Precompute the set/index stepping parameters used by the
                 * arm9 dcache maintenance routines from the cache geometry
                 * probed just above.
                 */
                arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
                arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
                    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
                arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
                arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
                pmap_pte_init_generic();
                goto out;
        }
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
        /* Marvell Sheeva (Feroceon) cores get extra control-ext setup. */
        if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
            cputype == CPU_ID_MV88FR571_41) {
                uint32_t sheeva_ctrl;

                sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
                    MV_L2_ENABLE);
                /*
                 * Workaround for Marvell MV78100 CPU: Cache prefetch
                 * mechanism may affect the cache coherency validity,
                 * so it needs to be disabled.
                 *
                 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
                 * L2 Prefetching Mechanism) for details.
                 */
                if (cputype == CPU_ID_MV88FR571_VD ||
                    cputype == CPU_ID_MV88FR571_41)
                        sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

                sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

                cpufuncs = sheeva_cpufuncs;
                get_cachetype_cp15();
                pmap_pte_init_generic();
                goto out;
        } else if (cputype == CPU_ID_ARM926EJS) {
                cpufuncs = armv5_ec_cpufuncs;
                get_cachetype_cp15();
                pmap_pte_init_generic();
                goto out;
        }
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
        if (cputype == CPU_ID_ARM1176JZS) {
                cpufuncs = arm1176_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
                get_cachetype_cp15();
                goto out;
        }
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
        /* All supported Cortex-A and Krait revisions share one vector. */
        if (cputype == CPU_ID_CORTEXA5 ||
            cputype == CPU_ID_CORTEXA7 ||
            cputype == CPU_ID_CORTEXA8R1 ||
            cputype == CPU_ID_CORTEXA8R2 ||
            cputype == CPU_ID_CORTEXA8R3 ||
            cputype == CPU_ID_CORTEXA9R1 ||
            cputype == CPU_ID_CORTEXA9R2 ||
            cputype == CPU_ID_CORTEXA9R3 ||
            cputype == CPU_ID_CORTEXA9R4 ||
            cputype == CPU_ID_CORTEXA12R0 ||
            cputype == CPU_ID_CORTEXA15R0 ||
            cputype == CPU_ID_CORTEXA15R1 ||
            cputype == CPU_ID_CORTEXA15R2 ||
            cputype == CPU_ID_CORTEXA15R3 ||
            cputype == CPU_ID_KRAIT300R0 ||
            cputype == CPU_ID_KRAIT300R1 ) {
                cpufuncs = cortexa_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
                get_cachetype_cp15();
                goto out;
        }
#endif /* CPU_CORTEXA */

#if defined(CPU_MV_PJ4B)
        if (cputype == CPU_ID_MV88SV581X_V7 ||
            cputype == CPU_ID_MV88SV584X_V7 ||
            cputype == CPU_ID_ARM_88SV581X_V7) {
                cpufuncs = pj4bv7_cpufuncs;
                get_cachetype_cp15();
                goto out;
        }
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526)
        if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
                cpufuncs = fa526_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* SA needs it  */
                get_cachetype_cp15();
                pmap_pte_init_generic();

                goto out;
        }
#endif  /* CPU_FA526 */

#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
        if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
            cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
            cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
                cpufuncs = xscale_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();
                goto out;
        }
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
        if (cputype == CPU_ID_81342) {
                cpufuncs = xscalec3_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();
                goto out;
        }
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
        /* ignore core revision to test PXA2xx CPUs */
        if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
            (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
            (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

                cpufuncs = xscale_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                goto out;
        }
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
        if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

                cpufuncs = xscale_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;     /* XScale needs it */
                get_cachetype_cp15();
                pmap_pte_init_xscale();

                goto out;
        }
#endif /* CPU_XSCALE_IXP425 */
        /*
         * Bzzzz. And the answer was ...
         */
        panic("No support for this CPU type (%08x) in kernel", cputype);
        return(ARCHITECTURE_NOT_PRESENT);       /* not reached: panic() does not return */
out:
        uma_set_align(arm_dcache_align_mask);
        return (0);
}
888
889 /*
890  * CPU Setup code
891  */
892
893 #ifdef CPU_ARM9
894 void
895 arm9_setup(void)
896 {
897         int cpuctrl, cpuctrlmask;
898
899         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
900             | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
901             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
902             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
903             CPU_CONTROL_ROUNDROBIN;
904         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
905                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
906                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
907                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
908                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
909                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
910                  | CPU_CONTROL_ROUNDROBIN;
911
912 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
913         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
914 #endif
915
916 #ifdef __ARMEB__
917         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
918 #endif
919         if (vector_page == ARM_VECTORS_HIGH)
920                 cpuctrl |= CPU_CONTROL_VECRELOC;
921
922         /* Clear out the cache */
923         cpu_idcache_wbinv_all();
924
925         /* Set the control register */
926         cpu_control(cpuctrlmask, cpuctrl);
927         ctrl = cpuctrl;
928
929 }
930 #endif  /* CPU_ARM9 */
931
932 #if defined(CPU_ARM9E)
933 void
934 arm10_setup(void)
935 {
936         int cpuctrl, cpuctrlmask;
937
938         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
939             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
940             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
941         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
942             | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
943             | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
944             | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
945             | CPU_CONTROL_BPRD_ENABLE
946             | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
947
948 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
949         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
950 #endif
951
952 #ifdef __ARMEB__
953         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
954 #endif
955
956         /* Clear out the cache */
957         cpu_idcache_wbinv_all();
958
959         /* Now really make sure they are clean.  */
960         __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
961
962         if (vector_page == ARM_VECTORS_HIGH)
963                 cpuctrl |= CPU_CONTROL_VECRELOC;
964
965         /* Set the control register */
966         ctrl = cpuctrl;
967         cpu_control(0xffffffff, cpuctrl);
968
969         /* And again. */
970         cpu_idcache_wbinv_all();
971 }
972 #endif  /* CPU_ARM9E || CPU_ARM10 */
973
974 #if defined(CPU_ARM1176) \
975  || defined(CPU_MV_PJ4B) \
976  || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Configure the performance-monitor cycle counter (CCNT) so it runs and,
 * optionally, is readable from userland.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
        /* Set PMUSERENR[0] to allow userland access */
        cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
        /* Set PMCR[2,0] to enable counters and reset CCNT */
        cp15_pmcr_set(5);
#else
        /* Set up the PMCCNTR register as a cyclecounter:
         * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
         * Set PMCR[2,0] to enable counters and reset CCNT
         * Set PMCNTENSET to 0x80000000 to enable CCNT */
        cp15_pminten_clr(0xFFFFFFFF);
        cp15_pmcr_set(5);
        cp15_pmcnten_set(0x80000000);
#endif
}
1002 #endif
1003
1004 #if defined(CPU_ARM1176)
/*
 * CPU setup for ARM1176-class cores.  Builds the CP15 control register
 * value plus a "write as existing" mask of bits to preserve, applies an
 * auxiliary-control errata workaround where needed, cleans the caches,
 * and installs the new configuration.
 */
void
arm11x6_setup(void)
{
        int cpuctrl, cpuctrl_wax;
        uint32_t auxctrl, auxctrl_wax;
        uint32_t tmp, tmp2;
        uint32_t sbz=0;         /* "should be zero" operand for the mcr below */
        uint32_t cpuid;

        cpuid = cpu_ident();

        cpuctrl =
                CPU_CONTROL_MMU_ENABLE  |
                CPU_CONTROL_DC_ENABLE   |
                CPU_CONTROL_WBUF_ENABLE |
                CPU_CONTROL_32BP_ENABLE |
                CPU_CONTROL_32BD_ENABLE |
                CPU_CONTROL_LABT_ENABLE |
                CPU_CONTROL_SYST_ENABLE |
                CPU_CONTROL_IC_ENABLE   |
                CPU_CONTROL_UNAL_ENABLE;

        /*
         * "write as existing" bits
         * inverse of this is mask
         * (bits set here keep their current hardware value when the
         * control register is written below)
         */
        cpuctrl_wax =
                (3 << 30) | /* SBZ */
                (1 << 29) | /* FA */
                (1 << 28) | /* TR */
                (3 << 26) | /* SBZ */
                (3 << 19) | /* SBZ */
                (1 << 17);  /* SBZ */

        cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
        cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

#ifdef __ARMEB__
        /* Big-endian data accesses. */
        cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

        /* Relocate exception vectors when the vector page is high. */
        if (vector_page == ARM_VECTORS_HIGH)
                cpuctrl |= CPU_CONTROL_VECRELOC;

        /* Default: leave the auxiliary control register untouched. */
        auxctrl = 0;
        auxctrl_wax = ~0;

        /*
         * Enable an errata workaround
         */
        if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
                auxctrl = ARM1176_AUXCTL_PHD;
                auxctrl_wax = ~ARM1176_AUXCTL_PHD;
        }

        /* Clear out the cache */
        cpu_idcache_wbinv_all();

        /* Now really make sure they are clean.  */
        __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

        /* Allow detection code to find the VFP if it's fitted.  */
        cp15_cpacr_set(0x0fffffff);

        /* Set the control register */
        ctrl = cpuctrl;
        cpu_control(~cpuctrl_wax, cpuctrl);

        /* Read-modify-write ACTLR; only write back if it changed. */
        tmp = cp15_actlr_get();
        tmp2 = tmp;
        tmp &= auxctrl_wax;
        tmp |= auxctrl;
        if (tmp != tmp2)
                cp15_actlr_set(tmp);

        /* And again. */
        cpu_idcache_wbinv_all();

        cpu_scc_setup_ccnt();
}
1085 #endif  /* CPU_ARM1176 */
1086
1087 #ifdef CPU_MV_PJ4B
1088 void
1089 pj4bv7_setup(void)
1090 {
1091         int cpuctrl;
1092
1093         pj4b_config();
1094
1095         cpuctrl = CPU_CONTROL_MMU_ENABLE;
1096 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1097         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1098 #endif
1099         cpuctrl |= CPU_CONTROL_DC_ENABLE;
1100         cpuctrl |= (0xf << 3);
1101         cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1102         cpuctrl |= CPU_CONTROL_IC_ENABLE;
1103         if (vector_page == ARM_VECTORS_HIGH)
1104                 cpuctrl |= CPU_CONTROL_VECRELOC;
1105         cpuctrl |= (0x5 << 16) | (1 < 22);
1106         cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1107
1108         /* Clear out the cache */
1109         cpu_idcache_wbinv_all();
1110
1111         /* Set the control register */
1112         ctrl = cpuctrl;
1113         cpu_control(0xFFFFFFFF, cpuctrl);
1114
1115         /* And again. */
1116         cpu_idcache_wbinv_all();
1117
1118         cpu_scc_setup_ccnt();
1119 }
1120 #endif /* CPU_MV_PJ4B */
1121
1122 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1123
1124 void
1125 cortexa_setup(void)
1126 {
1127         int cpuctrl, cpuctrlmask;
1128
1129         cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
1130             CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
1131             CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
1132             CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
1133             CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
1134             CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
1135
1136         cpuctrl = CPU_CONTROL_MMU_ENABLE |
1137             CPU_CONTROL_IC_ENABLE |
1138             CPU_CONTROL_DC_ENABLE |
1139             CPU_CONTROL_BPRD_ENABLE;
1140
1141 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1142         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1143 #endif
1144
1145         /* Switch to big endian */
1146 #ifdef __ARMEB__
1147         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1148 #endif
1149
1150         /* Check if the vector page is at the high address (0xffff0000) */
1151         if (vector_page == ARM_VECTORS_HIGH)
1152                 cpuctrl |= CPU_CONTROL_VECRELOC;
1153
1154         /* Clear out the cache */
1155         cpu_idcache_wbinv_all();
1156
1157         /* Set the control register */
1158         ctrl = cpuctrl;
1159         cpu_control(cpuctrlmask, cpuctrl);
1160
1161         /* And again. */
1162         cpu_idcache_wbinv_all();
1163 #if defined(SMP) && !defined(ARM_NEW_PMAP)
1164         armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
1165 #endif
1166
1167         cpu_scc_setup_ccnt();
1168 }
1169 #endif  /* CPU_CORTEXA */
1170
1171 #if defined(CPU_FA526)
1172 void
1173 fa526_setup(void)
1174 {
1175         int cpuctrl, cpuctrlmask;
1176
1177         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1178                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1179                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1180                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1181                 | CPU_CONTROL_BPRD_ENABLE;
1182         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1183                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1184                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1185                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1186                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1187                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1188                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1189
1190 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1191         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1192 #endif
1193
1194 #ifdef __ARMEB__
1195         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1196 #endif
1197
1198         if (vector_page == ARM_VECTORS_HIGH)
1199                 cpuctrl |= CPU_CONTROL_VECRELOC;
1200
1201         /* Clear out the cache */
1202         cpu_idcache_wbinv_all();
1203
1204         /* Set the control register */
1205         ctrl = cpuctrl;
1206         cpu_control(0xffffffff, cpuctrl);
1207 }
1208 #endif  /* CPU_FA526 */
1209
1210 #if defined(CPU_XSCALE_80321) || \
1211   defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1212   defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
1213 void
1214 xscale_setup(void)
1215 {
1216         uint32_t auxctl;
1217         int cpuctrl, cpuctrlmask;
1218
1219         /*
1220          * The XScale Write Buffer is always enabled.  Our option
1221          * is to enable/disable coalescing.  Note that bits 6:3
1222          * must always be enabled.
1223          */
1224
1225         cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1226                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1227                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1228                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1229                  | CPU_CONTROL_BPRD_ENABLE;
1230         cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1231                  | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1232                  | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1233                  | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1234                  | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1235                  | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1236                  | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1237                  CPU_CONTROL_L2_ENABLE;
1238
1239 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1240         cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1241 #endif
1242
1243 #ifdef __ARMEB__
1244         cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1245 #endif
1246
1247         if (vector_page == ARM_VECTORS_HIGH)
1248                 cpuctrl |= CPU_CONTROL_VECRELOC;
1249 #ifdef CPU_XSCALE_CORE3
1250         cpuctrl |= CPU_CONTROL_L2_ENABLE;
1251 #endif
1252
1253         /* Clear out the cache */
1254         cpu_idcache_wbinv_all();
1255
1256         /*
1257          * Set the control register.  Note that bits 6:3 must always
1258          * be set to 1.
1259          */
1260         ctrl = cpuctrl;
1261 /*      cpu_control(cpuctrlmask, cpuctrl);*/
1262         cpu_control(0xffffffff, cpuctrl);
1263
1264         /* Make sure write coalescing is turned on */
1265         __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1266                 : "=r" (auxctl));
1267 #ifdef XSCALE_NO_COALESCE_WRITES
1268         auxctl |= XSCALE_AUXCTL_K;
1269 #else
1270         auxctl &= ~XSCALE_AUXCTL_K;
1271 #endif
1272 #ifdef CPU_XSCALE_CORE3
1273         auxctl |= XSCALE_AUXCTL_LLR;
1274         auxctl |= XSCALE_AUXCTL_MD_MASK;
1275 #endif
1276         __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1277                 : : "r" (auxctl));
1278 }
1279 #endif  /* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
1280            CPU_XSCALE_80219 */