1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 * products derived from this software without specific prior written
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * RiscBSD kernel project
43 * C functions for supporting CPU / MMU / TLB specific operations.
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
50 #include <sys/param.h>
51 #include <sys/systm.h>
53 #include <sys/mutex.h>
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/disassem.h>
62 #include <machine/cpuconf.h>
63 #include <machine/cpufunc.h>
64 #include <machine/bootconfig.h>
66 #ifdef CPU_XSCALE_80200
67 #include <arm/xscale/i80200/i80200reg.h>
68 #include <arm/xscale/i80200/i80200var.h>
71 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
72 #include <arm/xscale/i80321/i80321reg.h>
73 #include <arm/xscale/i80321/i80321var.h>
76 #if defined(CPU_XSCALE_81342)
77 #include <arm/xscale/i8134x/i81342reg.h>
80 #ifdef CPU_XSCALE_IXP425
81 #include <arm/xscale/ixp425/ixp425reg.h>
82 #include <arm/xscale/ixp425/ixp425var.h>
85 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
86 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
87 #include <arm/xscale/xscalereg.h>
/*
 * Performance-monitor callback table.
 * NOTE(review): presumably registered by PMU support code elsewhere
 * (used with PERFCTRS below) — confirm against the rest of the file.
 */
91 struct arm_pmc_funcs *arm_pmc;
94 /* PRIMARY CACHE VARIABLES */
/*
 * Primary cache geometry, filled in at boot by get_cachetype_cp15()
 * (CPUs with a CP15 Cache Type register) or get_cachetype_table()
 * (CPUs listed in cachetab[] below).
 */
96 int arm_picache_line_size;
99 int arm_pdcache_size; /* and unified */
100 int arm_pdcache_line_size;
101 int arm_pdcache_ways;
104 int arm_pcache_unified;
/* D-cache line size and line-size - 1 mask, for aligning maintenance ranges. */
106 int arm_dcache_align;
107 int arm_dcache_align_mask;
109 /* 1 == use cpu_sleep(), 0 == don't */
110 int cpu_do_powersave;
/*
 * ARM7TDMI function table.  The same arm7tdmi flushID routines back the
 * I, D and unified TLB slots, and operations this core does not have are
 * stubbed out with cpufunc_nullop.  Data aborts use the "late" fixup.
 */
114 struct cpu_functions arm7tdmi_cpufuncs = {
118 cpufunc_nullop, /* cpwait */
122 cpufunc_control, /* control */
123 cpufunc_domains, /* domain */
124 arm7tdmi_setttb, /* setttb */
125 cpufunc_faultstatus, /* faultstatus */
126 cpufunc_faultaddress, /* faultaddress */
130 arm7tdmi_tlb_flushID, /* tlb_flushID */
131 arm7tdmi_tlb_flushID_SE, /* tlb_flushID_SE */
132 arm7tdmi_tlb_flushID, /* tlb_flushI */
133 arm7tdmi_tlb_flushID_SE, /* tlb_flushI_SE */
134 arm7tdmi_tlb_flushID, /* tlb_flushD */
135 arm7tdmi_tlb_flushID_SE, /* tlb_flushD_SE */
137 /* Cache operations */
139 cpufunc_nullop, /* icache_sync_all */
140 (void *)cpufunc_nullop, /* icache_sync_range */
142 arm7tdmi_cache_flushID, /* dcache_wbinv_all */
143 (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range */
144 (void *)arm7tdmi_cache_flushID, /* dcache_inv_range */
145 (void *)cpufunc_nullop, /* dcache_wb_range */
147 arm7tdmi_cache_flushID, /* idcache_wbinv_all */
148 (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range */
150 /* Other functions */
152 cpufunc_nullop, /* flush_prefetchbuf */
153 cpufunc_nullop, /* drain_writebuf */
154 cpufunc_nullop, /* flush_brnchtgt_C */
155 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
157 (void *)cpufunc_nullop, /* sleep */
161 late_abort_fixup, /* dataabt_fixup */
162 cpufunc_null_fixup, /* prefetchabt_fixup */
164 arm7tdmi_context_switch, /* context_switch */
166 arm7tdmi_setup /* cpu setup */
169 #endif /* CPU_ARM7TDMI */
/*
 * ARM8 function table.  Cache maintenance goes through
 * arm8_cache_purgeID/arm8_cache_cleanID; there is no separate
 * invalidate-only range op (purge is used instead — see the XXX).
 */
172 struct cpu_functions arm8_cpufuncs = {
176 cpufunc_nullop, /* cpwait */
180 cpufunc_control, /* control */
181 cpufunc_domains, /* domain */
182 arm8_setttb, /* setttb */
183 cpufunc_faultstatus, /* faultstatus */
184 cpufunc_faultaddress, /* faultaddress */
188 arm8_tlb_flushID, /* tlb_flushID */
189 arm8_tlb_flushID_SE, /* tlb_flushID_SE */
190 arm8_tlb_flushID, /* tlb_flushI */
191 arm8_tlb_flushID_SE, /* tlb_flushI_SE */
192 arm8_tlb_flushID, /* tlb_flushD */
193 arm8_tlb_flushID_SE, /* tlb_flushD_SE */
195 /* Cache operations */
197 cpufunc_nullop, /* icache_sync_all */
198 (void *)cpufunc_nullop, /* icache_sync_range */
200 arm8_cache_purgeID, /* dcache_wbinv_all */
201 (void *)arm8_cache_purgeID, /* dcache_wbinv_range */
202 /*XXX*/ (void *)arm8_cache_purgeID, /* dcache_inv_range */
203 (void *)arm8_cache_cleanID, /* dcache_wb_range */
205 arm8_cache_purgeID, /* idcache_wbinv_all */
206 (void *)arm8_cache_purgeID, /* idcache_wbinv_range */
208 /* Other functions */
210 cpufunc_nullop, /* flush_prefetchbuf */
211 cpufunc_nullop, /* drain_writebuf */
212 cpufunc_nullop, /* flush_brnchtgt_C */
213 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
215 (void *)cpufunc_nullop, /* sleep */
219 cpufunc_null_fixup, /* dataabt_fixup */
220 cpufunc_null_fixup, /* prefetchabt_fixup */
222 arm8_context_switch, /* context_switch */
224 arm8_setup /* cpu setup */
226 #endif /* CPU_ARM8 */
/*
 * ARM9 function table: arm9_* split I/D cache routines layered over the
 * common ARMv4 TLB primitives.  dcache_inv_range is aliased to the
 * write-back+invalidate routine (see the XXX marker).
 */
229 struct cpu_functions arm9_cpufuncs = {
233 cpufunc_nullop, /* cpwait */
237 cpufunc_control, /* control */
238 cpufunc_domains, /* Domain */
239 arm9_setttb, /* Setttb */
240 cpufunc_faultstatus, /* Faultstatus */
241 cpufunc_faultaddress, /* Faultaddress */
245 armv4_tlb_flushID, /* tlb_flushID */
246 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
247 armv4_tlb_flushI, /* tlb_flushI */
248 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
249 armv4_tlb_flushD, /* tlb_flushD */
250 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
252 /* Cache operations */
254 arm9_icache_sync_all, /* icache_sync_all */
255 arm9_icache_sync_range, /* icache_sync_range */
257 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
258 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
259 /*XXX*/ arm9_dcache_wbinv_range, /* dcache_inv_range */
260 arm9_dcache_wb_range, /* dcache_wb_range */
262 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
263 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
265 /* Other functions */
267 cpufunc_nullop, /* flush_prefetchbuf */
268 armv4_drain_writebuf, /* drain_writebuf */
269 cpufunc_nullop, /* flush_brnchtgt_C */
270 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
272 (void *)cpufunc_nullop, /* sleep */
276 cpufunc_null_fixup, /* dataabt_fixup */
277 cpufunc_null_fixup, /* prefetchabt_fixup */
279 arm9_context_switch, /* context_switch */
281 arm9_setup /* cpu setup */
284 #endif /* CPU_ARM9 */
/*
 * ARM10 function table.  Unlike ARM9 above, this core has a genuine
 * invalidate-only D-cache range op (arm10_dcache_inv_range) and
 * single-entry I-TLB flush (arm10_tlb_flushI_SE).
 */
287 struct cpu_functions arm10_cpufuncs = {
291 cpufunc_nullop, /* cpwait */
295 cpufunc_control, /* control */
296 cpufunc_domains, /* Domain */
297 arm10_setttb, /* Setttb */
298 cpufunc_faultstatus, /* Faultstatus */
299 cpufunc_faultaddress, /* Faultaddress */
303 armv4_tlb_flushID, /* tlb_flushID */
304 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
305 armv4_tlb_flushI, /* tlb_flushI */
306 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
307 armv4_tlb_flushD, /* tlb_flushD */
308 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
310 /* Cache operations */
312 arm10_icache_sync_all, /* icache_sync_all */
313 arm10_icache_sync_range, /* icache_sync_range */
315 arm10_dcache_wbinv_all, /* dcache_wbinv_all */
316 arm10_dcache_wbinv_range, /* dcache_wbinv_range */
317 arm10_dcache_inv_range, /* dcache_inv_range */
318 arm10_dcache_wb_range, /* dcache_wb_range */
320 arm10_idcache_wbinv_all, /* idcache_wbinv_all */
321 arm10_idcache_wbinv_range, /* idcache_wbinv_range */
323 /* Other functions */
325 cpufunc_nullop, /* flush_prefetchbuf */
326 armv4_drain_writebuf, /* drain_writebuf */
327 cpufunc_nullop, /* flush_brnchtgt_C */
328 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
330 (void *)cpufunc_nullop, /* sleep */
334 cpufunc_null_fixup, /* dataabt_fixup */
335 cpufunc_null_fixup, /* prefetchabt_fixup */
337 arm10_context_switch, /* context_switch */
339 arm10_setup /* cpu setup */
342 #endif /* CPU_ARM10 */
/*
 * StrongARM SA-110 function table: shared sa1_* cache routines over the
 * ARMv4 TLB primitives.  dcache_inv_range falls back to purge (see XXX).
 */
345 struct cpu_functions sa110_cpufuncs = {
349 cpufunc_nullop, /* cpwait */
353 cpufunc_control, /* control */
354 cpufunc_domains, /* domain */
355 sa1_setttb, /* setttb */
356 cpufunc_faultstatus, /* faultstatus */
357 cpufunc_faultaddress, /* faultaddress */
361 armv4_tlb_flushID, /* tlb_flushID */
362 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
363 armv4_tlb_flushI, /* tlb_flushI */
364 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
365 armv4_tlb_flushD, /* tlb_flushD */
366 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
368 /* Cache operations */
370 sa1_cache_syncI, /* icache_sync_all */
371 sa1_cache_syncI_rng, /* icache_sync_range */
373 sa1_cache_purgeD, /* dcache_wbinv_all */
374 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
375 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
376 sa1_cache_cleanD_rng, /* dcache_wb_range */
378 sa1_cache_purgeID, /* idcache_wbinv_all */
379 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
381 /* Other functions */
383 cpufunc_nullop, /* flush_prefetchbuf */
384 armv4_drain_writebuf, /* drain_writebuf */
385 cpufunc_nullop, /* flush_brnchtgt_C */
386 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
388 (void *)cpufunc_nullop, /* sleep */
392 cpufunc_null_fixup, /* dataabt_fixup */
393 cpufunc_null_fixup, /* prefetchabt_fixup */
395 sa110_context_switch, /* context_switch */
397 sa110_setup /* cpu setup */
399 #endif /* CPU_SA110 */
401 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 function table: identical cache/TLB handling to SA-110,
 * plus a real read-buffer drain (sa11x0_drain_readbuf) and a real
 * low-power sleep hook (sa11x0_cpu_sleep).
 */
402 struct cpu_functions sa11x0_cpufuncs = {
406 cpufunc_nullop, /* cpwait */
410 cpufunc_control, /* control */
411 cpufunc_domains, /* domain */
412 sa1_setttb, /* setttb */
413 cpufunc_faultstatus, /* faultstatus */
414 cpufunc_faultaddress, /* faultaddress */
418 armv4_tlb_flushID, /* tlb_flushID */
419 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
420 armv4_tlb_flushI, /* tlb_flushI */
421 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
422 armv4_tlb_flushD, /* tlb_flushD */
423 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
425 /* Cache operations */
427 sa1_cache_syncI, /* icache_sync_all */
428 sa1_cache_syncI_rng, /* icache_sync_range */
430 sa1_cache_purgeD, /* dcache_wbinv_all */
431 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
432 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
433 sa1_cache_cleanD_rng, /* dcache_wb_range */
435 sa1_cache_purgeID, /* idcache_wbinv_all */
436 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
438 /* Other functions */
440 sa11x0_drain_readbuf, /* flush_prefetchbuf */
441 armv4_drain_writebuf, /* drain_writebuf */
442 cpufunc_nullop, /* flush_brnchtgt_C */
443 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
445 sa11x0_cpu_sleep, /* sleep */
449 cpufunc_null_fixup, /* dataabt_fixup */
450 cpufunc_null_fixup, /* prefetchabt_fixup */
452 sa11x0_context_switch, /* context_switch */
454 sa11x0_setup /* cpu setup */
456 #endif /* CPU_SA1100 || CPU_SA1110 */
/*
 * IXP12x0 function table: SA-1-style cache handling with the IXP12x0's
 * own read-buffer drain, context switch and setup routines.
 */
459 struct cpu_functions ixp12x0_cpufuncs = {
463 cpufunc_nullop, /* cpwait */
467 cpufunc_control, /* control */
468 cpufunc_domains, /* domain */
469 sa1_setttb, /* setttb */
470 cpufunc_faultstatus, /* faultstatus */
471 cpufunc_faultaddress, /* faultaddress */
475 armv4_tlb_flushID, /* tlb_flushID */
476 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
477 armv4_tlb_flushI, /* tlb_flushI */
478 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
479 armv4_tlb_flushD, /* tlb_flushD */
480 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
482 /* Cache operations */
484 sa1_cache_syncI, /* icache_sync_all */
485 sa1_cache_syncI_rng, /* icache_sync_range */
487 sa1_cache_purgeD, /* dcache_wbinv_all */
488 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
489 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
490 sa1_cache_cleanD_rng, /* dcache_wb_range */
492 sa1_cache_purgeID, /* idcache_wbinv_all */
493 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
495 /* Other functions */
497 ixp12x0_drain_readbuf, /* flush_prefetchbuf */
498 armv4_drain_writebuf, /* drain_writebuf */
499 cpufunc_nullop, /* flush_brnchtgt_C */
500 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
502 (void *)cpufunc_nullop, /* sleep */
506 cpufunc_null_fixup, /* dataabt_fixup */
507 cpufunc_null_fixup, /* prefetchabt_fixup */
509 ixp12x0_context_switch, /* context_switch */
511 ixp12x0_setup /* cpu setup */
513 #endif /* CPU_IXP12X0 */
515 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
516 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
517 defined(CPU_XSCALE_80219)
/*
 * XScale function table, shared by the 80200/80321/80219/PXA2x0/IXP425
 * variants.  Unlike the cores above, cpwait is a real operation here
 * (xscale_cpwait), and sleep is implemented by xscale_cpu_sleep.
 * dcache_inv_range is a true invalidate (xscale_cache_flushD_rng); the
 * i80200 A0/A1 errata workaround in set_cpufuncs() overrides it.
 */
519 struct cpu_functions xscale_cpufuncs = {
523 xscale_cpwait, /* cpwait */
527 xscale_control, /* control */
528 cpufunc_domains, /* domain */
529 xscale_setttb, /* setttb */
530 cpufunc_faultstatus, /* faultstatus */
531 cpufunc_faultaddress, /* faultaddress */
535 armv4_tlb_flushID, /* tlb_flushID */
536 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
537 armv4_tlb_flushI, /* tlb_flushI */
538 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
539 armv4_tlb_flushD, /* tlb_flushD */
540 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
542 /* Cache operations */
544 xscale_cache_syncI, /* icache_sync_all */
545 xscale_cache_syncI_rng, /* icache_sync_range */
547 xscale_cache_purgeD, /* dcache_wbinv_all */
548 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
549 xscale_cache_flushD_rng, /* dcache_inv_range */
550 xscale_cache_cleanD_rng, /* dcache_wb_range */
552 xscale_cache_purgeID, /* idcache_wbinv_all */
553 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
555 /* Other functions */
557 cpufunc_nullop, /* flush_prefetchbuf */
558 armv4_drain_writebuf, /* drain_writebuf */
559 cpufunc_nullop, /* flush_brnchtgt_C */
560 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
562 xscale_cpu_sleep, /* sleep */
566 cpufunc_null_fixup, /* dataabt_fixup */
567 cpufunc_null_fixup, /* prefetchabt_fixup */
569 xscale_context_switch, /* context_switch */
571 xscale_setup /* cpu setup */
574 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
577 #ifdef CPU_XSCALE_81342
/*
 * XScale core 3 (81342) function table: same TLB, cpwait and sleep code
 * as classic XScale, but with xscalec3_* replacements for the TTB load,
 * most cache maintenance, and the context switch.
 */
578 struct cpu_functions xscalec3_cpufuncs = {
582 xscale_cpwait, /* cpwait */
586 xscale_control, /* control */
587 cpufunc_domains, /* domain */
588 xscalec3_setttb, /* setttb */
589 cpufunc_faultstatus, /* faultstatus */
590 cpufunc_faultaddress, /* faultaddress */
594 armv4_tlb_flushID, /* tlb_flushID */
595 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
596 armv4_tlb_flushI, /* tlb_flushI */
597 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
598 armv4_tlb_flushD, /* tlb_flushD */
599 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
601 /* Cache operations */
603 xscalec3_cache_syncI, /* icache_sync_all */
604 xscale_cache_syncI_rng, /* icache_sync_range */
606 xscalec3_cache_purgeD, /* dcache_wbinv_all */
607 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
608 xscale_cache_flushD_rng, /* dcache_inv_range */
609 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
611 xscalec3_cache_purgeID, /* idcache_wbinv_all */
612 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
614 /* Other functions */
616 cpufunc_nullop, /* flush_prefetchbuf */
617 armv4_drain_writebuf, /* drain_writebuf */
618 cpufunc_nullop, /* flush_brnchtgt_C */
619 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
621 xscale_cpu_sleep, /* sleep */
625 cpufunc_null_fixup, /* dataabt_fixup */
626 cpufunc_null_fixup, /* prefetchabt_fixup */
628 xscalec3_context_switch, /* context_switch */
630 xscale_setup /* cpu setup */
632 #endif /* CPU_XSCALE_81342 */
634 * Global constants also used by locore.s
/* The live function table; set_cpufuncs() copies one of the tables above in. */
637 struct cpu_functions cpufuncs;
639 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
641 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
642 defined (CPU_ARM10) || \
643 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
644 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
645 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
647 static void get_cachetype_cp15(void);
649 /* Additional cache information local to this file. Log2 of some of the
651 static int arm_dcache_l2_nsets;
652 static int arm_dcache_l2_assoc;
653 static int arm_dcache_l2_linesize;
/*
 * get_cachetype_cp15: read the CP15 Cache Type register (c0, opcode2 1)
 * and decode its fields into the arm_p{i,d}cache_* globals and the
 * log2-based arm_dcache_l2_* values used by the ARM9/ARM10 set/index
 * loop setup in set_cpufuncs().
 */
658 u_int ctype, isize, dsize;
661 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
665 * ...and thus spake the ARM ARM:
667 * If an <opcode2> value corresponding to an unimplemented or
668 * reserved ID register is encountered, the System Control
669 * processor returns the value of the main ID register.
671 if (ctype == cpufunc_id())
674 if ((ctype & CPU_CT_S) == 0)
675 arm_pcache_unified = 1;
678 * If you want to know how this code works, go read the ARM ARM.
681 arm_pcache_type = CPU_CT_CTYPE(ctype);
/* Split caches: decode the I-cache geometry separately. */
683 if (arm_pcache_unified == 0) {
684 isize = CPU_CT_ISIZE(ctype);
685 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
686 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
687 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
688 if (isize & CPU_CT_xSIZE_M)
689 arm_picache_line_size = 0; /* not present */
691 arm_picache_ways = 1;
693 arm_picache_ways = multiplier <<
694 (CPU_CT_xSIZE_ASSOC(isize) - 1);
696 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
/* D-cache (or unified-cache) geometry from the DSIZE field. */
699 dsize = CPU_CT_DSIZE(ctype);
700 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
701 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
702 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
703 if (dsize & CPU_CT_xSIZE_M)
704 arm_pdcache_line_size = 0; /* not present */
706 arm_pdcache_ways = 1;
708 arm_pdcache_ways = multiplier <<
709 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
711 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
713 arm_dcache_align = arm_pdcache_line_size;
715 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
716 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
717 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
718 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
721 arm_dcache_align_mask = arm_dcache_align - 1;
723 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
725 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
727 /* Cache information for CPUs without cache type registers. */
/* One struct cachetab row per known CPU id; fields seed the arm_p* globals. */
731 int ct_pcache_unified;
733 int ct_pdcache_line_size;
736 int ct_picache_line_size;
740 struct cachetab cachetab[] = {
741 /* cpuid, cache type, u, dsiz, ls, wy, isiz, ls, wy */
742 /* XXX is this type right for SA-1? */
743 { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
744 { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
745 { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
746 { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
747 { 0, 0, 0, 0, 0, 0, 0, 0} /* terminator: ct_cpuid == 0 ends the scan */
750 static void get_cachetype_table(void);
/*
 * get_cachetype_table: find this CPU's id (masked with CPU_ID_CPU_MASK)
 * in cachetab[] and copy the matching row into the global cache-geometry
 * variables.  Used for cores with no CP15 Cache Type register.
 */
753 get_cachetype_table()
756 u_int32_t cpuid = cpufunc_id();
758 for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
759 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
760 arm_pcache_type = cachetab[i].ct_pcache_type;
761 arm_pcache_unified = cachetab[i].ct_pcache_unified;
762 arm_pdcache_size = cachetab[i].ct_pdcache_size;
763 arm_pdcache_line_size =
764 cachetab[i].ct_pdcache_line_size;
765 arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
766 arm_picache_size = cachetab[i].ct_picache_size;
767 arm_picache_line_size =
768 cachetab[i].ct_picache_line_size;
769 arm_picache_ways = cachetab[i].ct_picache_ways;
772 arm_dcache_align = arm_pdcache_line_size;
774 arm_dcache_align_mask = arm_dcache_align - 1;
777 #endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
/*
 * set_cpufuncs: identify the running CPU from its CP15 id register and
 * install the matching cpu_functions table, discover cache geometry, pick
 * the pmap PTE initialization, and do per-core one-time setup (PMU reset,
 * errata workarounds).  Falls through to panic() for unknown CPUs.
 */
780 * Cannot panic here as we may not have a console yet ...
786 cputype = cpufunc_id();
787 cputype &= CPU_ID_CPU_MASK;
790 * NOTE: cpu_do_powersave defaults to off. If we encounter a
791 * CPU type where we want to use it by default, then we set it.
795 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
796 CPU_ID_IS7(cputype) &&
797 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
798 cpufuncs = arm7tdmi_cpufuncs;
799 cpu_reset_needs_v4_MMU_disable = 0;
800 get_cachetype_cp15();
801 pmap_pte_init_generic();
806 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
807 (cputype & 0x0000f000) == 0x00008000) {
808 cpufuncs = arm8_cpufuncs;
809 cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
810 get_cachetype_cp15();
811 pmap_pte_init_arm8();
814 #endif /* CPU_ARM8 */
816 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
817 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
818 (cputype & 0x0000f000) == 0x00009000) {
819 cpufuncs = arm9_cpufuncs;
820 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
821 get_cachetype_cp15();
/* Derive set/index stepping for the arm9 D-cache loops from l2 log2 values. */
822 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
823 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
824 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
825 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
826 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
827 #ifdef ARM9_CACHE_WRITE_THROUGH
828 pmap_pte_init_arm9();
830 pmap_pte_init_generic();
834 #endif /* CPU_ARM9 */
836 if (/* cputype == CPU_ID_ARM1020T || */
837 cputype == CPU_ID_ARM1020E) {
839 * Select write-through cacheing (this isn't really an
840 * option on ARM1020T).
842 cpufuncs = arm10_cpufuncs;
843 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
844 get_cachetype_cp15();
845 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
846 arm10_dcache_sets_max =
847 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
848 arm10_dcache_sets_inc;
849 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
850 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
851 pmap_pte_init_generic();
854 #endif /* CPU_ARM10 */
856 if (cputype == CPU_ID_SA110) {
857 cpufuncs = sa110_cpufuncs;
858 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
859 get_cachetype_table();
863 #endif /* CPU_SA110 */
865 if (cputype == CPU_ID_SA1100) {
866 cpufuncs = sa11x0_cpufuncs;
867 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
868 get_cachetype_table();
870 /* Use powersave on this CPU. */
871 cpu_do_powersave = 1;
875 #endif /* CPU_SA1100 */
877 if (cputype == CPU_ID_SA1110) {
878 cpufuncs = sa11x0_cpufuncs;
879 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
880 get_cachetype_table();
882 /* Use powersave on this CPU. */
883 cpu_do_powersave = 1;
887 #endif /* CPU_SA1110 */
889 if (cputype == CPU_ID_IXP1200) {
890 cpufuncs = ixp12x0_cpufuncs;
891 cpu_reset_needs_v4_MMU_disable = 1;
892 get_cachetype_table();
896 #endif /* CPU_IXP12X0 */
897 #ifdef CPU_XSCALE_80200
898 if (cputype == CPU_ID_80200) {
899 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
904 * Reset the Performance Monitoring Unit to a
906 * - CCNT, PMN0, PMN1 reset to 0
907 * - overflow indications cleared
908 * - all counters disabled
910 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
912 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
915 #if defined(XSCALE_CCLKCFG)
917 * Crank CCLKCFG to maximum legal value.
919 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
921 : "r" (XSCALE_CCLKCFG));
925 * XXX Disable ECC in the Bus Controller Unit; we
926 * don't really support it, yet. Clear any pending
929 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
931 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
933 cpufuncs = xscale_cpufuncs;
934 #if defined(PERFCTRS)
939 * i80200 errata: Step-A0 and A1 have a bug where
940 * D$ dirty bits are not cleared on "invalidate by
943 * Workaround: Clean cache line before invalidating.
945 if (rev == 0 || rev == 1)
946 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
948 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
949 get_cachetype_cp15();
950 pmap_pte_init_xscale();
953 #endif /* CPU_XSCALE_80200 */
954 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
955 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
956 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
957 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
959 * Reset the Performance Monitoring Unit to a
961 * - CCNT, PMN0, PMN1 reset to 0
962 * - overflow indications cleared
963 * - all counters disabled
965 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
967 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
970 cpufuncs = xscale_cpufuncs;
971 #if defined(PERFCTRS)
975 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
976 get_cachetype_cp15();
977 pmap_pte_init_xscale();
980 #endif /* CPU_XSCALE_80321 */
982 #if defined(CPU_XSCALE_81342)
983 if (cputype == CPU_ID_81342) {
984 cpufuncs = xscalec3_cpufuncs;
985 #if defined(PERFCTRS)
989 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
990 get_cachetype_cp15();
991 pmap_pte_init_xscale();
994 #endif /* CPU_XSCALE_81342 */
995 #ifdef CPU_XSCALE_PXA2X0
996 /* ignore core revision to test PXA2xx CPUs */
997 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
998 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1000 cpufuncs = xscale_cpufuncs;
1001 #if defined(PERFCTRS)
1005 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1006 get_cachetype_cp15();
1007 pmap_pte_init_xscale();
1009 /* Use powersave on this CPU. */
1010 cpu_do_powersave = 1;
1014 #endif /* CPU_XSCALE_PXA2X0 */
1015 #ifdef CPU_XSCALE_IXP425
1016 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1017 cputype == CPU_ID_IXP425_266) {
1019 cpufuncs = xscale_cpufuncs;
1020 #if defined(PERFCTRS)
1024 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1025 get_cachetype_cp15();
1026 pmap_pte_init_xscale();
1030 #endif /* CPU_XSCALE_IXP425 */
/* No table matched: unsupported CPU. */
1032 * Bzzzz. And the answer was ...
1034 panic("No support for this CPU type (%08x) in kernel", cputype);
1035 return(ARCHITECTURE_NOT_PRESENT);
1039 * Fixup routines for data and prefetch aborts.
1041 * Several compile time symbols are used
1043 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1044 * correction of registers after a fault.
1045 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1046 * when defined should use late aborts
1051 * Null abort fixup routine.
1052 * For use when no fixup is required.
/* Unconditionally reports success; plugged into the tables above for
 * cores that need no register correction after an abort. */
1055 cpufunc_null_fixup(arg)
1058 return(ABORT_FIXUP_OK);
1062 #if defined(CPU_ARM7TDMI)
1064 #ifdef DEBUG_FAULT_CORRECTION
/* DFC_*: fault-correction debug helpers; compile to nothing unless
 * DEBUG_FAULT_CORRECTION is defined. */
1065 #define DFC_PRINTF(x) printf x
1066 #define DFC_DISASSEMBLE(x) disassemble(x)
1068 #define DFC_PRINTF(x) /* nothing */
1069 #define DFC_DISASSEMBLE(x) /* nothing */
1073 * "Early" data abort fixup.
1075 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
1076 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1078 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
/* Undoes base-register writeback performed by a faulting LDM/STM/LDC/STC
 * so the instruction can be restarted after the fault is serviced. */
1081 early_abort_fixup(arg)
1084 trapframe_t *frame = arg;
1086 u_int fault_instruction;
1089 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1091 /* Ok an abort in SVC mode */
1094 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1095 * as the fault happened in svc mode but we need it in the
1096 * usr slot so we can treat the registers as an array of ints
1098 * NOTE: This PC is in the position but writeback is not
1100 * Doing it like this is more efficient than trapping this
1101 * case in all possible locations in the following fixup code.
1104 saved_lr = frame->tf_usr_lr;
1105 frame->tf_usr_lr = frame->tf_svc_lr;
1108 * Note the trapframe does not have the SVC r13 so a fault
1109 * from an instruction with writeback to r13 in SVC mode is
1110 * not allowed. This should not happen as the kstack is
1115 /* Get fault address and status from the CPU */
1117 fault_pc = frame->tf_pc;
1118 fault_instruction = *((volatile unsigned int *)fault_pc);
1120 /* Decode the fault instruction and fix the registers as needed */
/* Bits 27-25 == 100: block transfer (LDM/STM). */
1122 if ((fault_instruction & 0x0e000000) == 0x08000000) {
1126 int *registers = &frame->tf_r0;
1128 DFC_PRINTF(("LDM/STM\n"));
1129 DFC_DISASSEMBLE(fault_pc);
/* Bit 21 (W): writeback was requested, so the base must be restored. */
1130 if (fault_instruction & (1 << 21)) {
1131 DFC_PRINTF(("This instruction must be corrected\n"));
1132 base = (fault_instruction >> 16) & 0x0f;
1134 return ABORT_FIXUP_FAILED;
1135 /* Count registers transferred */
1137 for (loop = 0; loop < 16; ++loop) {
1138 if (fault_instruction & (1<<loop))
1141 DFC_PRINTF(("%d registers used\n", count));
1142 DFC_PRINTF(("Corrected r%d by %d bytes ",
/* Bit 23 (U): direction of the transfer — undo 4 bytes per register. */
1144 if (fault_instruction & (1 << 23)) {
1145 DFC_PRINTF(("down\n"));
1146 registers[base] -= count * 4;
1148 DFC_PRINTF(("up\n"));
1149 registers[base] += count * 4;
/* Bits 27-25 == 110: coprocessor transfer (LDC/STC). */
1152 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1155 int *registers = &frame->tf_r0;
1157 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1159 DFC_DISASSEMBLE(fault_pc);
1161 /* Only need to fix registers if write back is turned on */
1163 if ((fault_instruction & (1 << 21)) != 0) {
1164 base = (fault_instruction >> 16) & 0x0f;
1166 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1167 return ABORT_FIXUP_FAILED;
1169 return ABORT_FIXUP_FAILED;
/* LDC/STC offset field is an 8-bit word count, scaled by 4. */
1171 offset = (fault_instruction & 0xff) << 2;
1172 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1173 if ((fault_instruction & (1 << 23)) != 0)
1175 registers[base] += offset;
1176 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1178 } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1179 return ABORT_FIXUP_FAILED;
1181 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1183 /* Ok an abort in SVC mode */
1186 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1187 * as the fault happened in svc mode but we need it in the
1188 * usr slot so we can treat the registers as an array of ints
1190 * NOTE: This PC is in the position but writeback is not
1192 * Doing it like this is more efficient than trapping this
1193 * case in all possible locations in the prior fixup code.
/* Restore the usr/svc LR swap performed on entry. */
1196 frame->tf_svc_lr = frame->tf_usr_lr;
1197 frame->tf_usr_lr = saved_lr;
1200 * Note the trapframe does not have the SVC r13 so a fault
1201 * from an instruction with writeback to r13 in SVC mode is
1202 * not allowed. This should not happen as the kstack is
1207 return(ABORT_FIXUP_OK);
1209 #endif /* CPU_ARM2/250/3/6/7 */
1212 #if defined(CPU_ARM7TDMI)
/*
 * NOTE(review): the embedded line numbers in this listing are
 * non-contiguous, so some statements (comment openers, braces, local
 * declarations, the function's return type) are on elided lines.  The
 * added comments below describe only what the visible lines show.
 */
1214 * "Late" (base updated) data abort fixup
1216 * For ARM6 (in late-abort mode) and ARM7.
1218 * In this model, all data-transfer instructions need fixing up. We defer
1219 * LDM, STM, LDC and STC fixup to the early-abort handler.
/*
 * late_abort_fixup:
 *	Undo the base-register writeback performed by a data-transfer
 *	instruction that aborted "late" (base already updated), then let
 *	early_abort_fixup() handle LDM/STM/LDC/STC.  Returns an
 *	ABORT_FIXUP_* code.
 */
1222 late_abort_fixup(arg)
1225 trapframe_t *frame = arg;
1227 u_int fault_instruction;
1230 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1232 /* Ok an abort in SVC mode */
1235 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1236 * as the fault happened in svc mode but we need it in the
1237 * usr slot so we can treat the registers as an array of ints
1239 * NOTE: This PC is in the position but writeback is not
1241 * Doing it like this is more efficient than trapping this
1242 * case in all possible locations in the following fixup code.
1245 saved_lr = frame->tf_usr_lr;
1246 frame->tf_usr_lr = frame->tf_svc_lr;
1249 * Note the trapframe does not have the SVC r13 so a fault
1250 * from an instruction with writeback to r13 in SVC mode is
1251 * not allowed. This should not happen as the kstack is
1256 /* Get fault address and status from the CPU */
1258 fault_pc = frame->tf_pc;
1259 fault_instruction = *((volatile unsigned int *)fault_pc);
1261 /* Decode the fault instruction and fix the registers as needed */
1263 /* Was is a swap instruction ? */
/* SWP/SWPB: only disassembled here -- no register fixup is performed. */
1265 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1266 DFC_DISASSEMBLE(fault_pc);
1267 } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1269 /* Was is a ldr/str instruction */
1270 /* This is for late abort only */
1274 int *registers = &frame->tf_r0;
1276 DFC_DISASSEMBLE(fault_pc);
1278 /* This is for late abort only */
/* Bit 24 clear => post-indexed; bit 21 set => writeback requested. */
1280 if ((fault_instruction & (1 << 24)) == 0
1281 || (fault_instruction & (1 << 21)) != 0) {
1282 /* postindexed ldr/str with no writeback */
/* Base register is encoded in bits 19:16 of the instruction. */
1284 base = (fault_instruction >> 16) & 0x0f;
1286 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1287 return ABORT_FIXUP_FAILED;
1289 return ABORT_FIXUP_FAILED;
1290 DFC_PRINTF(("late abt fix: r%d=%08x : ",
1291 base, registers[base]));
/* Bit 25 clear => 12-bit immediate offset; set => shifted register. */
1292 if ((fault_instruction & (1 << 25)) == 0) {
1293 /* Immediate offset - easy */
1295 offset = fault_instruction & 0xfff;
/*
 * NOTE(review): the statement guarded by this U-bit (bit 23) test is on
 * an elided line -- presumably it negates the offset so the writeback
 * can be undone below; confirm against the original source.
 */
1296 if ((fault_instruction & (1 << 23)))
1298 registers[base] += offset;
1299 DFC_PRINTF(("imm=%08x ", offset));
1301 /* offset is a shifted register */
1304 offset = fault_instruction & 0x0f;
1306 return ABORT_FIXUP_FAILED;
1309 * Register offset - hard we have to
1310 * cope with shifts !
1312 offset = registers[offset];
1314 if ((fault_instruction & (1 << 4)) == 0)
1315 /* shift with amount */
1316 shift = (fault_instruction >> 7) & 0x1f;
1318 /* shift with register */
1319 if ((fault_instruction & (1 << 7)) != 0)
1320 /* undefined for now so bail out */
1321 return ABORT_FIXUP_FAILED;
1322 shift = ((fault_instruction >> 8) & 0xf);
1324 return ABORT_FIXUP_FAILED;
1325 DFC_PRINTF(("shift reg=%d ", shift));
1326 shift = registers[shift];
1328 DFC_PRINTF(("shift=%08x ", shift));
/* Bits 6:5 select the shift type applied to the register offset. */
1329 switch (((fault_instruction >> 5) & 0x3)) {
1330 case 0 : /* Logical left */
1331 offset = (int)(((u_int)offset) << shift);
1333 case 1 : /* Logical Right */
1334 if (shift == 0) shift = 32;
1335 offset = (int)(((u_int)offset) >> shift);
1337 case 2 : /* Arithmetic Right */
1338 if (shift == 0) shift = 32;
1339 offset = (int)(((int)offset) >> shift);
1341 case 3 : /* Rotate right (rol or rxx) */
1342 return ABORT_FIXUP_FAILED;
1346 DFC_PRINTF(("abt: fixed LDR/STR with "
1347 "register offset\n"));
/*
 * NOTE(review): the statement guarded by this U-bit test is also on an
 * elided line (presumably the offset negation); confirm against the
 * original source.
 */
1348 if ((fault_instruction & (1 << 23)))
1350 DFC_PRINTF(("offset=%08x ", offset));
1351 registers[base] += offset;
1353 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
/* Restore the SVC/usr lr swap performed on entry. */
1357 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1359 /* Ok an abort in SVC mode */
1362 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1363 * as the fault happened in svc mode but we need it in the
1364 * usr slot so we can treat the registers as an array of ints
1366 * NOTE: This PC is in the position but writeback is not
1368 * Doing it like this is more efficient than trapping this
1369 * case in all possible locations in the prior fixup code.
1372 frame->tf_svc_lr = frame->tf_usr_lr;
1373 frame->tf_usr_lr = saved_lr;
1376 * Note the trapframe does not have the SVC r13 so a fault
1377 * from an instruction with writeback to r13 in SVC mode is
1378 * not allowed. This should not happen as the kstack is
1384 * Now let the early-abort fixup routine have a go, in case it
1385 * was an LDM, STM, LDC or STC that faulted.
1388 return early_abort_fixup(arg);
1390 #endif /* CPU_ARM7TDMI */
1396 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1397 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
1398 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1399 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1400 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * parse_cpu_options:
 *	Walk a NULL-terminated cpu_option table; for each option found as a
 *	boolean in the boot arguments, either OR its control-register bits
 *	into cpuctrl or BIC (clear) them, as directed by the entry's
 *	co_trueop/co_falseop action for the true/false case.  Presumably
 *	returns the updated cpuctrl -- the tail of the function is on
 *	elided lines; confirm against the original source.
 */
1413 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1416 parse_cpu_options(args, optlist, cpuctrl)
1418 struct cpu_option *optlist;
1426 while (optlist->co_name) {
1427 if (get_bootconf_option(args, optlist->co_name,
1428 BOOTOPT_TYPE_BOOLEAN, &integer)) {
/* Option present and true: apply the "true" action. */
1430 if (optlist->co_trueop == OR)
1431 cpuctrl |= optlist->co_value;
1432 else if (optlist->co_trueop == BIC)
1433 cpuctrl &= ~optlist->co_value;
/* Option present and false: apply the "false" action. */
1435 if (optlist->co_falseop == OR)
1436 cpuctrl |= optlist->co_value;
1437 else if (optlist->co_falseop == BIC)
1438 cpuctrl &= ~optlist->co_value;
1445 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1447 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot-option table shared by the ARM6/7/8 setup routines:
 * { name, action-if-true, action-if-false, control-register bits },
 * terminated by a NULL name.  The matching #ifdef COMPAT_12 for the
 * first two legacy entries is on an elided line.
 */
1448 struct cpu_option arm678_options[] = {
1450 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
1451 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1452 #endif /* COMPAT_12 */
1453 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1454 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1455 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1456 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1457 { NULL, IGN, IGN, 0 }
1460 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
/*
 * ARM7TDMI-specific boot options (cache, write buffer, FPA coprocessor
 * clock); NULL-terminated.  The #ifdef COMPAT_12 guarding the legacy
 * "fpaclk2" entry is on an elided line.
 */
1463 struct cpu_option arm7tdmi_options[] = {
1464 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1465 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1466 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1467 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1469 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
1470 #endif /* COMPAT_12 */
1471 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
1472 { NULL, IGN, IGN, 0 }
/*
 * arm7tdmi_setup:
 *	Build the CP15 control-register value for an ARM7TDMI from the
 *	defaults below plus the arm678/arm7tdmi boot options, flush the
 *	caches, then write the whole register via cpu_control().  Parts of
 *	the body (return type, local declarations, the big-endian guard)
 *	are on elided lines.
 */
1476 arm7tdmi_setup(args)
1481 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1482 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1483 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1485 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1486 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1489 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1492 /* Clear out the cache */
1493 cpu_idcache_wbinv_all();
1495 /* Set the control register */
/* 0xffffffff: replace every control-register bit, not just the mask. */
1497 cpu_control(0xffffffff, cpuctrl);
1499 #endif /* CPU_ARM7TDMI */
/*
 * ARM8-specific boot options (cache, write buffer, branch prediction);
 * NULL-terminated.  The #ifdef COMPAT_12 guarding the legacy
 * "branchpredict" entry is on an elided line.
 */
1502 struct cpu_option arm8_options[] = {
1503 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1504 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1505 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1506 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1508 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1509 #endif /* COMPAT_12 */
1510 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1511 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1512 { NULL, IGN, IGN, 0 }
/*
 * NOTE(review): this is the body of the ARM8 setup routine (apparently
 * arm8_setup); its signature and some statements are on elided lines.
 * It merges boot options into the control-register value, adjusts the
 * ARM8 clock/test configuration, then writes both registers.
 */
1520 int cpuctrl, cpuctrlmask;
1524 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1525 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1526 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1527 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1528 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1529 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1530 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1531 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1533 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1534 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1537 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1538 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1541 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1544 /* Get clock configuration */
/* Read the current clock/test register; keep only the low nibble. */
1545 clocktest = arm8_clock_config(0, 0) & 0x0f;
1547 /* Special ARM8 clock and test configuration */
1548 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1552 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1556 clocktest &= ~(0x01);
1559 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1563 clocktest &= ~(0x02);
1566 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
/*
 * NOTE(review): the mask clears bits 7:6 (~0xc0) but the new value is
 * shifted into bits 3:2 ((integer & 3) << 2) -- these do not line up,
 * so one of the two looks wrong.  Confirm against the ARM8 clock/test
 * register layout before changing anything.
 */
1567 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1570 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1571 clocktest |= (integer & 7) << 5;
1575 /* Clear out the cache */
1576 cpu_idcache_wbinv_all();
1578 /* Set the control register */
1580 cpu_control(0xffffffff, cpuctrl);
1582 /* Set the clock/test register */
1584 arm8_clock_config(0x7f, clocktest);
1586 #endif /* CPU_ARM8 */
/*
 * ARM9 boot options: split I/D caches are controllable separately
 * (CPU_CONTROL_IC_ENABLE / CPU_CONTROL_DC_ENABLE); NULL-terminated.
 */
1589 struct cpu_option arm9_options[] = {
1590 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1591 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1592 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1593 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1594 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1595 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1596 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1597 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1598 { NULL, IGN, IGN, 0 }
/*
 * NOTE(review): body of the ARM9 setup routine (apparently arm9_setup);
 * its signature is on elided lines.  Builds the control-register value
 * from defaults and boot options, relocates the vector page if needed,
 * flushes the caches, and writes the control register.
 */
1605 int cpuctrl, cpuctrlmask;
1607 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1608 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1609 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1610 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1611 CPU_CONTROL_ROUNDROBIN;
1612 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1613 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1614 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1615 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1616 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1617 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1618 | CPU_CONTROL_ROUNDROBIN;
1620 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1621 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1624 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1627 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* High vectors (0xffff0000) require the VECRELOC control bit. */
1629 if (vector_page == ARM_VECTORS_HIGH)
1630 cpuctrl |= CPU_CONTROL_VECRELOC;
1632 /* Clear out the cache */
1633 cpu_idcache_wbinv_all();
1635 /* Set the control register */
/* Unlike most siblings, only the bits in cpuctrlmask are written here. */
1636 cpu_control(cpuctrlmask, cpuctrl);
1640 #endif /* CPU_ARM9 */
/*
 * ARM10 boot options (split I/D caches, write buffer); NULL-terminated.
 */
1643 struct cpu_option arm10_options[] = {
1644 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1645 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1646 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1647 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1648 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1649 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1650 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1651 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1652 { NULL, IGN, IGN, 0 }
/*
 * NOTE(review): body of the ARM10 setup routine (apparently
 * arm10_setup); its signature is on elided lines.  Builds the control
 * register from defaults and boot options, flushes and invalidates the
 * caches, then writes the register.
 */
1659 int cpuctrl, cpuctrlmask;
1661 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1662 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1663 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1664 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1665 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1666 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1667 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1668 | CPU_CONTROL_BPRD_ENABLE
1669 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1671 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1672 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1675 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1678 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1681 /* Clear out the cache */
1682 cpu_idcache_wbinv_all();
1684 /* Now really make sure they are clean. */
/* CP15 c7,c7,0 appears to be a cache-invalidate op -- TODO confirm
 * against the ARM10 TRM. */
1685 asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1687 /* Set the control register */
1689 cpu_control(0xffffffff, cpuctrl);
1692 cpu_idcache_wbinv_all();
1694 #endif /* CPU_ARM10 */
/*
 * SA-110 boot options; NULL-terminated.  The #ifdef COMPAT_12 guarding
 * the first two legacy entries is on an elided line.
 */
1697 struct cpu_option sa110_options[] = {
1699 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1700 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1701 #endif /* COMPAT_12 */
1702 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1703 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1704 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1705 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1706 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1707 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1708 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1709 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1710 { NULL, IGN, IGN, 0 }
/*
 * NOTE(review): body of the SA-110 setup routine (apparently
 * sa110_setup); its signature is on elided lines.  Builds the control
 * register from defaults and boot options, writes it, then enables
 * clock switching via a CP15 op.
 */
1717 int cpuctrl, cpuctrlmask;
1719 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1720 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1721 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1722 | CPU_CONTROL_WBUF_ENABLE;
1723 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1724 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1725 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1726 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1727 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1728 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1729 | CPU_CONTROL_CPCLK;
1731 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1732 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1735 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1738 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1741 /* Clear out the cache */
1742 cpu_idcache_wbinv_all();
1744 /* Set the control register */
1746 /* cpu_control(cpuctrlmask, cpuctrl);*/
1747 cpu_control(0xffffffff, cpuctrl);
1750 * enable clockswitching, note that this doesn't read or write to r0,
1751 * r0 is just to make it valid asm
1753 __asm ("mcr 15, 0, r0, c15, c1, 2");
1755 #endif /* CPU_SA110 */
1757 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 boot options; NULL-terminated.  The #ifdef COMPAT_12
 * guarding the first two legacy entries is on an elided line.
 */
1758 struct cpu_option sa11x0_options[] = {
1760 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1761 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1762 #endif /* COMPAT_12 */
1763 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1764 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1765 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1766 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1767 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1768 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1769 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1770 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1771 { NULL, IGN, IGN, 0 }
/*
 * NOTE(review): body of the SA-1100/SA-1110 setup routine (apparently
 * sa11x0_setup); its signature is on elided lines.  Builds the control
 * register from defaults and boot options, handles high vectors,
 * flushes the caches, and writes the register.
 */
1778 int cpuctrl, cpuctrlmask;
1780 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1781 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1782 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1783 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1784 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1785 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1786 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1787 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1788 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1789 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1790 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1792 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1793 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1797 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
1800 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* High vectors (0xffff0000) require the VECRELOC control bit. */
1803 if (vector_page == ARM_VECTORS_HIGH)
1804 cpuctrl |= CPU_CONTROL_VECRELOC;
1805 /* Clear out the cache */
1806 cpu_idcache_wbinv_all();
1807 /* Set the control register */
1809 cpu_control(0xffffffff, cpuctrl);
1811 #endif /* CPU_SA1100 || CPU_SA1110 */
1813 #if defined(CPU_IXP12X0)
/*
 * IXP12x0 boot options (split I/D caches, write buffer); NULL-terminated.
 */
1814 struct cpu_option ixp12x0_options[] = {
1815 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1816 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1817 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1818 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1819 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1820 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1821 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1822 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1823 { NULL, IGN, IGN, 0 }
/*
 * NOTE(review): body of the IXP12x0 setup routine (apparently
 * ixp12x0_setup); its signature is on elided lines.  Builds the control
 * register from defaults and boot options, handles high vectors,
 * flushes the caches, and writes the register.
 */
1830 int cpuctrl, cpuctrlmask;
1833 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
1834 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
1835 | CPU_CONTROL_IC_ENABLE;
1837 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
1838 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1839 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
1840 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
1841 | CPU_CONTROL_VECRELOC;
1843 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1844 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1847 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
1850 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* High vectors (0xffff0000) require the VECRELOC control bit. */
1853 if (vector_page == ARM_VECTORS_HIGH)
1854 cpuctrl |= CPU_CONTROL_VECRELOC;
1856 /* Clear out the cache */
1857 cpu_idcache_wbinv_all();
1859 /* Set the control register */
1861 /* cpu_control(0xffffffff, cpuctrl); */
/* Note: writes only the masked bits, unlike most sibling setups. */
1862 cpu_control(cpuctrlmask, cpuctrl);
1864 #endif /* CPU_IXP12X0 */
1866 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1867 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1868 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * XScale boot options (branch prediction, split I/D caches);
 * NULL-terminated.  The #ifdef COMPAT_12 guarding the first two legacy
 * entries is on an elided line.
 */
1869 struct cpu_option xscale_options[] = {
1871 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1872 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1873 #endif /* COMPAT_12 */
1874 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1875 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1876 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1877 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1878 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1879 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1880 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1881 { NULL, IGN, IGN, 0 }
1888 #ifndef CPU_XSCALE_CORE3
/*
 * NOTE(review): body of the XScale setup routine (apparently
 * xscale_setup); its signature and several statements are on elided
 * lines.  Builds the control register from defaults and boot options,
 * writes it, then adjusts write coalescing in the auxiliary control
 * register (CP15 c1, c0, 1).
 */
1891 int cpuctrl, cpuctrlmask;
1894 * The XScale Write Buffer is always enabled. Our option
1895 * is to enable/disable coalescing. Note that bits 6:3
1896 * must always be enabled.
1899 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1900 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1901 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1902 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1903 | CPU_CONTROL_BPRD_ENABLE;
1904 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1905 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1906 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1907 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1908 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1909 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1910 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1912 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1913 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1916 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1919 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* High vectors (0xffff0000) require the VECRELOC control bit. */
1922 if (vector_page == ARM_VECTORS_HIGH)
1923 cpuctrl |= CPU_CONTROL_VECRELOC;
1925 /* Clear out the cache */
1926 cpu_idcache_wbinv_all();
1929 * Set the control register. Note that bits 6:3 must always
1933 /* cpu_control(cpuctrlmask, cpuctrl);*/
1934 cpu_control(0xffffffff, cpuctrl);
1936 #ifndef CPU_XSCALE_CORE3
1937 /* Make sure write coalescing is turned on */
/* Read aux control reg; the output-operand line of this asm is elided. */
1938 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1940 #ifdef XSCALE_NO_COALESCE_WRITES
1941 auxctl |= XSCALE_AUXCTL_K;
1943 auxctl &= ~XSCALE_AUXCTL_K;
/* Write the (possibly modified) aux control register back. */
1945 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1949 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425