1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 * products derived from this software without specific prior written
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * RiscBSD kernel project
43 * C functions for supporting CPU / MMU / TLB specific operations.
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
50 #include <sys/param.h>
51 #include <sys/systm.h>
53 #include <sys/mutex.h>
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/disassem.h>
63 #include <machine/cpuconf.h>
64 #include <machine/cpufunc.h>
65 #include <machine/bootconfig.h>
67 #ifdef CPU_XSCALE_80200
68 #include <arm/xscale/i80200/i80200reg.h>
69 #include <arm/xscale/i80200/i80200var.h>
72 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73 #include <arm/xscale/i80321/i80321reg.h>
74 #include <arm/xscale/i80321/i80321var.h>
77 #if defined(CPU_XSCALE_81342)
78 #include <arm/xscale/i8134x/i81342reg.h>
81 #ifdef CPU_XSCALE_IXP425
82 #include <arm/xscale/ixp425/ixp425reg.h>
83 #include <arm/xscale/ixp425/ixp425var.h>
86 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
87 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
88 #include <arm/xscale/xscalereg.h>
/*
 * Boot-time discovered CPU/cache parameters.  The arm_p{i,d}cache_*
 * values are filled in by get_cachetype_cp15() or get_cachetype_table()
 * below; arm_dcache_align / arm_dcache_align_mask mirror the D-cache
 * line size for cache-maintenance callers.
 * NOTE(review): sampled listing -- sibling declarations (e.g.
 * arm_picache_size/arm_picache_ways, arm_pcache_type) exist in the full
 * file but are not visible here.
 */
92 struct arm_pmc_funcs *arm_pmc;
95 /* PRIMARY CACHE VARIABLES */
97 int arm_picache_line_size;
100 int arm_pdcache_size; /* and unified */
101 int arm_pdcache_line_size;
102 int arm_pdcache_ways;
105 int arm_pcache_unified;
107 int arm_dcache_align;
108 int arm_dcache_align_mask;
110 /* 1 == use cpu_sleep(), 0 == don't */
111 int cpu_do_powersave;
/*
 * cpu_functions dispatch table for ARM7TDMI cores; installed by
 * set_cpufuncs() below.  All TLB slots funnel into the two
 * arm7tdmi_tlb_flushID* routines and the cache slots into
 * arm7tdmi_cache_flushID or cpufunc_nullop (see per-slot comments).
 * Note dataabt_fixup is late_abort_fixup, unlike the other tables.
 * NOTE(review): sampled listing -- some initializers (e.g. cf_id) and
 * the closing brace are not visible here.
 */
115 struct cpu_functions arm7tdmi_cpufuncs = {
119 cpufunc_nullop, /* cpwait */
123 cpufunc_control, /* control */
124 cpufunc_domains, /* domain */
125 arm7tdmi_setttb, /* setttb */
126 cpufunc_faultstatus, /* faultstatus */
127 cpufunc_faultaddress, /* faultaddress */
131 arm7tdmi_tlb_flushID, /* tlb_flushID */
132 arm7tdmi_tlb_flushID_SE, /* tlb_flushID_SE */
133 arm7tdmi_tlb_flushID, /* tlb_flushI */
134 arm7tdmi_tlb_flushID_SE, /* tlb_flushI_SE */
135 arm7tdmi_tlb_flushID, /* tlb_flushD */
136 arm7tdmi_tlb_flushID_SE, /* tlb_flushD_SE */
138 /* Cache operations */
140 cpufunc_nullop, /* icache_sync_all */
141 (void *)cpufunc_nullop, /* icache_sync_range */
143 arm7tdmi_cache_flushID, /* dcache_wbinv_all */
144 (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range */
145 (void *)arm7tdmi_cache_flushID, /* dcache_inv_range */
146 (void *)cpufunc_nullop, /* dcache_wb_range */
148 arm7tdmi_cache_flushID, /* idcache_wbinv_all */
149 (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range */
151 /* Other functions */
153 cpufunc_nullop, /* flush_prefetchbuf */
154 cpufunc_nullop, /* drain_writebuf */
155 cpufunc_nullop, /* flush_brnchtgt_C */
156 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
158 (void *)cpufunc_nullop, /* sleep */
162 late_abort_fixup, /* dataabt_fixup */
163 cpufunc_null_fixup, /* prefetchabt_fixup */
165 arm7tdmi_context_switch, /* context_switch */
167 arm7tdmi_setup /* cpu setup */
170 #endif /* CPU_ARM7TDMI */
/*
 * cpu_functions dispatch table for ARM8 cores.  D-cache invalidate has
 * no dedicated routine here: dcache_inv_range is serviced by the
 * (heavier) purge, flagged XXX below.
 * NOTE(review): sampled listing -- some initializers and the closing
 * brace are not visible here.
 */
173 struct cpu_functions arm8_cpufuncs = {
177 cpufunc_nullop, /* cpwait */
181 cpufunc_control, /* control */
182 cpufunc_domains, /* domain */
183 arm8_setttb, /* setttb */
184 cpufunc_faultstatus, /* faultstatus */
185 cpufunc_faultaddress, /* faultaddress */
189 arm8_tlb_flushID, /* tlb_flushID */
190 arm8_tlb_flushID_SE, /* tlb_flushID_SE */
191 arm8_tlb_flushID, /* tlb_flushI */
192 arm8_tlb_flushID_SE, /* tlb_flushI_SE */
193 arm8_tlb_flushID, /* tlb_flushD */
194 arm8_tlb_flushID_SE, /* tlb_flushD_SE */
196 /* Cache operations */
198 cpufunc_nullop, /* icache_sync_all */
199 (void *)cpufunc_nullop, /* icache_sync_range */
201 arm8_cache_purgeID, /* dcache_wbinv_all */
202 (void *)arm8_cache_purgeID, /* dcache_wbinv_range */
203 /*XXX*/ (void *)arm8_cache_purgeID, /* dcache_inv_range */
204 (void *)arm8_cache_cleanID, /* dcache_wb_range */
206 arm8_cache_purgeID, /* idcache_wbinv_all */
207 (void *)arm8_cache_purgeID, /* idcache_wbinv_range */
209 /* Other functions */
211 cpufunc_nullop, /* flush_prefetchbuf */
212 cpufunc_nullop, /* drain_writebuf */
213 cpufunc_nullop, /* flush_brnchtgt_C */
214 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
216 (void *)cpufunc_nullop, /* sleep */
220 cpufunc_null_fixup, /* dataabt_fixup */
221 cpufunc_null_fixup, /* prefetchabt_fixup */
223 arm8_context_switch, /* context_switch */
225 arm8_setup /* cpu setup */
227 #endif /* CPU_ARM8 */
/*
 * cpu_functions dispatch table for ARM9 cores.  TLB ops use the
 * generic armv4 routines except the ID single-entry flush; cache ops
 * use the arm9_* set-index walkers, whose parameters
 * (arm9_dcache_sets_*, arm9_dcache_index_*) are computed in
 * set_cpufuncs() from the cp15 cache-type data.
 * NOTE(review): sampled listing -- some initializers and the closing
 * brace are not visible here.
 */
230 struct cpu_functions arm9_cpufuncs = {
234 cpufunc_nullop, /* cpwait */
238 cpufunc_control, /* control */
239 cpufunc_domains, /* Domain */
240 arm9_setttb, /* Setttb */
241 cpufunc_faultstatus, /* Faultstatus */
242 cpufunc_faultaddress, /* Faultaddress */
246 armv4_tlb_flushID, /* tlb_flushID */
247 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
248 armv4_tlb_flushI, /* tlb_flushI */
249 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
250 armv4_tlb_flushD, /* tlb_flushD */
251 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
253 /* Cache operations */
255 arm9_icache_sync_all, /* icache_sync_all */
256 arm9_icache_sync_range, /* icache_sync_range */
258 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
259 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
260 /*XXX*/ arm9_dcache_wbinv_range, /* dcache_inv_range */
261 arm9_dcache_wb_range, /* dcache_wb_range */
263 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
264 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
266 /* Other functions */
268 cpufunc_nullop, /* flush_prefetchbuf */
269 armv4_drain_writebuf, /* drain_writebuf */
270 cpufunc_nullop, /* flush_brnchtgt_C */
271 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
273 (void *)cpufunc_nullop, /* sleep */
277 cpufunc_null_fixup, /* dataabt_fixup */
278 cpufunc_null_fixup, /* prefetchabt_fixup */
280 arm9_context_switch, /* context_switch */
282 arm9_setup /* cpu setup */
285 #endif /* CPU_ARM9 */
/*
 * cpu_functions dispatch table for ARM10 cores.  Unlike ARM9, this
 * table has a true dcache_inv_range (arm10_dcache_inv_range) and a
 * dedicated I-side single-entry TLB flush.  The arm10_dcache_* walker
 * parameters are computed in set_cpufuncs().
 * NOTE(review): sampled listing -- some initializers and the closing
 * brace are not visible here.
 */
288 struct cpu_functions arm10_cpufuncs = {
292 cpufunc_nullop, /* cpwait */
296 cpufunc_control, /* control */
297 cpufunc_domains, /* Domain */
298 arm10_setttb, /* Setttb */
299 cpufunc_faultstatus, /* Faultstatus */
300 cpufunc_faultaddress, /* Faultaddress */
304 armv4_tlb_flushID, /* tlb_flushID */
305 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
306 armv4_tlb_flushI, /* tlb_flushI */
307 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
308 armv4_tlb_flushD, /* tlb_flushD */
309 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
311 /* Cache operations */
313 arm10_icache_sync_all, /* icache_sync_all */
314 arm10_icache_sync_range, /* icache_sync_range */
316 arm10_dcache_wbinv_all, /* dcache_wbinv_all */
317 arm10_dcache_wbinv_range, /* dcache_wbinv_range */
318 arm10_dcache_inv_range, /* dcache_inv_range */
319 arm10_dcache_wb_range, /* dcache_wb_range */
321 arm10_idcache_wbinv_all, /* idcache_wbinv_all */
322 arm10_idcache_wbinv_range, /* idcache_wbinv_range */
324 /* Other functions */
326 cpufunc_nullop, /* flush_prefetchbuf */
327 armv4_drain_writebuf, /* drain_writebuf */
328 cpufunc_nullop, /* flush_brnchtgt_C */
329 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
331 (void *)cpufunc_nullop, /* sleep */
335 cpufunc_null_fixup, /* dataabt_fixup */
336 cpufunc_null_fixup, /* prefetchabt_fixup */
338 arm10_context_switch, /* context_switch */
340 arm10_setup /* cpu setup */
343 #endif /* CPU_ARM10 */
/*
 * cpu_functions dispatch table for the StrongARM SA-110.  Shares the
 * sa1_* cache/TTB routines with the SA-11x0 and IXP12x0 tables below;
 * only context_switch and setup are SA-110-specific.
 * NOTE(review): sampled listing -- some initializers and the closing
 * brace are not visible here.
 */
346 struct cpu_functions sa110_cpufuncs = {
350 cpufunc_nullop, /* cpwait */
354 cpufunc_control, /* control */
355 cpufunc_domains, /* domain */
356 sa1_setttb, /* setttb */
357 cpufunc_faultstatus, /* faultstatus */
358 cpufunc_faultaddress, /* faultaddress */
362 armv4_tlb_flushID, /* tlb_flushID */
363 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
364 armv4_tlb_flushI, /* tlb_flushI */
365 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
366 armv4_tlb_flushD, /* tlb_flushD */
367 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
369 /* Cache operations */
371 sa1_cache_syncI, /* icache_sync_all */
372 sa1_cache_syncI_rng, /* icache_sync_range */
374 sa1_cache_purgeD, /* dcache_wbinv_all */
375 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
376 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
377 sa1_cache_cleanD_rng, /* dcache_wb_range */
379 sa1_cache_purgeID, /* idcache_wbinv_all */
380 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
382 /* Other functions */
384 cpufunc_nullop, /* flush_prefetchbuf */
385 armv4_drain_writebuf, /* drain_writebuf */
386 cpufunc_nullop, /* flush_brnchtgt_C */
387 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
389 (void *)cpufunc_nullop, /* sleep */
393 cpufunc_null_fixup, /* dataabt_fixup */
394 cpufunc_null_fixup, /* prefetchabt_fixup */
396 sa110_context_switch, /* context_switch */
398 sa110_setup /* cpu setup */
400 #endif /* CPU_SA110 */
/*
 * cpu_functions dispatch table shared by SA-1100 and SA-1110.  Differs
 * from sa110_cpufuncs in three slots: flush_prefetchbuf
 * (sa11x0_drain_readbuf), sleep (sa11x0_cpu_sleep -- these parts get
 * cpu_do_powersave=1 in set_cpufuncs()), and context_switch/setup.
 * NOTE(review): sampled listing -- some initializers and the closing
 * brace are not visible here.
 */
402 #if defined(CPU_SA1100) || defined(CPU_SA1110)
403 struct cpu_functions sa11x0_cpufuncs = {
407 cpufunc_nullop, /* cpwait */
411 cpufunc_control, /* control */
412 cpufunc_domains, /* domain */
413 sa1_setttb, /* setttb */
414 cpufunc_faultstatus, /* faultstatus */
415 cpufunc_faultaddress, /* faultaddress */
419 armv4_tlb_flushID, /* tlb_flushID */
420 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
421 armv4_tlb_flushI, /* tlb_flushI */
422 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
423 armv4_tlb_flushD, /* tlb_flushD */
424 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
426 /* Cache operations */
428 sa1_cache_syncI, /* icache_sync_all */
429 sa1_cache_syncI_rng, /* icache_sync_range */
431 sa1_cache_purgeD, /* dcache_wbinv_all */
432 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
433 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
434 sa1_cache_cleanD_rng, /* dcache_wb_range */
436 sa1_cache_purgeID, /* idcache_wbinv_all */
437 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
439 /* Other functions */
441 sa11x0_drain_readbuf, /* flush_prefetchbuf */
442 armv4_drain_writebuf, /* drain_writebuf */
443 cpufunc_nullop, /* flush_brnchtgt_C */
444 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
446 sa11x0_cpu_sleep, /* sleep */
450 cpufunc_null_fixup, /* dataabt_fixup */
451 cpufunc_null_fixup, /* prefetchabt_fixup */
453 sa11x0_context_switch, /* context_switch */
455 sa11x0_setup /* cpu setup */
457 #endif /* CPU_SA1100 || CPU_SA1110 */
/*
 * cpu_functions dispatch table for the IXP12x0 (StrongARM core).
 * Reuses the sa1_* cache/TTB routines; flush_prefetchbuf uses the
 * IXP12x0-specific read-buffer drain, and sleep is a no-op.
 * NOTE(review): sampled listing -- some initializers and the closing
 * brace are not visible here.
 */
460 struct cpu_functions ixp12x0_cpufuncs = {
464 cpufunc_nullop, /* cpwait */
468 cpufunc_control, /* control */
469 cpufunc_domains, /* domain */
470 sa1_setttb, /* setttb */
471 cpufunc_faultstatus, /* faultstatus */
472 cpufunc_faultaddress, /* faultaddress */
476 armv4_tlb_flushID, /* tlb_flushID */
477 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
478 armv4_tlb_flushI, /* tlb_flushI */
479 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
480 armv4_tlb_flushD, /* tlb_flushD */
481 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
483 /* Cache operations */
485 sa1_cache_syncI, /* icache_sync_all */
486 sa1_cache_syncI_rng, /* icache_sync_range */
488 sa1_cache_purgeD, /* dcache_wbinv_all */
489 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
490 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
491 sa1_cache_cleanD_rng, /* dcache_wb_range */
493 sa1_cache_purgeID, /* idcache_wbinv_all */
494 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
496 /* Other functions */
498 ixp12x0_drain_readbuf, /* flush_prefetchbuf */
499 armv4_drain_writebuf, /* drain_writebuf */
500 cpufunc_nullop, /* flush_brnchtgt_C */
501 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
503 (void *)cpufunc_nullop, /* sleep */
507 cpufunc_null_fixup, /* dataabt_fixup */
508 cpufunc_null_fixup, /* prefetchabt_fixup */
510 ixp12x0_context_switch, /* context_switch */
512 ixp12x0_setup /* cpu setup */
514 #endif /* CPU_IXP12X0 */
/*
 * cpu_functions dispatch table shared by the non-81342 XScale parts
 * (i80200, i80321, i80219, PXA2x0, IXP425).  Uses a real cpwait
 * (xscale_cpwait), a dedicated control routine (xscale_control), a
 * true dcache_inv_range, and a working sleep hook.  set_cpufuncs()
 * patches cf_dcache_inv_range for buggy early i80200 steppings.
 * NOTE(review): sampled listing -- some initializers and the closing
 * brace are not visible here.
 */
516 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
517 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
518 defined(CPU_XSCALE_80219)
520 struct cpu_functions xscale_cpufuncs = {
524 xscale_cpwait, /* cpwait */
528 xscale_control, /* control */
529 cpufunc_domains, /* domain */
530 xscale_setttb, /* setttb */
531 cpufunc_faultstatus, /* faultstatus */
532 cpufunc_faultaddress, /* faultaddress */
536 armv4_tlb_flushID, /* tlb_flushID */
537 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
538 armv4_tlb_flushI, /* tlb_flushI */
539 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
540 armv4_tlb_flushD, /* tlb_flushD */
541 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
543 /* Cache operations */
545 xscale_cache_syncI, /* icache_sync_all */
546 xscale_cache_syncI_rng, /* icache_sync_range */
548 xscale_cache_purgeD, /* dcache_wbinv_all */
549 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
550 xscale_cache_flushD_rng, /* dcache_inv_range */
551 xscale_cache_cleanD_rng, /* dcache_wb_range */
553 xscale_cache_purgeID, /* idcache_wbinv_all */
554 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
556 /* Other functions */
558 cpufunc_nullop, /* flush_prefetchbuf */
559 armv4_drain_writebuf, /* drain_writebuf */
560 cpufunc_nullop, /* flush_brnchtgt_C */
561 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
563 xscale_cpu_sleep, /* sleep */
567 cpufunc_null_fixup, /* dataabt_fixup */
568 cpufunc_null_fixup, /* prefetchabt_fixup */
570 xscale_context_switch, /* context_switch */
572 xscale_setup /* cpu setup */
575 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
/*
 * cpu_functions dispatch table for the XScale core 3 (81342).  Same
 * layout as xscale_cpufuncs but with the xscalec3_* TTB, cache, and
 * context-switch routines; icache_sync_range and dcache_inv_range are
 * still serviced by the generation-2 xscale_* helpers.
 * NOTE(review): sampled listing -- some initializers and the closing
 * brace are not visible here.
 */
578 #ifdef CPU_XSCALE_81342
579 struct cpu_functions xscalec3_cpufuncs = {
583 xscale_cpwait, /* cpwait */
587 xscale_control, /* control */
588 cpufunc_domains, /* domain */
589 xscalec3_setttb, /* setttb */
590 cpufunc_faultstatus, /* faultstatus */
591 cpufunc_faultaddress, /* faultaddress */
595 armv4_tlb_flushID, /* tlb_flushID */
596 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
597 armv4_tlb_flushI, /* tlb_flushI */
598 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
599 armv4_tlb_flushD, /* tlb_flushD */
600 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
602 /* Cache operations */
604 xscalec3_cache_syncI, /* icache_sync_all */
605 xscale_cache_syncI_rng, /* icache_sync_range */
607 xscalec3_cache_purgeD, /* dcache_wbinv_all */
608 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
609 xscale_cache_flushD_rng, /* dcache_inv_range */
610 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
612 xscalec3_cache_purgeID, /* idcache_wbinv_all */
613 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
615 /* Other functions */
617 cpufunc_nullop, /* flush_prefetchbuf */
618 armv4_drain_writebuf, /* drain_writebuf */
619 cpufunc_nullop, /* flush_brnchtgt_C */
620 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
622 xscale_cpu_sleep, /* sleep */
626 cpufunc_null_fixup, /* dataabt_fixup */
627 cpufunc_null_fixup, /* prefetchabt_fixup */
629 xscalec3_context_switch, /* context_switch */
631 xscale_setup /* cpu setup */
633 #endif /* CPU_XSCALE_81342 */
/*
 * The live dispatch table (copied from one of the per-CPU tables in
 * set_cpufuncs()) and the reset flag consumed by locore.s.
 */
635 * Global constants also used by locore.s
638 struct cpu_functions cpufuncs;
640 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
/*
 * Cores with a readable cp15 Cache Type Register get their geometry
 * from get_cachetype_cp15(); the arm_dcache_l2_* statics hold log2
 * values of the D-cache geometry, used by set_cpufuncs() to seed the
 * arm9/arm10 set/index walker parameters.
 */
642 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
643 defined (CPU_ARM10) || \
644 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
645 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
646 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
648 static void get_cachetype_cp15(void);
650 /* Additional cache information local to this file. Log2 of some of the
652 static int arm_dcache_l2_nsets;
653 static int arm_dcache_l2_assoc;
654 static int arm_dcache_l2_linesize;
/*
 * get_cachetype_cp15() body: read the cp15 Cache Type Register
 * (mrc p15, 0, <Rd>, c0, c0, 1) and decode I/D cache size, line size,
 * and associativity per the ARM ARM field encodings.  A CTR that reads
 * back equal to the main ID register means the register is
 * unimplemented (the ARM ARM mandates that aliasing) and decoding is
 * skipped.  The CPU_CT_xSIZE_M bit selects the 3/2 size multiplier.
 * NOTE(review): sampled listing -- the function header, some else
 * branches, and closing braces are not visible here.
 */
659 u_int ctype, isize, dsize;
662 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
666 * ...and thus spake the ARM ARM:
668 * If an <opcode2> value corresponding to an unimplemented or
669 * reserved ID register is encountered, the System Control
670 * processor returns the value of the main ID register.
672 if (ctype == cpufunc_id())
675 if ((ctype & CPU_CT_S) == 0)
676 arm_pcache_unified = 1;
679 * If you want to know how this code works, go read the ARM ARM.
682 arm_pcache_type = CPU_CT_CTYPE(ctype);
684 if (arm_pcache_unified == 0) {
685 isize = CPU_CT_ISIZE(ctype);
686 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
687 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
688 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
689 if (isize & CPU_CT_xSIZE_M)
690 arm_picache_line_size = 0; /* not present */
692 arm_picache_ways = 1;
694 arm_picache_ways = multiplier <<
695 (CPU_CT_xSIZE_ASSOC(isize) - 1);
697 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
700 dsize = CPU_CT_DSIZE(ctype);
701 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
702 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
703 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
704 if (dsize & CPU_CT_xSIZE_M)
705 arm_pdcache_line_size = 0; /* not present */
707 arm_pdcache_ways = 1;
709 arm_pdcache_ways = multiplier <<
710 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
712 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
714 arm_dcache_align = arm_pdcache_line_size;
716 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
717 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
718 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
719 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
722 arm_dcache_align_mask = arm_dcache_align - 1;
724 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
/*
 * Static cache-geometry table for StrongARM-family CPUs whose cp15 has
 * no Cache Type Register.  Looked up by CPU id in
 * get_cachetype_table() below; the all-zero row terminates the table.
 * NOTE(review): sampled listing -- several struct cachetab members
 * (e.g. ct_cpuid, ct_pcache_type, the size/ways fields) and braces are
 * not visible here.
 */
726 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
728 /* Cache information for CPUs without cache type registers. */
732 int ct_pcache_unified;
734 int ct_pdcache_line_size;
737 int ct_picache_line_size;
741 struct cachetab cachetab[] = {
742 /* cpuid, cache type, u, dsiz, ls, wy, isiz, ls, wy */
743 /* XXX is this type right for SA-1? */
744 { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
745 { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
746 { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
747 { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
748 { 0, 0, 0, 0, 0, 0, 0, 0}
/*
 * get_cachetype_table(): copy the arm_p{i,d}cache_* globals from the
 * cachetab[] row whose ct_cpuid matches this CPU's masked id, then
 * derive arm_dcache_align(_mask) from the D-cache line size.
 * NOTE(review): sampled listing -- the loop's closing braces are not
 * visible here; presumably the search stops at the first match.
 */
751 static void get_cachetype_table(void);
754 get_cachetype_table()
757 u_int32_t cpuid = cpufunc_id();
759 for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
760 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
761 arm_pcache_type = cachetab[i].ct_pcache_type;
762 arm_pcache_unified = cachetab[i].ct_pcache_unified;
763 arm_pdcache_size = cachetab[i].ct_pdcache_size;
764 arm_pdcache_line_size =
765 cachetab[i].ct_pdcache_line_size;
766 arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
767 arm_picache_size = cachetab[i].ct_picache_size;
768 arm_picache_line_size =
769 cachetab[i].ct_picache_line_size;
770 arm_picache_ways = cachetab[i].ct_picache_ways;
773 arm_dcache_align = arm_pdcache_line_size;
775 arm_dcache_align_mask = arm_dcache_align - 1;
778 #endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
/*
 * set_cpufuncs() body: identify the CPU from cpufunc_id(), install the
 * matching per-CPU dispatch table into the global 'cpufuncs', set
 * cpu_reset_needs_v4_MMU_disable for locore.s, probe cache geometry
 * (cp15 or table), and run the pmap PTE initializer for the core.
 * Falls through to panic()/ARCHITECTURE_NOT_PRESENT when no clause
 * matches; on success the dcache alignment mask is handed to UMA.
 * XScale clauses also reset the p14 Performance Monitoring Unit.
 * NOTE(review): sampled listing -- the function header, surrounding
 * #ifdefs, and many closing braces/returns are not visible here.
 */
781 * Cannot panic here as we may not have a console yet ...
787 cputype = cpufunc_id();
788 cputype &= CPU_ID_CPU_MASK;
791 * NOTE: cpu_do_powersave defaults to off. If we encounter a
792 * CPU type where we want to use it by default, then we set it.
796 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
797 CPU_ID_IS7(cputype) &&
798 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
799 cpufuncs = arm7tdmi_cpufuncs;
800 cpu_reset_needs_v4_MMU_disable = 0;
801 get_cachetype_cp15();
802 pmap_pte_init_generic();
807 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
808 (cputype & 0x0000f000) == 0x00008000) {
809 cpufuncs = arm8_cpufuncs;
810 cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
811 get_cachetype_cp15();
812 pmap_pte_init_arm8();
815 #endif /* CPU_ARM8 */
817 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
818 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
819 (cputype & 0x0000f000) == 0x00009000) {
820 cpufuncs = arm9_cpufuncs;
821 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
822 get_cachetype_cp15();
/* Derive the arm9 D-cache walker increments/limits from the log2
 * geometry that get_cachetype_cp15() just computed. */
823 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
824 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
825 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
826 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
827 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
828 #ifdef ARM9_CACHE_WRITE_THROUGH
829 pmap_pte_init_arm9();
831 pmap_pte_init_generic();
835 #endif /* CPU_ARM9 */
837 if (/* cputype == CPU_ID_ARM1020T || */
838 cputype == CPU_ID_ARM1020E) {
840 * Select write-through cacheing (this isn't really an
841 * option on ARM1020T).
843 cpufuncs = arm10_cpufuncs;
844 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
845 get_cachetype_cp15();
846 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
847 arm10_dcache_sets_max =
848 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
849 arm10_dcache_sets_inc;
850 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
851 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
852 pmap_pte_init_generic();
855 #endif /* CPU_ARM10 */
857 if (cputype == CPU_ID_SA110) {
858 cpufuncs = sa110_cpufuncs;
859 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
860 get_cachetype_table();
864 #endif /* CPU_SA110 */
866 if (cputype == CPU_ID_SA1100) {
867 cpufuncs = sa11x0_cpufuncs;
868 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
869 get_cachetype_table();
871 /* Use powersave on this CPU. */
872 cpu_do_powersave = 1;
876 #endif /* CPU_SA1100 */
878 if (cputype == CPU_ID_SA1110) {
879 cpufuncs = sa11x0_cpufuncs;
880 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
881 get_cachetype_table();
883 /* Use powersave on this CPU. */
884 cpu_do_powersave = 1;
888 #endif /* CPU_SA1110 */
890 if (cputype == CPU_ID_IXP1200) {
891 cpufuncs = ixp12x0_cpufuncs;
892 cpu_reset_needs_v4_MMU_disable = 1;
893 get_cachetype_table();
897 #endif /* CPU_IXP12X0 */
898 #ifdef CPU_XSCALE_80200
899 if (cputype == CPU_ID_80200) {
900 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
905 * Reset the Performance Monitoring Unit to a
907 * - CCNT, PMN0, PMN1 reset to 0
908 * - overflow indications cleared
909 * - all counters disabled
911 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
913 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
916 #if defined(XSCALE_CCLKCFG)
918 * Crank CCLKCFG to maximum legal value.
920 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
922 : "r" (XSCALE_CCLKCFG));
926 * XXX Disable ECC in the Bus Controller Unit; we
927 * don't really support it, yet. Clear any pending
930 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
932 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
934 cpufuncs = xscale_cpufuncs;
935 #if defined(PERFCTRS)
940 * i80200 errata: Step-A0 and A1 have a bug where
941 * D$ dirty bits are not cleared on "invalidate by
944 * Workaround: Clean cache line before invalidating.
946 if (rev == 0 || rev == 1)
947 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
949 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
950 get_cachetype_cp15();
951 pmap_pte_init_xscale();
954 #endif /* CPU_XSCALE_80200 */
955 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
956 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
957 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
958 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
960 * Reset the Performance Monitoring Unit to a
962 * - CCNT, PMN0, PMN1 reset to 0
963 * - overflow indications cleared
964 * - all counters disabled
966 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
968 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
971 cpufuncs = xscale_cpufuncs;
972 #if defined(PERFCTRS)
976 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
977 get_cachetype_cp15();
978 pmap_pte_init_xscale();
981 #endif /* CPU_XSCALE_80321 */
983 #if defined(CPU_XSCALE_81342)
984 if (cputype == CPU_ID_81342) {
985 cpufuncs = xscalec3_cpufuncs;
986 #if defined(PERFCTRS)
990 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
991 get_cachetype_cp15();
992 pmap_pte_init_xscale();
995 #endif /* CPU_XSCALE_81342 */
996 #ifdef CPU_XSCALE_PXA2X0
997 /* ignore core revision to test PXA2xx CPUs */
998 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
999 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1001 cpufuncs = xscale_cpufuncs;
1002 #if defined(PERFCTRS)
1006 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1007 get_cachetype_cp15();
1008 pmap_pte_init_xscale();
1010 /* Use powersave on this CPU. */
1011 cpu_do_powersave = 1;
1015 #endif /* CPU_XSCALE_PXA2X0 */
1016 #ifdef CPU_XSCALE_IXP425
1017 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1018 cputype == CPU_ID_IXP425_266) {
1020 cpufuncs = xscale_cpufuncs;
1021 #if defined(PERFCTRS)
1025 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1026 get_cachetype_cp15();
1027 pmap_pte_init_xscale();
1031 #endif /* CPU_XSCALE_IXP425 */
1033 * Bzzzz. And the answer was ...
1035 panic("No support for this CPU type (%08x) in kernel", cputype);
1036 return(ARCHITECTURE_NOT_PRESENT);
/* Reached from the matched clauses above: align UMA allocations to the
 * discovered D-cache line size. */
1038 uma_set_align(arm_dcache_align_mask);
/*
 * Abort-fixup support: cpufunc_null_fixup() is the no-op fixup used by
 * most tables above; the DFC_* macros expand to printf/disassemble
 * only when DEBUG_FAULT_CORRECTION is defined, otherwise to nothing.
 * NOTE(review): sampled listing -- the #else between the two macro
 * pairs and the function braces are not visible here.
 */
1043 * Fixup routines for data and prefetch aborts.
1045 * Several compile time symbols are used
1047 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1048 * correction of registers after a fault.
1049 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1050 * when defined should use late aborts
1055 * Null abort fixup routine.
1056 * For use when no fixup is required.
1059 cpufunc_null_fixup(arg)
1062 return(ABORT_FIXUP_OK);
1066 #if defined(CPU_ARM7TDMI)
1068 #ifdef DEBUG_FAULT_CORRECTION
1069 #define DFC_PRINTF(x) printf x
1070 #define DFC_DISASSEMBLE(x) disassemble(x)
1072 #define DFC_PRINTF(x) /* nothing */
1073 #define DFC_DISASSEMBLE(x) /* nothing */
/*
 * early_abort_fixup(): undo the base-register writeback of a faulted
 * LDM/STM (opcode mask 0x0e000000 == 0x08000000) or LDC/STC
 * (== 0x0c000000) so the instruction can be restarted.  For SVC-mode
 * aborts the SVC r14 is temporarily swapped into the usr slot so the
 * trapframe registers can be indexed as a flat int array; the swap is
 * reversed before returning.  Returns ABORT_FIXUP_OK on success,
 * ABORT_FIXUP_FAILED when the writeback target cannot be corrected.
 * NOTE(review): sampled listing -- several guard lines (e.g. the base
 * == 13/15 checks before the FAILED returns) are not visible here.
 */
1077 * "Early" data abort fixup.
1079 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
1080 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1082 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1085 early_abort_fixup(arg)
1088 trapframe_t *frame = arg;
1090 u_int fault_instruction;
1093 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1095 /* Ok an abort in SVC mode */
1098 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1099 * as the fault happened in svc mode but we need it in the
1100 * usr slot so we can treat the registers as an array of ints
1102 * NOTE: This PC is in the position but writeback is not
1104 * Doing it like this is more efficient than trapping this
1105 * case in all possible locations in the following fixup code.
1108 saved_lr = frame->tf_usr_lr;
1109 frame->tf_usr_lr = frame->tf_svc_lr;
1112 * Note the trapframe does not have the SVC r13 so a fault
1113 * from an instruction with writeback to r13 in SVC mode is
1114 * not allowed. This should not happen as the kstack is
1119 /* Get fault address and status from the CPU */
1121 fault_pc = frame->tf_pc;
1122 fault_instruction = *((volatile unsigned int *)fault_pc);
1124 /* Decode the fault instruction and fix the registers as needed */
1126 if ((fault_instruction & 0x0e000000) == 0x08000000) {
1130 int *registers = &frame->tf_r0;
1132 DFC_PRINTF(("LDM/STM\n"));
1133 DFC_DISASSEMBLE(fault_pc);
/* Bit 21 (W) set => base register writeback occurred; undo it. */
1134 if (fault_instruction & (1 << 21)) {
1135 DFC_PRINTF(("This instruction must be corrected\n"));
1136 base = (fault_instruction >> 16) & 0x0f;
1138 return ABORT_FIXUP_FAILED;
1139 /* Count registers transferred */
1141 for (loop = 0; loop < 16; ++loop) {
1142 if (fault_instruction & (1<<loop))
1145 DFC_PRINTF(("%d registers used\n", count));
1146 DFC_PRINTF(("Corrected r%d by %d bytes ",
/* Bit 23 (U) chooses increment vs. decrement addressing. */
1148 if (fault_instruction & (1 << 23)) {
1149 DFC_PRINTF(("down\n"));
1150 registers[base] -= count * 4;
1152 DFC_PRINTF(("up\n"));
1153 registers[base] += count * 4;
1156 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1159 int *registers = &frame->tf_r0;
1161 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1163 DFC_DISASSEMBLE(fault_pc);
1165 /* Only need to fix registers if write back is turned on */
1167 if ((fault_instruction & (1 << 21)) != 0) {
1168 base = (fault_instruction >> 16) & 0x0f;
1170 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1171 return ABORT_FIXUP_FAILED;
1173 return ABORT_FIXUP_FAILED;
/* LDC/STC offset is an 8-bit word count, scaled by 4. */
1175 offset = (fault_instruction & 0xff) << 2;
1176 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1177 if ((fault_instruction & (1 << 23)) != 0)
1179 registers[base] += offset;
1180 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1182 } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1183 return ABORT_FIXUP_FAILED;
1185 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1187 /* Ok an abort in SVC mode */
1190 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1191 * as the fault happened in svc mode but we need it in the
1192 * usr slot so we can treat the registers as an array of ints
1194 * NOTE: This PC is in the position but writeback is not
1196 * Doing it like this is more efficient than trapping this
1197 * case in all possible locations in the prior fixup code.
1200 frame->tf_svc_lr = frame->tf_usr_lr;
1201 frame->tf_usr_lr = saved_lr;
1204 * Note the trapframe does not have the SVC r13 so a fault
1205 * from an instruction with writeback to r13 in SVC mode is
1206 * not allowed. This should not happen as the kstack is
1211 return(ABORT_FIXUP_OK);
1213 #endif /* CPU_ARM2/250/3/6/7 */
1216 #if defined(CPU_ARM7TDMI)
1218  * "Late" (base updated) data abort fixup
1220  * For ARM6 (in late-abort mode) and ARM7.
1222 * In this model, all data-transfer instructions need fixing up. We defer
1223 * LDM, STM, LDC and STC fixup to the early-abort handler.
/*
 * late_abort_fixup(arg):
 *   arg is really a trapframe_t *.  Decodes the instruction that caused a
 *   data abort and undoes/redoes the base-register writeback so the trap
 *   handler can restart cleanly.  Returns ABORT_FIXUP_OK on success or
 *   ABORT_FIXUP_FAILED when the instruction cannot be fixed up.
 */
1226 late_abort_fixup(arg)
1229 trapframe_t *frame = arg;
1231 u_int fault_instruction;
1234 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1236 /* Ok an abort in SVC mode */
1239 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1240 * as the fault happened in svc mode but we need it in the
1241 * usr slot so we can treat the registers as an array of ints
1243 * NOTE: This PC is in the position but writeback is not
1245 * Doing it like this is more efficient than trapping this
1246 * case in all possible locations in the following fixup code.
1249 saved_lr = frame->tf_usr_lr;
1250 frame->tf_usr_lr = frame->tf_svc_lr;
1253 * Note the trapframe does not have the SVC r13 so a fault
1254 * from an instruction with writeback to r13 in SVC mode is
1255 * not allowed. This should not happen as the kstack is
1260 /* Get fault address and status from the CPU */
1262 fault_pc = frame->tf_pc;
/* Fetch the faulting instruction word itself for decoding. */
1263 fault_instruction = *((volatile unsigned int *)fault_pc);
1265 /* Decode the fault instruction and fix the registers as needed */
1267 /* Was it a swap (SWP/SWPB) instruction?  No writeback, nothing to fix. */
1269 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1270 DFC_DISASSEMBLE(fault_pc);
1271 } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1273 /* Was it a ldr/str (single data transfer) instruction? */
1274 /* This is for late abort only */
/* Treat the trapframe's r0..r15 slots as an indexable array. */
1278 int *registers = &frame->tf_r0;
1280 DFC_DISASSEMBLE(fault_pc);
1282 /* This is for late abort only */
/*
 * Bit 24 clear => post-indexed; bit 21 set => writeback requested.
 * Either way the base register was updated and must be restored.
 */
1284 if ((fault_instruction & (1 << 24)) == 0
1285 || (fault_instruction & (1 << 21)) != 0) {
1286 /* postindexed ldr/str with no writeback */
/* Base register number is in instruction bits 19:16. */
1288 base = (fault_instruction >> 16) & 0x0f;
/* Writeback to SP while in SVC mode cannot be fixed (no SVC r13 in frame). */
1290 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1291 return ABORT_FIXUP_FAILED;
1293 return ABORT_FIXUP_FAILED;
1294 DFC_PRINTF(("late abt fix: r%d=%08x : ",
1295 base, registers[base]));
/* Bit 25 clear => 12-bit immediate offset form. */
1296 if ((fault_instruction & (1 << 25)) == 0) {
1297 /* Immediate offset - easy */
1299 offset = fault_instruction & 0xfff;
/* Bit 23 (U) selects add vs. subtract of the offset. */
1300 if ((fault_instruction & (1 << 23)))
1302 registers[base] += offset;
1303 DFC_PRINTF(("imm=%08x ", offset));
1305 /* offset is a shifted register */
/* Offset register number is in bits 3:0. */
1308 offset = fault_instruction & 0x0f;
1310 return ABORT_FIXUP_FAILED;
1313 * Register offset - hard we have to
1314 * cope with shifts !
1316 offset = registers[offset];
/* Bit 4 clear => immediate shift amount; set => register-specified shift. */
1318 if ((fault_instruction & (1 << 4)) == 0)
1319 /* shift with amount */
1320 shift = (fault_instruction >> 7) & 0x1f;
1322 /* shift with register */
1323 if ((fault_instruction & (1 << 7)) != 0)
1324 /* undefined for now so bail out */
1325 return ABORT_FIXUP_FAILED;
1326 shift = ((fault_instruction >> 8) & 0xf);
1328 return ABORT_FIXUP_FAILED;
1329 DFC_PRINTF(("shift reg=%d ", shift));
1330 shift = registers[shift];
1332 DFC_PRINTF(("shift=%08x ", shift));
/* Shift type is in bits 6:5: LSL, LSR, ASR, ROR/RRX. */
1333 switch (((fault_instruction >> 5) & 0x3)) {
1334 case 0 : /* Logical left */
1335 offset = (int)(((u_int)offset) << shift);
1337 case 1 : /* Logical Right */
/* An encoded shift of 0 means LSR #32 per the ARM encoding. */
1338 if (shift == 0) shift = 32;
1339 offset = (int)(((u_int)offset) >> shift);
1341 case 2 : /* Arithmetic Right */
1342 if (shift == 0) shift = 32;
1343 offset = (int)(((int)offset) >> shift);
1345 case 3 : /* Rotate right (rol or rxx) */
/* ROR/RRX offsets are not handled; give up. */
1346 return ABORT_FIXUP_FAILED;
1350 DFC_PRINTF(("abt: fixed LDR/STR with "
1351 "register offset\n"));
1352 if ((fault_instruction & (1 << 23)))
1354 DFC_PRINTF(("offset=%08x ", offset));
/* Re-apply the computed offset to restore the base register. */
1355 registers[base] += offset;
1357 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
/* Undo the earlier SVC-mode r14 shuffle before returning. */
1361 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1363 /* Ok an abort in SVC mode */
1366 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1367 * as the fault happened in svc mode but we need it in the
1368 * usr slot so we can treat the registers as an array of ints
1370 * NOTE: This PC is in the position but writeback is not
1372 * Doing it like this is more efficient than trapping this
1373 * case in all possible locations in the prior fixup code.
1376 frame->tf_svc_lr = frame->tf_usr_lr;
1377 frame->tf_usr_lr = saved_lr;
1380 * Note the trapframe does not have the SVC r13 so a fault
1381 * from an instruction with writeback to r13 in SVC mode is
1382 * not allowed. This should not happen as the kstack is
1388 * Now let the early-abort fixup routine have a go, in case it
1389 * was an LDM, STM, LDC or STC that faulted.
1392 return early_abort_fixup(arg);
1394 #endif /* CPU_ARM7TDMI */
1400 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1401 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
1402 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1403 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1404 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
1417 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
/*
 * parse_cpu_options(args, optlist, cpuctrl):
 *   Walk the NULL-terminated option table, look each option name up in the
 *   boot arguments as a boolean, and apply the table's co_trueop/co_falseop
 *   action (OR sets the bits in co_value, BIC clears them) to the running
 *   CPU control-register value.  Returns the updated control word.
 */
1420 parse_cpu_options(args, optlist, cpuctrl)
1422 struct cpu_option *optlist;
1430 while (optlist->co_name) {
1431 if (get_bootconf_option(args, optlist->co_name,
1432 BOOTOPT_TYPE_BOOLEAN, &integer)) {
/* Option present and true: apply the "true" action. */
1434 if (optlist->co_trueop == OR)
1435 cpuctrl |= optlist->co_value;
1436 else if (optlist->co_trueop == BIC)
1437 cpuctrl &= ~optlist->co_value;
/* Option present and false: apply the "false" action. */
1439 if (optlist->co_falseop == OR)
1440 cpuctrl |= optlist->co_value;
1441 else if (optlist->co_falseop == BIC)
1442 cpuctrl &= ~optlist->co_value;
1449 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1451 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot-option table shared by the ARM6/7/8 setup routines: maps boot
 * argument names to CPU control-register bits.  The two ops say how the
 * bit is applied when the option is true/false (OR = set, BIC = clear,
 * IGN = leave alone); see parse_cpu_options().
 */
1452 struct cpu_option arm678_options[] = {
/* Legacy (COMPAT_12) spellings without the "cpu." prefix. */
1454 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
1455 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1456 #endif /* COMPAT_12 */
1457 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1458 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1459 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1460 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1461 { NULL, IGN, IGN, 0 }
1464 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
/*
 * ARM7TDMI-specific boot options; processed after arm678_options so the
 * "arm7."-prefixed names can override the generic "cpu." settings.
 */
1467 struct cpu_option arm7tdmi_options[] = {
1468 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1469 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1470 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1471 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
/* Legacy (COMPAT_12) spelling of the FPA coprocessor-clock option. */
1473 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
1474 #endif /* COMPAT_12 */
1475 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
1476 { NULL, IGN, IGN, 0 }
/*
 * arm7tdmi_setup(args):
 *   Build the ARM7TDMI control-register value from defaults plus the boot
 *   arguments, flush the caches, and program the register.
 */
1480 arm7tdmi_setup(args)
/* Default: MMU, 32-bit program/data space, system protection, I/D cache, WB. */
1485 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1486 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1487 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
/* Apply generic ARM6/7/8 options first, then the ARM7-specific ones. */
1489 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1490 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
/* Presumably under an #ifdef for big-endian kernels — confirm in full source. */
1493 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1496 /* Clear out the cache */
1497 cpu_idcache_wbinv_all();
1499 /* Set the control register */
/* Mask 0xffffffff: update every bit of the control register. */
1501 cpu_control(0xffffffff, cpuctrl);
1503 #endif /* CPU_ARM7TDMI */
/*
 * ARM8-specific boot options; processed after arm678_options.  Adds
 * branch-prediction control on top of the cache/write-buffer knobs.
 */
1506 struct cpu_option arm8_options[] = {
1507 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1508 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1509 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1510 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
/* Legacy (COMPAT_12) spelling without a prefix. */
1512 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1513 #endif /* COMPAT_12 */
1514 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1515 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1516 { NULL, IGN, IGN, 0 }
/* Body of arm8_setup(args): control register plus ARM8 clock/test config. */
1524 int cpuctrl, cpuctrlmask;
/* Default control word: MMU, 32-bit spaces, system protection, I/D cache, WB. */
1528 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1529 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1530 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
/* Bits this routine is prepared to change (mask kept for documentation). */
1531 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1532 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1533 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1534 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1535 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1537 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1538 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1541 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1542 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1545 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1548 /* Get clock configuration */
/* Read current clock/test register; only the low nibble is kept. */
1549 clocktest = arm8_clock_config(0, 0) & 0x0f;
1551 /* Special ARM8 clock and test configuration */
1552 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1556 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1560 clocktest &= ~(0x01);
1563 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1567 clocktest &= ~(0x02);
1570 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
/*
 * NOTE(review): the mask ~0xc0 clears bits 7:6 but the new value is
 * shifted into bits 3:2 — mask and shift look inconsistent; verify
 * against the ARM8 clock/test register layout.
 */
1571 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1574 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
/* Test bits live in bits 7:5 of the clock/test register. */
1575 clocktest |= (integer & 7) << 5;
1579 /* Clear out the cache */
1580 cpu_idcache_wbinv_all();
1582 /* Set the control register */
/* Mask 0xffffffff: update every bit of the control register. */
1584 cpu_control(0xffffffff, cpuctrl);
1586 /* Set the clock/test register */
1588 arm8_clock_config(0x7f, clocktest);
1590 #endif /* CPU_ARM8 */
/*
 * ARM9 boot options.  ARM9 has separate I and D cache enables
 * (CPU_CONTROL_IC_ENABLE / CPU_CONTROL_DC_ENABLE) rather than the single
 * combined IDC bit of the older cores.
 */
1593 struct cpu_option arm9_options[] = {
1594 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1595 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1596 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1597 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1598 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1599 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1600 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1601 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1602 { NULL, IGN, IGN, 0 }
/* Body of arm9_setup(args): configure the ARM9 control register. */
1609 int cpuctrl, cpuctrlmask;
/* Defaults include late abort and round-robin cache replacement. */
1611 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1612 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1613 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1614 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1615 CPU_CONTROL_ROUNDROBIN;
/* Bits this routine may change; passed as the mask to cpu_control() below. */
1616 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1617 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1618 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1619 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1620 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1621 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1622 | CPU_CONTROL_ROUNDROBIN;
1624 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1625 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1628 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1631 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate the vector page to high memory if the kernel placed it there. */
1633 if (vector_page == ARM_VECTORS_HIGH)
1634 cpuctrl |= CPU_CONTROL_VECRELOC;
1636 /* Clear out the cache */
1637 cpu_idcache_wbinv_all();
1639 /* Set the control register */
/* Unlike the other setups, only the bits in cpuctrlmask are touched here. */
1640 cpu_control(cpuctrlmask, cpuctrl);
1644 #endif /* CPU_ARM9 */
/* ARM10 boot options; same shape as arm9_options with the "arm10." prefix. */
1647 struct cpu_option arm10_options[] = {
1648 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1649 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1650 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1651 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1652 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1653 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1654 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1655 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1656 { NULL, IGN, IGN, 0 }
/* Body of arm10_setup(args): configure the ARM10 control register. */
1663 int cpuctrl, cpuctrlmask;
/* Defaults include branch prediction; no 32BP/32BD bits on this core. */
1665 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1666 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1667 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
/* Documented set of changeable bits (the write below uses a full mask). */
1668 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1669 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1670 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1671 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1672 | CPU_CONTROL_BPRD_ENABLE
1673 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1675 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1676 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1679 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1682 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1685 /* Clear out the cache */
1686 cpu_idcache_wbinv_all();
1688 /* Now really make sure they are clean. */
/* CP15 c7,c7,0 — looks like an invalidate-both-caches op; r0 is a dummy. */
1689 asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1691 /* Set the control register */
1693 cpu_control(0xffffffff, cpuctrl);
/* Flush again after the control-register change takes effect. */
1696 cpu_idcache_wbinv_all();
1698 #endif /* CPU_ARM10 */
/* SA-110 (StrongARM) boot options; split I/D cache enables like ARM9. */
1701 struct cpu_option sa110_options[] = {
/* Legacy (COMPAT_12) spellings without the "cpu." prefix. */
1703 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1704 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1705 #endif /* COMPAT_12 */
1706 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1707 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1708 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1709 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1710 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1711 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1712 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1713 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1714 { NULL, IGN, IGN, 0 }
/* Body of sa110_setup(args): configure the SA-110 control register. */
1721 int cpuctrl, cpuctrlmask;
1723 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1724 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1725 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1726 | CPU_CONTROL_WBUF_ENABLE;
/* Kept for documentation; the actual write below uses a full mask. */
1727 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1728 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1729 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1730 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1731 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1732 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1733 | CPU_CONTROL_CPCLK;
1735 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1736 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1739 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1742 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1745 /* Clear out the cache */
1746 cpu_idcache_wbinv_all();
1748 /* Set the control register */
1750 /* cpu_control(cpuctrlmask, cpuctrl);*/
1751 cpu_control(0xffffffff, cpuctrl);
1754 * enable clockswitching, note that this doesn't read or write to r0,
1755 * r0 is just to make it valid asm
/* SA-110 implementation-specific CP15 c15 op to enable clock switching. */
1757 __asm ("mcr 15, 0, r0, c15, c1, 2");
1759 #endif /* CPU_SA110 */
1761 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/* SA-1100/SA-1110 boot options; same pattern as sa110_options. */
1762 struct cpu_option sa11x0_options[] = {
/* Legacy (COMPAT_12) spellings without the "cpu." prefix. */
1764 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1765 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1766 #endif /* COMPAT_12 */
1767 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1768 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1769 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1770 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1771 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1772 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1773 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1774 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1775 { NULL, IGN, IGN, 0 }
/* Body of sa11x0_setup(args): configure the SA-1100/SA-1110 control register. */
1782 int cpuctrl, cpuctrlmask;
/* Defaults add late abort on top of the SA-110 set. */
1784 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1785 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1786 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1787 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
/* Kept for documentation; the actual write below uses a full mask. */
1788 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1789 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1790 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1791 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1792 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1793 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1794 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1796 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1797 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1801 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
1804 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate the vector page to high memory if the kernel placed it there. */
1807 if (vector_page == ARM_VECTORS_HIGH)
1808 cpuctrl |= CPU_CONTROL_VECRELOC;
1809 /* Clear out the cache */
1810 cpu_idcache_wbinv_all();
1811 /* Set the control register */
1813 cpu_control(0xffffffff, cpuctrl);
1815 #endif /* CPU_SA1100 || CPU_SA1110 */
1817 #if defined(CPU_IXP12X0)
/* IXP12x0 boot options; same pattern as the other split-cache tables. */
1818 struct cpu_option ixp12x0_options[] = {
1819 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1820 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1821 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1822 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1823 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1824 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1825 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1826 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1827 { NULL, IGN, IGN, 0 }
/* Body of ixp12x0_setup(args): configure the IXP12x0 control register. */
1834 int cpuctrl, cpuctrlmask;
1837 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
1838 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
1839 | CPU_CONTROL_IC_ENABLE;
/* Unlike most setups here, this mask really is passed to cpu_control(). */
1841 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
1842 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1843 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
1844 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
1845 | CPU_CONTROL_VECRELOC;
1847 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1848 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1851 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
1854 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate the vector page to high memory if the kernel placed it there. */
1857 if (vector_page == ARM_VECTORS_HIGH)
1858 cpuctrl |= CPU_CONTROL_VECRELOC;
1860 /* Clear out the cache */
1861 cpu_idcache_wbinv_all();
1863 /* Set the control register */
1865 /* cpu_control(0xffffffff, cpuctrl); */
1866 cpu_control(cpuctrlmask, cpuctrl);
1868 #endif /* CPU_IXP12X0 */
1870 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1871 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1872 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/* XScale boot options: cache enables plus branch prediction. */
1873 struct cpu_option xscale_options[] = {
/* Legacy (COMPAT_12) spellings without a prefix. */
1875 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1876 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1877 #endif /* COMPAT_12 */
1878 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1879 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1880 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1881 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1882 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1883 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1884 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1885 { NULL, IGN, IGN, 0 }
/* Body of xscale_setup(args): control register plus write-coalescing config. */
1892 #ifndef CPU_XSCALE_CORE3
1895 int cpuctrl, cpuctrlmask;
1898 * The XScale Write Buffer is always enabled. Our option
1899 * is to enable/disable coalescing. Note that bits 6:3
1900 * must always be enabled.
1903 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1904 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1905 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1906 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1907 | CPU_CONTROL_BPRD_ENABLE;
/* Kept for documentation; the actual write below uses a full mask. */
1908 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1909 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1910 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1911 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1912 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1913 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1914 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1916 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1917 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1920 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1923 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate the vector page to high memory if the kernel placed it there. */
1926 if (vector_page == ARM_VECTORS_HIGH)
1927 cpuctrl |= CPU_CONTROL_VECRELOC;
1929 /* Clear out the cache */
1930 cpu_idcache_wbinv_all();
1933 * Set the control register. Note that bits 6:3 must always
1937 /* cpu_control(cpuctrlmask, cpuctrl);*/
1938 cpu_control(0xffffffff, cpuctrl);
1940 #ifndef CPU_XSCALE_CORE3
1941 /* Make sure write coalescing is turned on */
/* Read the XScale auxiliary control register (CP15 c1, c0, 1). */
1942 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1944 #ifdef XSCALE_NO_COALESCE_WRITES
/* K bit set disables write coalescing; cleared it enables coalescing. */
1945 auxctl |= XSCALE_AUXCTL_K;
1947 auxctl &= ~XSCALE_AUXCTL_K;
/* Write the updated auxiliary control register back. */
1949 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1953 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425