1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 * products derived from this software without specific prior written
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * RiscBSD kernel project
43 * C functions for supporting CPU / MMU / TLB specific operations.
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
50 #include <sys/cdefs.h>
52 #include <sys/types.h>
53 #include <sys/param.h>
54 #include <sys/systm.h>
56 #include <sys/mutex.h>
58 #include <machine/bus.h>
59 #include <machine/cpu.h>
60 #include <machine/disassem.h>
65 #include <machine/cpuconf.h>
66 #include <machine/cpufunc.h>
67 #include <machine/bootconfig.h>
69 #ifdef CPU_XSCALE_80200
70 #include <arm/xscale/i80200/i80200reg.h>
71 #include <arm/xscale/i80200/i80200var.h>
74 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
75 #include <arm/xscale/i80321/i80321reg.h>
76 #include <arm/xscale/i80321/i80321var.h>
79 #ifdef CPU_XSCALE_IXP425
80 #include <arm/xscale/ixp425/ixp425reg.h>
81 #include <arm/xscale/ixp425/ixp425var.h>
84 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
85 defined(CPU_XSCALE_80219)
86 #include <arm/xscale/xscalereg.h>
/* Hook for CPU performance-monitor support (set by PMC code, if any). */
90 struct arm_pmc_funcs *arm_pmc;
93 /* PRIMARY CACHE VARIABLES */
/*
 * Geometry of the primary I-cache and D-cache, filled in at boot by
 * get_cachetype_cp15() or get_cachetype_table().  Sizes are in bytes.
 */
95 int arm_picache_line_size;
98 int arm_pdcache_size; /* and unified */
99 int arm_pdcache_line_size;
100 int arm_pdcache_ways;
/* Non-zero when the primary cache is unified (no separate I/D caches). */
103 int arm_pcache_unified;
/* D-cache line size and its mask, used for alignment of DMA buffers etc. */
105 int arm_dcache_align;
106 int arm_dcache_align_mask;
108 /* 1 == use cpu_sleep(), 0 == don't */
109 int cpu_do_powersave;
/*
 * cpu_functions vector for the ARM7TDMI core.  The ARM7TDMI has a small
 * unified cache, so all D/I cache ops map onto the combined flush routine
 * and several slots are no-ops.
 * NOTE(review): several initializer entries (e.g. the id/cpwait group) and
 * the closing brace appear to be missing from this extracted view — verify
 * against the upstream cpufunc.c before editing.
 */
113 struct cpu_functions arm7tdmi_cpufuncs = {
117 cpufunc_nullop, /* cpwait */
121 cpufunc_control, /* control */
122 cpufunc_domains, /* domain */
123 arm7tdmi_setttb, /* setttb */
124 cpufunc_faultstatus, /* faultstatus */
125 cpufunc_faultaddress, /* faultaddress */
129 arm7tdmi_tlb_flushID, /* tlb_flushID */
130 arm7tdmi_tlb_flushID_SE, /* tlb_flushID_SE */
131 arm7tdmi_tlb_flushID, /* tlb_flushI */
132 arm7tdmi_tlb_flushID_SE, /* tlb_flushI_SE */
133 arm7tdmi_tlb_flushID, /* tlb_flushD */
134 arm7tdmi_tlb_flushID_SE, /* tlb_flushD_SE */
136 /* Cache operations */
138 cpufunc_nullop, /* icache_sync_all */
139 (void *)cpufunc_nullop, /* icache_sync_range */
141 arm7tdmi_cache_flushID, /* dcache_wbinv_all */
142 (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range */
143 (void *)arm7tdmi_cache_flushID, /* dcache_inv_range */
144 (void *)cpufunc_nullop, /* dcache_wb_range */
146 arm7tdmi_cache_flushID, /* idcache_wbinv_all */
147 (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range */
149 /* Other functions */
151 cpufunc_nullop, /* flush_prefetchbuf */
152 cpufunc_nullop, /* drain_writebuf */
153 cpufunc_nullop, /* flush_brnchtgt_C */
154 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
156 (void *)cpufunc_nullop, /* sleep */
160 late_abort_fixup, /* dataabt_fixup */
161 cpufunc_null_fixup, /* prefetchabt_fixup */
163 arm7tdmi_context_switch, /* context_switch */
165 arm7tdmi_setup /* cpu setup */
168 #endif /* CPU_ARM7TDMI */
/*
 * cpu_functions vector for the ARM8 core.  Cache maintenance uses the
 * ARM8 purge/clean primitives; dcache_inv_range is deliberately aliased
 * to the purge routine (see the XXX marker below).
 * NOTE(review): some initializer entries and the closing brace are
 * missing from this extracted view — verify against upstream.
 */
171 struct cpu_functions arm8_cpufuncs = {
175 cpufunc_nullop, /* cpwait */
179 cpufunc_control, /* control */
180 cpufunc_domains, /* domain */
181 arm8_setttb, /* setttb */
182 cpufunc_faultstatus, /* faultstatus */
183 cpufunc_faultaddress, /* faultaddress */
187 arm8_tlb_flushID, /* tlb_flushID */
188 arm8_tlb_flushID_SE, /* tlb_flushID_SE */
189 arm8_tlb_flushID, /* tlb_flushI */
190 arm8_tlb_flushID_SE, /* tlb_flushI_SE */
191 arm8_tlb_flushID, /* tlb_flushD */
192 arm8_tlb_flushID_SE, /* tlb_flushD_SE */
194 /* Cache operations */
196 cpufunc_nullop, /* icache_sync_all */
197 (void *)cpufunc_nullop, /* icache_sync_range */
199 arm8_cache_purgeID, /* dcache_wbinv_all */
200 (void *)arm8_cache_purgeID, /* dcache_wbinv_range */
201 /*XXX*/ (void *)arm8_cache_purgeID, /* dcache_inv_range */
202 (void *)arm8_cache_cleanID, /* dcache_wb_range */
204 arm8_cache_purgeID, /* idcache_wbinv_all */
205 (void *)arm8_cache_purgeID, /* idcache_wbinv_range */
207 /* Other functions */
209 cpufunc_nullop, /* flush_prefetchbuf */
210 cpufunc_nullop, /* drain_writebuf */
211 cpufunc_nullop, /* flush_brnchtgt_C */
212 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
214 (void *)cpufunc_nullop, /* sleep */
218 cpufunc_null_fixup, /* dataabt_fixup */
219 cpufunc_null_fixup, /* prefetchabt_fixup */
221 arm8_context_switch, /* context_switch */
223 arm8_setup /* cpu setup */
225 #endif /* CPU_ARM8 */
/*
 * cpu_functions vector for the ARM9 core (ARMv4, Harvard caches).
 * TLB ops use the common armv4_* routines; cache ops use the
 * dedicated arm9_* set/way routines.
 * NOTE(review): some initializer entries and the closing brace are
 * missing from this extracted view — verify against upstream.
 */
228 struct cpu_functions arm9_cpufuncs = {
232 cpufunc_nullop, /* cpwait */
236 cpufunc_control, /* control */
237 cpufunc_domains, /* Domain */
238 arm9_setttb, /* Setttb */
239 cpufunc_faultstatus, /* Faultstatus */
240 cpufunc_faultaddress, /* Faultaddress */
244 armv4_tlb_flushID, /* tlb_flushID */
245 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
246 armv4_tlb_flushI, /* tlb_flushI */
247 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
248 armv4_tlb_flushD, /* tlb_flushD */
249 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
251 /* Cache operations */
253 arm9_icache_sync_all, /* icache_sync_all */
254 arm9_icache_sync_range, /* icache_sync_range */
256 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
257 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
258 /*XXX*/ arm9_dcache_wbinv_range, /* dcache_inv_range */
259 arm9_dcache_wb_range, /* dcache_wb_range */
261 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
262 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
264 /* Other functions */
266 cpufunc_nullop, /* flush_prefetchbuf */
267 armv4_drain_writebuf, /* drain_writebuf */
268 cpufunc_nullop, /* flush_brnchtgt_C */
269 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
271 (void *)cpufunc_nullop, /* sleep */
275 cpufunc_null_fixup, /* dataabt_fixup */
276 cpufunc_null_fixup, /* prefetchabt_fixup */
278 arm9_context_switch, /* context_switch */
280 arm9_setup /* cpu setup */
283 #endif /* CPU_ARM9 */
/*
 * cpu_functions vector for the ARM10 core.  Unlike ARM9, this core
 * has a real dcache_inv_range and per-entry I-TLB flush.
 * NOTE(review): some initializer entries and the closing brace are
 * missing from this extracted view — verify against upstream.
 */
286 struct cpu_functions arm10_cpufuncs = {
290 cpufunc_nullop, /* cpwait */
294 cpufunc_control, /* control */
295 cpufunc_domains, /* Domain */
296 arm10_setttb, /* Setttb */
297 cpufunc_faultstatus, /* Faultstatus */
298 cpufunc_faultaddress, /* Faultaddress */
302 armv4_tlb_flushID, /* tlb_flushID */
303 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
304 armv4_tlb_flushI, /* tlb_flushI */
305 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
306 armv4_tlb_flushD, /* tlb_flushD */
307 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
309 /* Cache operations */
311 arm10_icache_sync_all, /* icache_sync_all */
312 arm10_icache_sync_range, /* icache_sync_range */
314 arm10_dcache_wbinv_all, /* dcache_wbinv_all */
315 arm10_dcache_wbinv_range, /* dcache_wbinv_range */
316 arm10_dcache_inv_range, /* dcache_inv_range */
317 arm10_dcache_wb_range, /* dcache_wb_range */
319 arm10_idcache_wbinv_all, /* idcache_wbinv_all */
320 arm10_idcache_wbinv_range, /* idcache_wbinv_range */
322 /* Other functions */
324 cpufunc_nullop, /* flush_prefetchbuf */
325 armv4_drain_writebuf, /* drain_writebuf */
326 cpufunc_nullop, /* flush_brnchtgt_C */
327 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
329 (void *)cpufunc_nullop, /* sleep */
333 cpufunc_null_fixup, /* dataabt_fixup */
334 cpufunc_null_fixup, /* prefetchabt_fixup */
336 arm10_context_switch, /* context_switch */
338 arm10_setup /* cpu setup */
341 #endif /* CPU_ARM10 */
/*
 * cpu_functions vector for the Intel StrongARM SA-110.  Shares the sa1_*
 * cache/TLB primitives with the SA-11x0 parts; no sleep support here.
 * NOTE(review): some initializer entries and the closing brace are
 * missing from this extracted view — verify against upstream.
 */
344 struct cpu_functions sa110_cpufuncs = {
348 cpufunc_nullop, /* cpwait */
352 cpufunc_control, /* control */
353 cpufunc_domains, /* domain */
354 sa1_setttb, /* setttb */
355 cpufunc_faultstatus, /* faultstatus */
356 cpufunc_faultaddress, /* faultaddress */
360 armv4_tlb_flushID, /* tlb_flushID */
361 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
362 armv4_tlb_flushI, /* tlb_flushI */
363 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
364 armv4_tlb_flushD, /* tlb_flushD */
365 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
367 /* Cache operations */
369 sa1_cache_syncI, /* icache_sync_all */
370 sa1_cache_syncI_rng, /* icache_sync_range */
372 sa1_cache_purgeD, /* dcache_wbinv_all */
373 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
374 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
375 sa1_cache_cleanD_rng, /* dcache_wb_range */
377 sa1_cache_purgeID, /* idcache_wbinv_all */
378 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
380 /* Other functions */
382 cpufunc_nullop, /* flush_prefetchbuf */
383 armv4_drain_writebuf, /* drain_writebuf */
384 cpufunc_nullop, /* flush_brnchtgt_C */
385 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
387 (void *)cpufunc_nullop, /* sleep */
391 cpufunc_null_fixup, /* dataabt_fixup */
392 cpufunc_null_fixup, /* prefetchabt_fixup */
394 sa110_context_switch, /* context_switch */
396 sa110_setup /* cpu setup */
398 #endif /* CPU_SA110 */
400 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * cpu_functions vector shared by the SA-1100 and SA-1110.  Differs from
 * the SA-110 vector in having a read-buffer drain for flush_prefetchbuf
 * and a real cpu_sleep implementation.
 * NOTE(review): some initializer entries and the closing brace are
 * missing from this extracted view — verify against upstream.
 */
401 struct cpu_functions sa11x0_cpufuncs = {
405 cpufunc_nullop, /* cpwait */
409 cpufunc_control, /* control */
410 cpufunc_domains, /* domain */
411 sa1_setttb, /* setttb */
412 cpufunc_faultstatus, /* faultstatus */
413 cpufunc_faultaddress, /* faultaddress */
417 armv4_tlb_flushID, /* tlb_flushID */
418 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
419 armv4_tlb_flushI, /* tlb_flushI */
420 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
421 armv4_tlb_flushD, /* tlb_flushD */
422 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
424 /* Cache operations */
426 sa1_cache_syncI, /* icache_sync_all */
427 sa1_cache_syncI_rng, /* icache_sync_range */
429 sa1_cache_purgeD, /* dcache_wbinv_all */
430 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
431 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
432 sa1_cache_cleanD_rng, /* dcache_wb_range */
434 sa1_cache_purgeID, /* idcache_wbinv_all */
435 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
437 /* Other functions */
439 sa11x0_drain_readbuf, /* flush_prefetchbuf */
440 armv4_drain_writebuf, /* drain_writebuf */
441 cpufunc_nullop, /* flush_brnchtgt_C */
442 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
444 sa11x0_cpu_sleep, /* sleep */
448 cpufunc_null_fixup, /* dataabt_fixup */
449 cpufunc_null_fixup, /* prefetchabt_fixup */
451 sa11x0_context_switch, /* context_switch */
453 sa11x0_setup /* cpu setup */
455 #endif /* CPU_SA1100 || CPU_SA1110 */
/*
 * cpu_functions vector for the Intel IXP12x0 network processors
 * (StrongARM-derived: reuses the sa1_* cache/TLB primitives, with an
 * IXP12x0-specific read-buffer drain).
 * NOTE(review): some initializer entries and the closing brace are
 * missing from this extracted view — verify against upstream.
 */
458 struct cpu_functions ixp12x0_cpufuncs = {
462 cpufunc_nullop, /* cpwait */
466 cpufunc_control, /* control */
467 cpufunc_domains, /* domain */
468 sa1_setttb, /* setttb */
469 cpufunc_faultstatus, /* faultstatus */
470 cpufunc_faultaddress, /* faultaddress */
474 armv4_tlb_flushID, /* tlb_flushID */
475 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
476 armv4_tlb_flushI, /* tlb_flushI */
477 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
478 armv4_tlb_flushD, /* tlb_flushD */
479 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
481 /* Cache operations */
483 sa1_cache_syncI, /* icache_sync_all */
484 sa1_cache_syncI_rng, /* icache_sync_range */
486 sa1_cache_purgeD, /* dcache_wbinv_all */
487 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
488 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
489 sa1_cache_cleanD_rng, /* dcache_wb_range */
491 sa1_cache_purgeID, /* idcache_wbinv_all */
492 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
494 /* Other functions */
496 ixp12x0_drain_readbuf, /* flush_prefetchbuf */
497 armv4_drain_writebuf, /* drain_writebuf */
498 cpufunc_nullop, /* flush_brnchtgt_C */
499 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
501 (void *)cpufunc_nullop, /* sleep */
505 cpufunc_null_fixup, /* dataabt_fixup */
506 cpufunc_null_fixup, /* prefetchabt_fixup */
508 ixp12x0_context_switch, /* context_switch */
510 ixp12x0_setup /* cpu setup */
512 #endif /* CPU_IXP12X0 */
514 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
515 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
516 defined(CPU_XSCALE_80219)
/*
 * cpu_functions vector shared by all XScale variants (i80200, i80321,
 * i80219, PXA2x0, IXP425).  XScale needs an explicit cpwait (CP15 drain)
 * after certain CP15 writes, hence the non-null cpwait slot; it also has
 * real sleep support.  set_cpufuncs() patches dcache_inv_range for early
 * i80200 steppings (errata workaround).
 * NOTE(review): some initializer entries and the closing brace are
 * missing from this extracted view — verify against upstream.
 */
518 struct cpu_functions xscale_cpufuncs = {
522 xscale_cpwait, /* cpwait */
526 xscale_control, /* control */
527 cpufunc_domains, /* domain */
528 xscale_setttb, /* setttb */
529 cpufunc_faultstatus, /* faultstatus */
530 cpufunc_faultaddress, /* faultaddress */
534 armv4_tlb_flushID, /* tlb_flushID */
535 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
536 armv4_tlb_flushI, /* tlb_flushI */
537 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
538 armv4_tlb_flushD, /* tlb_flushD */
539 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
541 /* Cache operations */
543 xscale_cache_syncI, /* icache_sync_all */
544 xscale_cache_syncI_rng, /* icache_sync_range */
546 xscale_cache_purgeD, /* dcache_wbinv_all */
547 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
548 xscale_cache_flushD_rng, /* dcache_inv_range */
549 xscale_cache_cleanD_rng, /* dcache_wb_range */
551 xscale_cache_purgeID, /* idcache_wbinv_all */
552 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
554 /* Other functions */
556 cpufunc_nullop, /* flush_prefetchbuf */
557 armv4_drain_writebuf, /* drain_writebuf */
558 cpufunc_nullop, /* flush_brnchtgt_C */
559 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
561 xscale_cpu_sleep, /* sleep */
565 cpufunc_null_fixup, /* dataabt_fixup */
566 cpufunc_null_fixup, /* prefetchabt_fixup */
568 xscale_context_switch, /* context_switch */
570 xscale_setup /* cpu setup */
573 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
577 * Global constants also used by locore.s
580 struct cpu_functions cpufuncs;
582 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
584 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
585 defined (CPU_ARM10) || \
586 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
587 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
588 defined(CPU_XSCALE_80219)
590 static void get_cachetype_cp15(void);
592 /* Additional cache information local to this file. Log2 of some of the
/* Log2 L2 D-cache geometry, consumed by the ARM9/ARM10 set/way loops. */
594 static int arm_dcache_l2_nsets;
595 static int arm_dcache_l2_assoc;
596 static int arm_dcache_l2_linesize;
/*
 * get_cachetype_cp15: read the CP15 Cache Type register (c0, opcode2 1)
 * and decode it into the global arm_p[id]cache_* geometry variables.
 * NOTE(review): the function header line is missing from this extracted
 * view; the body below starts at its local declarations.
 */
601 u_int ctype, isize, dsize;
604 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
608 * ...and thus spake the ARM ARM:
610 * If an <opcode2> value corresponding to an unimplemented or
611 * reserved ID register is encountered, the System Control
612 * processor returns the value of the main ID register.
/* Cache Type register not implemented: reads back as the main ID. */
614 if (ctype == cpufunc_id())
617 if ((ctype & CPU_CT_S) == 0)
618 arm_pcache_unified = 1;
621 * If you want to know how this code works, go read the ARM ARM.
624 arm_pcache_type = CPU_CT_CTYPE(ctype);
/* Harvard caches: decode the I-cache half of the register. */
626 if (arm_pcache_unified == 0) {
627 isize = CPU_CT_ISIZE(ctype);
628 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
629 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
630 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
631 if (isize & CPU_CT_xSIZE_M)
632 arm_picache_line_size = 0; /* not present */
634 arm_picache_ways = 1;
636 arm_picache_ways = multiplier <<
637 (CPU_CT_xSIZE_ASSOC(isize) - 1);
639 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
/* D-cache (or unified cache) half of the register. */
642 dsize = CPU_CT_DSIZE(ctype);
643 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
644 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
645 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
646 if (dsize & CPU_CT_xSIZE_M)
647 arm_pdcache_line_size = 0; /* not present */
649 arm_pdcache_ways = 1;
651 arm_pdcache_ways = multiplier <<
652 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
654 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
656 arm_dcache_align = arm_pdcache_line_size;
/* Derive the log2 L2 parameters used by the arm9/arm10 index loops. */
658 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
659 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
660 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
661 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
664 arm_dcache_align_mask = arm_dcache_align - 1;
666 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
668 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
670 /* Cache information for CPUs without cache type registers. */
674 int ct_pcache_unified;
676 int ct_pdcache_line_size;
679 int ct_picache_line_size;
/*
 * Static cache-geometry table for CPUs whose CP15 has no Cache Type
 * register (StrongARM and IXP1200); terminated by an all-zero entry.
 * NOTE(review): the closing brace of this initializer is missing from
 * this extracted view.
 */
683 struct cachetab cachetab[] = {
684 /* cpuid, cache type, u, dsiz, ls, wy, isiz, ls, wy */
685 /* XXX is this type right for SA-1? */
686 { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
687 { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
688 { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
689 { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
690 { 0, 0, 0, 0, 0, 0, 0, 0}
693 static void get_cachetype_table(void);
/*
 * get_cachetype_table: look the running CPU up in cachetab[] by
 * (cpuid & CPU_ID_CPU_MASK) and copy its entry into the global
 * arm_p[id]cache_* variables; used by cores without a CP15 Cache
 * Type register.  If the CPU is not found the globals are untouched.
 */
696 get_cachetype_table()
699 u_int32_t cpuid = cpufunc_id();
701 for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
702 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
703 arm_pcache_type = cachetab[i].ct_pcache_type;
704 arm_pcache_unified = cachetab[i].ct_pcache_unified;
705 arm_pdcache_size = cachetab[i].ct_pdcache_size;
706 arm_pdcache_line_size =
707 cachetab[i].ct_pdcache_line_size;
708 arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
709 arm_picache_size = cachetab[i].ct_picache_size;
710 arm_picache_line_size =
711 cachetab[i].ct_picache_line_size;
712 arm_picache_ways = cachetab[i].ct_picache_ways;
715 arm_dcache_align = arm_pdcache_line_size;
717 arm_dcache_align_mask = arm_dcache_align - 1;
720 #endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
723 * Cannot panic here as we may not have a console yet ...
/*
 * Body of set_cpufuncs(): identify the running CPU from its CP15 main ID
 * register, install the matching cpu_functions vector into the global
 * `cpufuncs`, probe cache geometry, and initialize the pmap PTE layout
 * for the core.  Returns non-zero (ARCHITECTURE_NOT_PRESENT) via panic
 * path if the CPU is unsupported.
 * NOTE(review): the function header and several lines (closing braces,
 * some #ifdef guards) are missing from this extracted view.
 */
729 cputype = cpufunc_id();
730 cputype &= CPU_ID_CPU_MASK;
733 * NOTE: cpu_do_powersave defaults to off. If we encounter a
734 * CPU type where we want to use it by default, then we set it.
/* ARM7TDMI: ARM Ltd implementor, ARM7 family, V4T architecture. */
738 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
739 CPU_ID_IS7(cputype) &&
740 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
741 cpufuncs = arm7tdmi_cpufuncs;
742 cpu_reset_needs_v4_MMU_disable = 0;
743 get_cachetype_cp15();
744 pmap_pte_init_generic();
749 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
750 (cputype & 0x0000f000) == 0x00008000) {
751 cpufuncs = arm8_cpufuncs;
752 cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
753 get_cachetype_cp15();
754 pmap_pte_init_arm8();
757 #endif /* CPU_ARM8 */
759 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
760 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
761 (cputype & 0x0000f000) == 0x00009000) {
762 cpufuncs = arm9_cpufuncs;
763 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
764 get_cachetype_cp15();
/* Precompute set/way iteration bounds from the probed geometry. */
765 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
766 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
767 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
768 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
769 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
770 #ifdef ARM9_CACHE_WRITE_THROUGH
771 pmap_pte_init_arm9();
773 pmap_pte_init_generic();
777 #endif /* CPU_ARM9 */
779 if (/* cputype == CPU_ID_ARM1020T || */
780 cputype == CPU_ID_ARM1020E) {
782 * Select write-through cacheing (this isn't really an
783 * option on ARM1020T).
785 cpufuncs = arm10_cpufuncs;
786 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
787 get_cachetype_cp15();
788 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
789 arm10_dcache_sets_max =
790 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
791 arm10_dcache_sets_inc;
792 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
793 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
794 pmap_pte_init_generic();
797 #endif /* CPU_ARM10 */
799 if (cputype == CPU_ID_SA110) {
800 cpufuncs = sa110_cpufuncs;
801 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
802 get_cachetype_table();
806 #endif /* CPU_SA110 */
808 if (cputype == CPU_ID_SA1100) {
809 cpufuncs = sa11x0_cpufuncs;
810 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
811 get_cachetype_table();
813 /* Use powersave on this CPU. */
814 cpu_do_powersave = 1;
818 #endif /* CPU_SA1100 */
820 if (cputype == CPU_ID_SA1110) {
821 cpufuncs = sa11x0_cpufuncs;
822 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
823 get_cachetype_table();
825 /* Use powersave on this CPU. */
826 cpu_do_powersave = 1;
830 #endif /* CPU_SA1110 */
832 if (cputype == CPU_ID_IXP1200) {
833 cpufuncs = ixp12x0_cpufuncs;
834 cpu_reset_needs_v4_MMU_disable = 1;
835 get_cachetype_table();
839 #endif /* CPU_IXP12X0 */
840 #ifdef CPU_XSCALE_80200
841 if (cputype == CPU_ID_80200) {
842 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
847 * Reset the Performance Monitoring Unit to a
849 * - CCNT, PMN0, PMN1 reset to 0
850 * - overflow indications cleared
851 * - all counters disabled
853 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
855 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
858 #if defined(XSCALE_CCLKCFG)
860 * Crank CCLKCFG to maximum legal value.
862 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
864 : "r" (XSCALE_CCLKCFG));
868 * XXX Disable ECC in the Bus Controller Unit; we
869 * don't really support it, yet. Clear any pending
872 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
874 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
876 cpufuncs = xscale_cpufuncs;
877 #if defined(PERFCTRS)
882 * i80200 errata: Step-A0 and A1 have a bug where
883 * D$ dirty bits are not cleared on "invalidate by
886 * Workaround: Clean cache line before invalidating.
/* Patch the vector in place for the buggy early steppings. */
888 if (rev == 0 || rev == 1)
889 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
891 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
892 get_cachetype_cp15();
893 pmap_pte_init_xscale();
896 #endif /* CPU_XSCALE_80200 */
897 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
898 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
899 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
900 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
903 * Reset the Performance Monitoring Unit to a
905 * - CCNT, PMN0, PMN1 reset to 0
906 * - overflow indications cleared
907 * - all counters disabled
909 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
911 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
914 cpufuncs = xscale_cpufuncs;
915 #if defined(PERFCTRS)
919 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
920 get_cachetype_cp15();
921 pmap_pte_init_xscale();
924 #endif /* CPU_XSCALE_80321 */
926 #ifdef CPU_XSCALE_PXA2X0
927 /* ignore core revision to test PXA2xx CPUs */
928 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
929 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
931 cpufuncs = xscale_cpufuncs;
932 #if defined(PERFCTRS)
936 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
937 get_cachetype_cp15();
938 pmap_pte_init_xscale();
940 /* Use powersave on this CPU. */
941 cpu_do_powersave = 1;
945 #endif /* CPU_XSCALE_PXA2X0 */
946 #ifdef CPU_XSCALE_IXP425
947 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
948 cputype == CPU_ID_IXP425_266) {
951 cpufuncs = xscale_cpufuncs;
952 #if defined(PERFCTRS)
956 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
957 get_cachetype_cp15();
958 pmap_pte_init_xscale();
962 #endif /* CPU_XSCALE_IXP425 */
964 * Bzzzz. And the answer was ...
/* No table entry matched: the kernel was built without this CPU. */
966 panic("No support for this CPU type (%08x) in kernel", cputype);
967 return(ARCHITECTURE_NOT_PRESENT);
971 * Fixup routines for data and prefetch aborts.
973 * Several compile time symbols are used
975 * DEBUG_FAULT_CORRECTION - Print debugging information during the
976 * correction of registers after a fault.
977 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
978 * when defined should use late aborts
983 * Null abort fixup routine.
984 * For use when no fixup is required.
/*
 * cpufunc_null_fixup: abort-fixup stub for CPUs that need no register
 * correction after a data/prefetch abort; unconditionally reports OK.
 * NOTE(review): the K&R parameter declaration and braces are missing
 * from this extracted view.
 */
987 cpufunc_null_fixup(arg)
990 return(ABORT_FIXUP_OK);
994 #if defined(CPU_ARM7TDMI)
996 #ifdef DEBUG_FAULT_CORRECTION
/* Debug hooks: compiled to printf/disassemble only with fault-correction
 * debugging enabled, otherwise they expand to nothing. */
997 #define DFC_PRINTF(x) printf x
998 #define DFC_DISASSEMBLE(x) disassemble(x)
1000 #define DFC_PRINTF(x) /* nothing */
1001 #define DFC_DISASSEMBLE(x) /* nothing */
1005 * "Early" data abort fixup.
1007 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
1008 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1010 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
/*
 * Undo base-register writeback performed by a faulting LDM/STM/LDC/STC so
 * the instruction can be restarted.  Takes a trapframe pointer; returns
 * ABORT_FIXUP_OK or ABORT_FIXUP_FAILED.
 * NOTE(review): several lines of this function (declarations, braces,
 * some conditions) are missing from this extracted view — treat the code
 * below as a partial transcript.
 */
1013 early_abort_fixup(arg)
1016 trapframe_t *frame = arg;
1018 u_int fault_instruction;
1021 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1023 /* Ok an abort in SVC mode */
1026 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1027 * as the fault happened in svc mode but we need it in the
1028 * usr slot so we can treat the registers as an array of ints
1030 * NOTE: This PC is in the position but writeback is not
1032 * Doing it like this is more efficient than trapping this
1033 * case in all possible locations in the following fixup code.
1036 saved_lr = frame->tf_usr_lr;
1037 frame->tf_usr_lr = frame->tf_svc_lr;
1040 * Note the trapframe does not have the SVC r13 so a fault
1041 * from an instruction with writeback to r13 in SVC mode is
1042 * not allowed. This should not happen as the kstack is
1047 /* Get fault address and status from the CPU */
1049 fault_pc = frame->tf_pc;
1050 fault_instruction = *((volatile unsigned int *)fault_pc);
1052 /* Decode the fault instruction and fix the registers as needed */
/* LDM/STM: bits 27..25 == 100. */
1054 if ((fault_instruction & 0x0e000000) == 0x08000000) {
1058 int *registers = &frame->tf_r0;
1060 DFC_PRINTF(("LDM/STM\n"));
1061 DFC_DISASSEMBLE(fault_pc);
/* W bit (21): base writeback occurred and must be undone. */
1062 if (fault_instruction & (1 << 21)) {
1063 DFC_PRINTF(("This instruction must be corrected\n"));
1064 base = (fault_instruction >> 16) & 0x0f;
1066 return ABORT_FIXUP_FAILED;
1067 /* Count registers transferred */
1069 for (loop = 0; loop < 16; ++loop) {
1070 if (fault_instruction & (1<<loop))
1073 DFC_PRINTF(("%d registers used\n", count));
1074 DFC_PRINTF(("Corrected r%d by %d bytes ",
/* U bit (23): 0 = decrement, 1 = increment — reverse the writeback. */
1076 if (fault_instruction & (1 << 23)) {
1077 DFC_PRINTF(("down\n"));
1078 registers[base] -= count * 4;
1080 DFC_PRINTF(("up\n"));
1081 registers[base] += count * 4;
/* LDC/STC: bits 27..25 == 110. */
1084 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1087 int *registers = &frame->tf_r0;
1089 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1091 DFC_DISASSEMBLE(fault_pc);
1093 /* Only need to fix registers if write back is turned on */
1095 if ((fault_instruction & (1 << 21)) != 0) {
1096 base = (fault_instruction >> 16) & 0x0f;
1098 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1099 return ABORT_FIXUP_FAILED;
1101 return ABORT_FIXUP_FAILED;
/* 8-bit coprocessor offset, scaled by 4 (word units). */
1103 offset = (fault_instruction & 0xff) << 2;
1104 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1105 if ((fault_instruction & (1 << 23)) != 0)
1107 registers[base] += offset;
1108 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1110 } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1111 return ABORT_FIXUP_FAILED;
/* Restore the usr/svc LR swap performed on entry (mirrors the prologue). */
1113 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1115 /* Ok an abort in SVC mode */
1118 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1119 * as the fault happened in svc mode but we need it in the
1120 * usr slot so we can treat the registers as an array of ints
1122 * NOTE: This PC is in the position but writeback is not
1124 * Doing it like this is more efficient than trapping this
1125 * case in all possible locations in the prior fixup code.
1128 frame->tf_svc_lr = frame->tf_usr_lr;
1129 frame->tf_usr_lr = saved_lr;
1132 * Note the trapframe does not have the SVC r13 so a fault
1133 * from an instruction with writeback to r13 in SVC mode is
1134 * not allowed. This should not happen as the kstack is
1139 return(ABORT_FIXUP_OK);
1141 #endif /* CPU_ARM2/250/3/6/7 */
1144 #if defined(CPU_ARM7TDMI)
1146 * "Late" (base updated) data abort fixup
1148 * For ARM6 (in late-abort mode) and ARM7.
1150 * In this model, all data-transfer instructions need fixing up. We defer
1151 * LDM, STM, LDC and STC fixup to the early-abort handler.
/*
 * Undo base-register writeback for a faulting single-register LDR/STR
 * (including register-offset forms with shifts), then fall through to
 * early_abort_fixup() for the multi-register/coprocessor cases.
 * Returns ABORT_FIXUP_OK or ABORT_FIXUP_FAILED.
 * NOTE(review): several lines of this function are missing from this
 * extracted view — treat the code below as a partial transcript.
 */
1154 late_abort_fixup(arg)
1157 trapframe_t *frame = arg;
1159 u_int fault_instruction;
1162 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1164 /* Ok an abort in SVC mode */
1167 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1168 * as the fault happened in svc mode but we need it in the
1169 * usr slot so we can treat the registers as an array of ints
1171 * NOTE: This PC is in the position but writeback is not
1173 * Doing it like this is more efficient than trapping this
1174 * case in all possible locations in the following fixup code.
1177 saved_lr = frame->tf_usr_lr;
1178 frame->tf_usr_lr = frame->tf_svc_lr;
1181 * Note the trapframe does not have the SVC r13 so a fault
1182 * from an instruction with writeback to r13 in SVC mode is
1183 * not allowed. This should not happen as the kstack is
1188 /* Get fault address and status from the CPU */
1190 fault_pc = frame->tf_pc;
1191 fault_instruction = *((volatile unsigned int *)fault_pc);
1193 /* Decode the fault instruction and fix the registers as needed */
1195 /* Was is a swap instruction ? */
1197 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1198 DFC_DISASSEMBLE(fault_pc);
/* Single-register LDR/STR: bits 27..26 == 01. */
1199 } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1201 /* Was is a ldr/str instruction */
1202 /* This is for late abort only */
1206 int *registers = &frame->tf_r0;
1208 DFC_DISASSEMBLE(fault_pc);
1210 /* This is for late abort only */
/* Post-indexed (P==0) or writeback (W==1): base was updated. */
1212 if ((fault_instruction & (1 << 24)) == 0
1213 || (fault_instruction & (1 << 21)) != 0) {
1214 /* postindexed ldr/str with no writeback */
1216 base = (fault_instruction >> 16) & 0x0f;
1218 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1219 return ABORT_FIXUP_FAILED;
1221 return ABORT_FIXUP_FAILED;
1222 DFC_PRINTF(("late abt fix: r%d=%08x : ",
1223 base, registers[base]));
1224 if ((fault_instruction & (1 << 25)) == 0) {
1225 /* Immediate offset - easy */
1227 offset = fault_instruction & 0xfff;
1228 if ((fault_instruction & (1 << 23)))
1230 registers[base] += offset;
1231 DFC_PRINTF(("imm=%08x ", offset));
1233 /* offset is a shifted register */
1236 offset = fault_instruction & 0x0f;
1238 return ABORT_FIXUP_FAILED;
1241 * Register offset - hard we have to
1242 * cope with shifts !
1244 offset = registers[offset];
1246 if ((fault_instruction & (1 << 4)) == 0)
1247 /* shift with amount */
1248 shift = (fault_instruction >> 7) & 0x1f;
1250 /* shift with register */
1251 if ((fault_instruction & (1 << 7)) != 0)
1252 /* undefined for now so bail out */
1253 return ABORT_FIXUP_FAILED;
1254 shift = ((fault_instruction >> 8) & 0xf);
1256 return ABORT_FIXUP_FAILED;
1257 DFC_PRINTF(("shift reg=%d ", shift));
1258 shift = registers[shift];
1260 DFC_PRINTF(("shift=%08x ", shift));
/* Apply the barrel-shifter operation encoded in bits 6..5. */
1261 switch (((fault_instruction >> 5) & 0x3)) {
1262 case 0 : /* Logical left */
1263 offset = (int)(((u_int)offset) << shift);
1265 case 1 : /* Logical Right */
1266 if (shift == 0) shift = 32;
1267 offset = (int)(((u_int)offset) >> shift);
1269 case 2 : /* Arithmetic Right */
1270 if (shift == 0) shift = 32;
1271 offset = (int)(((int)offset) >> shift);
1273 case 3 : /* Rotate right (rol or rxx) */
1274 return ABORT_FIXUP_FAILED;
1278 DFC_PRINTF(("abt: fixed LDR/STR with "
1279 "register offset\n"));
1280 if ((fault_instruction & (1 << 23)))
1282 DFC_PRINTF(("offset=%08x ", offset));
1283 registers[base] += offset;
1285 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
/* Restore the usr/svc LR swap performed on entry (mirrors the prologue). */
1289 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1291 /* Ok an abort in SVC mode */
1294 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1295 * as the fault happened in svc mode but we need it in the
1296 * usr slot so we can treat the registers as an array of ints
1298 * NOTE: This PC is in the position but writeback is not
1300 * Doing it like this is more efficient than trapping this
1301 * case in all possible locations in the prior fixup code.
1304 frame->tf_svc_lr = frame->tf_usr_lr;
1305 frame->tf_usr_lr = saved_lr;
1308 * Note the trapframe does not have the SVC r13 so a fault
1309 * from an instruction with writeback to r13 in SVC mode is
1310 * not allowed. This should not happen as the kstack is
1316 * Now let the early-abort fixup routine have a go, in case it
1317 * was an LDM, STM, LDC or STC that faulted.
1320 return early_abort_fixup(arg);
1322 #endif /* CPU_ARM7TDMI */
1328 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1329 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
1330 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1331 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1332 defined(CPU_XSCALE_80219)
1345 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
/*
 * parse_cpu_options:
 *
 *	Walk a NULL-terminated table of boot-option descriptors and fold
 *	each matching boolean boot option into the CPU control-register
 *	value `cpuctrl'.  When an option is present and true, co_trueop
 *	says how its co_value bits are applied (OR = set, BIC = clear);
 *	co_falseop does the same for an option present and false.
 *	Returns the updated control word (return not visible in this view).
 */
1348 parse_cpu_options(args, optlist, cpuctrl)
1350 struct cpu_option *optlist;
/* Scan every entry in the table; a NULL co_name terminates it. */
1358 while (optlist->co_name) {
1359 if (get_bootconf_option(args, optlist->co_name,
1360 BOOTOPT_TYPE_BOOLEAN, &integer)) {
/* Option present and true: apply co_trueop (IGN matches neither case). */
1362 if (optlist->co_trueop == OR)
1363 cpuctrl |= optlist->co_value;
1364 else if (optlist->co_trueop == BIC)
1365 cpuctrl &= ~optlist->co_value;
/* Option present and false: apply co_falseop instead. */
1367 if (optlist->co_falseop == OR)
1368 cpuctrl |= optlist->co_value;
1369 else if (optlist->co_falseop == BIC)
1370 cpuctrl &= ~optlist->co_value;
1377 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1379 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot options shared by the ARM6/7/8 family: enable/disable the
 * unified I/D cache (IDC) and the write buffer.  Each row is
 * { name, op-if-true, op-if-false, control-register bits }.
 */
1380 struct cpu_option arm678_options[] = {
/* Legacy spellings; the matching #ifdef COMPAT_12 opener is not
 * visible in this view — presumably guards the next two rows. */
1382 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
1383 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1384 #endif /* COMPAT_12 */
1385 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1386 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1387 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1388 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
/* Table terminator — parse_cpu_options stops at a NULL name. */
1389 { NULL, IGN, IGN, 0 }
1392 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
/*
 * ARM7TDMI-specific boot options: per-CPU cache/write-buffer control
 * plus the FPA coprocessor clock (CPCLK) switch.
 */
1395 struct cpu_option arm7tdmi_options[] = {
1396 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1397 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1398 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1399 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
/* Legacy spelling, guarded by COMPAT_12 (opener not visible here). */
1401 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
1402 #endif /* COMPAT_12 */
1403 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
/* Table terminator. */
1404 { NULL, IGN, IGN, 0 }
/*
 * arm7tdmi_setup:
 *
 *	Configure the ARM7TDMI control register: start from a default of
 *	MMU + 32-bit program/data space + system protection + IDC cache +
 *	write buffer, apply the boot-option tables, flush the caches and
 *	write the result to the control register.
 */
1408 arm7tdmi_setup(args)
1413 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1414 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1415 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
/* Apply the shared ARM6/7/8 options first, then the 7TDMI ones. */
1417 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1418 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
/* Presumably under a big-endian #ifdef (guard not visible here). */
1421 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1424 /* Clear out the cache */
1425 cpu_idcache_wbinv_all();
1427 /* Set the control register */
/* Mask 0xffffffff: rewrite every control-register bit. */
1429 cpu_control(0xffffffff, cpuctrl);
1431 #endif /* CPU_ARM7TDMI */
/*
 * ARM8-specific boot options: cache, write buffer and branch
 * prediction (BPRD) control.
 */
1434 struct cpu_option arm8_options[] = {
1435 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1436 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1437 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1438 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
/* Legacy spelling, guarded by COMPAT_12 (opener not visible here). */
1440 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1441 #endif /* COMPAT_12 */
1442 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1443 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
/* Table terminator. */
1444 { NULL, IGN, IGN, 0 }
/*
 * arm8_setup (function header dropped by the extraction — body below):
 *
 *	Configure the ARM8 control register and its special clock/test
 *	register.  Defaults to MMU + 32-bit spaces + system protection +
 *	IDC cache + write buffer, optionally alignment faults, then
 *	applies the shared and ARM8 option tables and the arm8.clock.*
 *	boot options before writing both registers.
 */
1452 int cpuctrl, cpuctrlmask;
1456 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1457 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1458 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
/* Mask of every bit this routine is allowed to touch (currently
 * unused by the cpu_control() call below, which rewrites all bits). */
1459 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1460 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1461 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1462 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1463 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1465 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
/* Trap unaligned accesses unless the kernel config disables this. */
1466 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1469 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1470 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
/* Presumably under a big-endian #ifdef (guard not visible here). */
1473 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1476 /* Get clock configuration */
/* Read the current clock/test register, keeping only the low nibble. */
1477 clocktest = arm8_clock_config(0, 0) & 0x0f;
1479 /* Special ARM8 clock and test configuration */
1480 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1484 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
/* Bit 0 selects dynamic clock switching (cleared when option false). */
1488 clocktest &= ~(0x01);
1491 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
/* Bit 1 selects synchronous clocking (cleared when option false). */
1495 clocktest &= ~(0x02);
1498 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
/* NOTE(review): mask clears bits 7:6 but the value lands in bits 3:2
 * — looks inconsistent; confirm against the ARM810 clock register. */
1499 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1502 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
/* Test bits occupy bits 7:5 of the clock/test register. */
1503 clocktest |= (integer & 7) << 5;
1507 /* Clear out the cache */
1508 cpu_idcache_wbinv_all();
1510 /* Set the control register */
1512 cpu_control(0xffffffff, cpuctrl);
1514 /* Set the clock/test register */
1516 arm8_clock_config(0x7f, clocktest);
1518 #endif /* CPU_ARM8 */
/*
 * ARM9 boot options.  Unlike the ARM6/7/8 tables, the ARM9 has split
 * I/D caches, so "cache" controls both IC and DC bits while icache/
 * dcache control each individually.
 */
1521 struct cpu_option arm9_options[] = {
1522 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1523 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1524 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1525 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1526 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1527 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1528 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1529 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
/* Table terminator. */
1530 { NULL, IGN, IGN, 0 }
/*
 * arm9_setup (function header dropped by the extraction — body below):
 *
 *	Configure the ARM9 control register: split I/D caches, write
 *	buffer, late aborts and round-robin cache replacement by
 *	default; optional alignment faults, big-endian mode and
 *	high-vector relocation.  Note this routine (unlike most of its
 *	siblings) passes the real cpuctrlmask to cpu_control().
 */
1537 int cpuctrl, cpuctrlmask;
1539 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1540 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1541 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1542 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1543 CPU_CONTROL_ROUNDROBIN;
/* Bits this routine is allowed to modify in the control register. */
1544 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1545 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1546 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1547 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1548 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1549 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1550 | CPU_CONTROL_ROUNDROBIN;
1552 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
/* Trap unaligned accesses unless the kernel config disables this. */
1553 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1556 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
/* Presumably under a big-endian #ifdef (guard not visible here). */
1559 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate the vector page to 0xffff0000 when so configured. */
1561 if (vector_page == ARM_VECTORS_HIGH)
1562 cpuctrl |= CPU_CONTROL_VECRELOC;
1564 /* Clear out the cache */
1565 cpu_idcache_wbinv_all();
1567 /* Set the control register */
1568 cpu_control(cpuctrlmask, cpuctrl);
1572 #endif /* CPU_ARM9 */
/*
 * ARM10 boot options: split I/D cache and write-buffer control,
 * same shape as the ARM9 table.
 */
1575 struct cpu_option arm10_options[] = {
1576 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1577 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1578 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1579 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1580 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1581 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1582 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1583 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
/* Table terminator. */
1584 { NULL, IGN, IGN, 0 }
/*
 * arm10_setup (function header dropped by the extraction — body below):
 *
 *	Configure the ARM10 control register: caches, write buffer and
 *	branch prediction on by default, optional alignment faults and
 *	big-endian mode.  Flushes the caches both before and after the
 *	control-register write.
 */
1591 int cpuctrl, cpuctrlmask;
1593 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1594 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1595 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
/* Mask of modifiable bits (unused here — cpu_control() below
 * rewrites the entire register). */
1596 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1597 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1598 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1599 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1600 | CPU_CONTROL_BPRD_ENABLE
1601 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1603 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
/* Trap unaligned accesses unless the kernel config disables this. */
1604 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1607 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
/* Presumably under a big-endian #ifdef (guard not visible here). */
1610 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1613 /* Clear out the cache */
1614 cpu_idcache_wbinv_all();
1616 /* Now really make sure they are clean. */
/* CP15 c7,c7,0: invalidate both caches directly. */
1617 asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1619 /* Set the control register */
1621 cpu_control(0xffffffff, cpuctrl);
/* Flush again now that the new cache configuration is live. */
1624 cpu_idcache_wbinv_all();
1626 #endif /* CPU_ARM10 */
/*
 * StrongARM SA-110 boot options: split I/D cache and write-buffer
 * control, plus legacy COMPAT_12 spellings.
 */
1629 struct cpu_option sa110_options[] = {
/* Legacy spellings, guarded by COMPAT_12 (opener not visible here). */
1631 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1632 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1633 #endif /* COMPAT_12 */
1634 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1635 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1636 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1637 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1638 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1639 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1640 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1641 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
/* Table terminator. */
1642 { NULL, IGN, IGN, 0 }
/*
 * sa110_setup (function header dropped by the extraction — body below):
 *
 *	Configure the StrongARM SA-110 control register and enable CPU
 *	clock switching via an implementation-specific CP15 register.
 */
1649 int cpuctrl, cpuctrlmask;
1651 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1652 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1653 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1654 | CPU_CONTROL_WBUF_ENABLE;
/* Mask of modifiable bits — see the commented-out cpu_control()
 * call below; the live call rewrites every bit instead. */
1655 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1656 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1657 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1658 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1659 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1660 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1661 | CPU_CONTROL_CPCLK;
1663 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
/* Trap unaligned accesses unless the kernel config disables this. */
1664 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1667 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
/* Presumably under a big-endian #ifdef (guard not visible here). */
1670 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1673 /* Clear out the cache */
1674 cpu_idcache_wbinv_all();
1676 /* Set the control register */
1678 /* cpu_control(cpuctrlmask, cpuctrl);*/
1679 cpu_control(0xffffffff, cpuctrl);
1682 * enable clockswitching, note that this doesn't read or write to r0,
1683 * r0 is just to make it valid asm
1685 __asm ("mcr 15, 0, r0, c15, c1, 2");
1687 #endif /* CPU_SA110 */
1689 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * StrongARM SA-1100/SA-1110 boot options: same layout as the SA-110
 * table, with sa11x0.* spellings.
 */
1690 struct cpu_option sa11x0_options[] = {
/* Legacy spellings, guarded by COMPAT_12 (opener not visible here). */
1692 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1693 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1694 #endif /* COMPAT_12 */
1695 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1696 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1697 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1698 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1699 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1700 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1701 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1702 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
/* Table terminator. */
1703 { NULL, IGN, IGN, 0 }
/*
 * sa11x0_setup (function header dropped by the extraction — body
 * below):
 *
 *	Configure the SA-1100/SA-1110 control register: caches, write
 *	buffer and late aborts by default; optional alignment faults,
 *	big-endian mode and high-vector relocation.
 */
1710 int cpuctrl, cpuctrlmask;
1712 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1713 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1714 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1715 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
/* Mask of modifiable bits (unused — the cpu_control() call below
 * rewrites every bit). */
1716 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1717 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1718 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1719 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1720 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1721 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1722 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1724 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
/* Trap unaligned accesses unless the kernel config disables this. */
1725 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1729 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
/* Presumably under a big-endian #ifdef (guard not visible here). */
1732 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate the vector page to 0xffff0000 when so configured. */
1735 if (vector_page == ARM_VECTORS_HIGH)
1736 cpuctrl |= CPU_CONTROL_VECRELOC;
1737 /* Clear out the cache */
1738 cpu_idcache_wbinv_all();
1739 /* Set the control register */
1741 cpu_control(0xffffffff, cpuctrl);
1743 #endif /* CPU_SA1100 || CPU_SA1110 */
1745 #if defined(CPU_IXP12X0)
/*
 * Intel IXP12x0 boot options: split I/D cache and write-buffer
 * control.
 */
1746 struct cpu_option ixp12x0_options[] = {
1747 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1748 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1749 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1750 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1751 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1752 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1753 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1754 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
/* Table terminator. */
1755 { NULL, IGN, IGN, 0 }
/*
 * ixp12x0_setup (function header dropped by the extraction — body
 * below):
 *
 *	Configure the IXP12x0 control register.  Unlike most siblings,
 *	this routine passes the real cpuctrlmask to cpu_control() so
 *	only the listed bits are modified.
 */
1762 int cpuctrl, cpuctrlmask;
1765 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
1766 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
1767 | CPU_CONTROL_IC_ENABLE;
/* Bits this routine is allowed to modify in the control register. */
1769 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
1770 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1771 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
1772 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
1773 | CPU_CONTROL_VECRELOC;
1775 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
/* Trap unaligned accesses unless the kernel config disables this. */
1776 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1779 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
/* Presumably under a big-endian #ifdef (guard not visible here). */
1782 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate the vector page to 0xffff0000 when so configured. */
1785 if (vector_page == ARM_VECTORS_HIGH)
1786 cpuctrl |= CPU_CONTROL_VECRELOC;
1788 /* Clear out the cache */
1789 cpu_idcache_wbinv_all();
1791 /* Set the control register */
1793 /* cpu_control(0xffffffff, cpuctrl); */
1794 cpu_control(cpuctrlmask, cpuctrl);
1796 #endif /* CPU_IXP12X0 */
1798 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1799 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1800 defined(CPU_XSCALE_80219)
/*
 * Intel XScale boot options: branch prediction plus split I/D cache
 * control.  No write-buffer rows — the XScale write buffer is always
 * enabled (see the comment in xscale_setup).
 */
1801 struct cpu_option xscale_options[] = {
/* Legacy spellings, guarded by COMPAT_12 (opener not visible here). */
1803 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1804 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1805 #endif /* COMPAT_12 */
1806 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1807 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1808 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1809 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1810 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1811 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1812 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
/* Table terminator. */
1813 { NULL, IGN, IGN, 0 }
/*
 * xscale_setup (function header dropped by the extraction — body
 * below):
 *
 *	Configure the XScale control register and the CP15 auxiliary
 *	control register (write-coalescing K bit).
 */
1821 int cpuctrl, cpuctrlmask;
1824 * The XScale Write Buffer is always enabled. Our option
1825 * is to enable/disable coalescing. Note that bits 6:3
1826 * must always be enabled.
1829 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1830 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1831 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1832 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1833 | CPU_CONTROL_BPRD_ENABLE;
/* Mask of modifiable bits — see the commented-out cpu_control()
 * below; the live call rewrites every bit. */
1834 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1835 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1836 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1837 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1838 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1839 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1840 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1842 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
/* Trap unaligned accesses unless the kernel config disables this. */
1843 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1846 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
/* Presumably under a big-endian #ifdef (guard not visible here). */
1849 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate the vector page to 0xffff0000 when so configured. */
1852 if (vector_page == ARM_VECTORS_HIGH)
1853 cpuctrl |= CPU_CONTROL_VECRELOC;
1855 /* Clear out the cache */
1856 cpu_idcache_wbinv_all();
1859 * Set the control register. Note that bits 6:3 must always
1863 /* cpu_control(cpuctrlmask, cpuctrl);*/
1864 cpu_control(0xffffffff, cpuctrl);
1866 /* Make sure write coalescing is turned on */
/* Read CP15 c1,c0,1 (auxiliary control) into auxctl (declaration not
 * visible in this view), toggle the K bit, and write it back. */
1867 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1869 #ifdef XSCALE_NO_COALESCE_WRITES
/* K set = write coalescing disabled on XScale. */
1870 auxctl |= XSCALE_AUXCTL_K;
1872 auxctl &= ~XSCALE_AUXCTL_K;
1874 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1877 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425