1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 * products derived from this software without specific prior written
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * RiscBSD kernel project
43 * C functions for supporting CPU / MMU / TLB specific operations.
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
50 #include <sys/param.h>
51 #include <sys/systm.h>
53 #include <sys/mutex.h>
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/disassem.h>
63 #include <machine/cpuconf.h>
64 #include <machine/cpufunc.h>
65 #include <machine/bootconfig.h>
67 #ifdef CPU_XSCALE_80200
68 #include <arm/xscale/i80200/i80200reg.h>
69 #include <arm/xscale/i80200/i80200var.h>
72 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73 #include <arm/xscale/i80321/i80321reg.h>
74 #include <arm/xscale/i80321/i80321var.h>
78 * Some definitions in i81342reg.h clash with i80321reg.h.
79 * This only happens for the LINT kernel. As it happens,
80 * we don't need anything from i81342reg.h that we already
81 * got from somewhere else during a LINT compile.
83 #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
84 #include <arm/xscale/i8134x/i81342reg.h>
87 #ifdef CPU_XSCALE_IXP425
88 #include <arm/xscale/ixp425/ixp425reg.h>
89 #include <arm/xscale/ixp425/ixp425var.h>
92 /* PRIMARY CACHE VARIABLES */
/*
 * Geometry of the primary (L1) caches.  The ARMv7 probe below
 * (get_cachetype_cp15()) fills in arm_cache_level, arm_cache_type[],
 * arm_dcache_align and arm_dcache_align_mask; the remaining fields are
 * presumably populated by the pre-v7 probe path, which is not visible
 * in this chunk — confirm against the full file.
 */
94 int arm_picache_line_size;
97 int arm_pdcache_size; /* and unified */
98 int arm_pdcache_line_size;
/* Non-zero when the core has a single unified cache (CP15 cache-type
 * register S bit clear) rather than separate I and D caches. */
102 int arm_pcache_unified;
/* D-cache line size in bytes, and the matching mask (align - 1) used
 * to round addresses for the range-based cache operations. */
104 int arm_dcache_align;
105 int arm_dcache_align_mask;
/* ARMv7: raw CLIDR value and the per-level/per-type CCSIDR readings
 * collected by get_cachetype_cp15(). */
107 u_int arm_cache_level;
108 u_int arm_cache_type[14];
111 /* 1 == use cpu_sleep(), 0 == don't */
112 int cpu_do_powersave;
/*
 * ARM7TDMI operation table.  This core has no separate clean/write-back
 * cache op, so every D-cache write-back/invalidate slot maps onto the
 * combined arm7tdmi_cache_flushID routine and dcache_wb_range is a
 * no-op.  The (void *) casts adapt argument-less routines to the
 * range-operation slots, whose extra arguments are simply ignored.
 */
116 struct cpu_functions arm7tdmi_cpufuncs = {
120 cpufunc_nullop, /* cpwait */
124 cpufunc_control, /* control */
125 cpufunc_domains, /* domain */
126 arm7tdmi_setttb, /* setttb */
127 cpufunc_faultstatus, /* faultstatus */
128 cpufunc_faultaddress, /* faultaddress */
132 arm7tdmi_tlb_flushID, /* tlb_flushID */
133 arm7tdmi_tlb_flushID_SE, /* tlb_flushID_SE */
134 arm7tdmi_tlb_flushID, /* tlb_flushI */
135 arm7tdmi_tlb_flushID_SE, /* tlb_flushI_SE */
136 arm7tdmi_tlb_flushID, /* tlb_flushD */
137 arm7tdmi_tlb_flushID_SE, /* tlb_flushD_SE */
139 /* Cache operations */
141 cpufunc_nullop, /* icache_sync_all */
142 (void *)cpufunc_nullop, /* icache_sync_range */
144 arm7tdmi_cache_flushID, /* dcache_wbinv_all */
145 (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range */
146 (void *)arm7tdmi_cache_flushID, /* dcache_inv_range */
147 (void *)cpufunc_nullop, /* dcache_wb_range */
149 arm7tdmi_cache_flushID, /* idcache_wbinv_all */
150 (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range */
151 cpufunc_nullop, /* l2cache_wbinv_all */
152 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
153 (void *)cpufunc_nullop, /* l2cache_inv_range */
154 (void *)cpufunc_nullop, /* l2cache_wb_range */
156 /* Other functions */
158 cpufunc_nullop, /* flush_prefetchbuf */
159 cpufunc_nullop, /* drain_writebuf */
160 cpufunc_nullop, /* flush_brnchtgt_C */
161 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
163 (void *)cpufunc_nullop, /* sleep */
167 late_abort_fixup, /* dataabt_fixup */
168 cpufunc_null_fixup, /* prefetchabt_fixup */
170 arm7tdmi_context_switch, /* context_switch */
172 arm7tdmi_setup /* cpu setup */
175 #endif /* CPU_ARM7TDMI */
/*
 * ARM8 operation table.  Uses the arm8 purge (write-back + invalidate)
 * and clean primitives; note dcache_inv_range is deliberately aliased
 * to the full purge (see the XXX) since no invalidate-only range op is
 * wired up for this core.
 */
178 struct cpu_functions arm8_cpufuncs = {
182 cpufunc_nullop, /* cpwait */
186 cpufunc_control, /* control */
187 cpufunc_domains, /* domain */
188 arm8_setttb, /* setttb */
189 cpufunc_faultstatus, /* faultstatus */
190 cpufunc_faultaddress, /* faultaddress */
194 arm8_tlb_flushID, /* tlb_flushID */
195 arm8_tlb_flushID_SE, /* tlb_flushID_SE */
196 arm8_tlb_flushID, /* tlb_flushI */
197 arm8_tlb_flushID_SE, /* tlb_flushI_SE */
198 arm8_tlb_flushID, /* tlb_flushD */
199 arm8_tlb_flushID_SE, /* tlb_flushD_SE */
201 /* Cache operations */
203 cpufunc_nullop, /* icache_sync_all */
204 (void *)cpufunc_nullop, /* icache_sync_range */
206 arm8_cache_purgeID, /* dcache_wbinv_all */
207 (void *)arm8_cache_purgeID, /* dcache_wbinv_range */
208 /*XXX*/ (void *)arm8_cache_purgeID, /* dcache_inv_range */
209 (void *)arm8_cache_cleanID, /* dcache_wb_range */
211 arm8_cache_purgeID, /* idcache_wbinv_all */
212 (void *)arm8_cache_purgeID, /* idcache_wbinv_range */
213 cpufunc_nullop, /* l2cache_wbinv_all */
214 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
215 (void *)cpufunc_nullop, /* l2cache_inv_range */
216 (void *)cpufunc_nullop, /* l2cache_wb_range */
218 /* Other functions */
220 cpufunc_nullop, /* flush_prefetchbuf */
221 cpufunc_nullop, /* drain_writebuf */
222 cpufunc_nullop, /* flush_brnchtgt_C */
223 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
225 (void *)cpufunc_nullop, /* sleep */
229 cpufunc_null_fixup, /* dataabt_fixup */
230 cpufunc_null_fixup, /* prefetchabt_fixup */
232 arm8_context_switch, /* context_switch */
234 arm8_setup /* cpu setup */
236 #endif /* CPU_ARM8 */
/*
 * ARM9 operation table.  TLB maintenance comes from the generic ARMv4
 * routines; cache maintenance uses the ARM9-specific set/way and range
 * operations.  tlb_flushI_SE falls back to a full I-TLB flush.
 */
239 struct cpu_functions arm9_cpufuncs = {
243 cpufunc_nullop, /* cpwait */
247 cpufunc_control, /* control */
248 cpufunc_domains, /* Domain */
249 arm9_setttb, /* Setttb */
250 cpufunc_faultstatus, /* Faultstatus */
251 cpufunc_faultaddress, /* Faultaddress */
255 armv4_tlb_flushID, /* tlb_flushID */
256 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
257 armv4_tlb_flushI, /* tlb_flushI */
258 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
259 armv4_tlb_flushD, /* tlb_flushD */
260 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
262 /* Cache operations */
264 arm9_icache_sync_all, /* icache_sync_all */
265 arm9_icache_sync_range, /* icache_sync_range */
267 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
268 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
269 arm9_dcache_inv_range, /* dcache_inv_range */
270 arm9_dcache_wb_range, /* dcache_wb_range */
272 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
273 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
274 cpufunc_nullop, /* l2cache_wbinv_all */
275 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
276 (void *)cpufunc_nullop, /* l2cache_inv_range */
277 (void *)cpufunc_nullop, /* l2cache_wb_range */
279 /* Other functions */
281 cpufunc_nullop, /* flush_prefetchbuf */
282 armv4_drain_writebuf, /* drain_writebuf */
283 cpufunc_nullop, /* flush_brnchtgt_C */
284 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
286 (void *)cpufunc_nullop, /* sleep */
290 cpufunc_null_fixup, /* dataabt_fixup */
291 cpufunc_null_fixup, /* prefetchabt_fixup */
293 arm9_context_switch, /* context_switch */
295 arm9_setup /* cpu setup */
298 #endif /* CPU_ARM9 */
300 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARMv5 "EC" (extended-cache, e.g. ARM9E) operation table: ARMv4 TLB
 * routines with arm10 single-entry variants, armv5_ec cache routines,
 * and the arm10 context-switch/setup entry points.  No L2 support.
 */
301 struct cpu_functions armv5_ec_cpufuncs = {
305 cpufunc_nullop, /* cpwait */
309 cpufunc_control, /* control */
310 cpufunc_domains, /* Domain */
311 armv5_ec_setttb, /* Setttb */
312 cpufunc_faultstatus, /* Faultstatus */
313 cpufunc_faultaddress, /* Faultaddress */
317 armv4_tlb_flushID, /* tlb_flushID */
318 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
319 armv4_tlb_flushI, /* tlb_flushI */
320 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
321 armv4_tlb_flushD, /* tlb_flushD */
322 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
324 /* Cache operations */
326 armv5_ec_icache_sync_all, /* icache_sync_all */
327 armv5_ec_icache_sync_range, /* icache_sync_range */
329 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
330 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
331 armv5_ec_dcache_inv_range, /* dcache_inv_range */
332 armv5_ec_dcache_wb_range, /* dcache_wb_range */
334 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
335 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
337 cpufunc_nullop, /* l2cache_wbinv_all */
338 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
339 (void *)cpufunc_nullop, /* l2cache_inv_range */
340 (void *)cpufunc_nullop, /* l2cache_wb_range */
342 /* Other functions */
344 cpufunc_nullop, /* flush_prefetchbuf */
345 armv4_drain_writebuf, /* drain_writebuf */
346 cpufunc_nullop, /* flush_brnchtgt_C */
347 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
349 (void *)cpufunc_nullop, /* sleep */
353 cpufunc_null_fixup, /* dataabt_fixup */
354 cpufunc_null_fixup, /* prefetchabt_fixup */
356 arm10_context_switch, /* context_switch */
358 arm10_setup /* cpu setup */
/*
 * Marvell Sheeva (Feroceon-class ARMv5TE) operation table.  Shares the
 * armv5_ec "all" cache operations but provides its own range and L2
 * routines plus a real sleep hook.  (The idcache_wbinv_range slot
 * comment below was previously mislabeled "idcache_wbinv_all".)
 */
362 struct cpu_functions sheeva_cpufuncs = {
366 cpufunc_nullop, /* cpwait */
370 cpufunc_control, /* control */
371 cpufunc_domains, /* Domain */
372 sheeva_setttb, /* Setttb */
373 cpufunc_faultstatus, /* Faultstatus */
374 cpufunc_faultaddress, /* Faultaddress */
378 armv4_tlb_flushID, /* tlb_flushID */
379 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
380 armv4_tlb_flushI, /* tlb_flushI */
381 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
382 armv4_tlb_flushD, /* tlb_flushD */
383 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
385 /* Cache operations */
387 armv5_ec_icache_sync_all, /* icache_sync_all */
388 armv5_ec_icache_sync_range, /* icache_sync_range */
390 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
391 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
392 sheeva_dcache_inv_range, /* dcache_inv_range */
393 sheeva_dcache_wb_range, /* dcache_wb_range */
395 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
396 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
398 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
399 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
400 sheeva_l2cache_inv_range, /* l2cache_inv_range */
401 sheeva_l2cache_wb_range, /* l2cache_wb_range */
403 /* Other functions */
405 cpufunc_nullop, /* flush_prefetchbuf */
406 armv4_drain_writebuf, /* drain_writebuf */
407 cpufunc_nullop, /* flush_brnchtgt_C */
408 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
410 sheeva_cpu_sleep, /* sleep */
414 cpufunc_null_fixup, /* dataabt_fixup */
415 cpufunc_null_fixup, /* prefetchabt_fixup */
417 arm10_context_switch, /* context_switch */
419 arm10_setup /* cpu setup */
421 #endif /* CPU_ARM9E || CPU_ARM10 */
/*
 * ARM10 operation table: ARMv4 TLB routines with arm10 single-entry
 * variants, full arm10 cache range/all operations, no L2 support and
 * no sleep hook.
 */
424 struct cpu_functions arm10_cpufuncs = {
428 cpufunc_nullop, /* cpwait */
432 cpufunc_control, /* control */
433 cpufunc_domains, /* Domain */
434 arm10_setttb, /* Setttb */
435 cpufunc_faultstatus, /* Faultstatus */
436 cpufunc_faultaddress, /* Faultaddress */
440 armv4_tlb_flushID, /* tlb_flushID */
441 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
442 armv4_tlb_flushI, /* tlb_flushI */
443 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
444 armv4_tlb_flushD, /* tlb_flushD */
445 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
447 /* Cache operations */
449 arm10_icache_sync_all, /* icache_sync_all */
450 arm10_icache_sync_range, /* icache_sync_range */
452 arm10_dcache_wbinv_all, /* dcache_wbinv_all */
453 arm10_dcache_wbinv_range, /* dcache_wbinv_range */
454 arm10_dcache_inv_range, /* dcache_inv_range */
455 arm10_dcache_wb_range, /* dcache_wb_range */
457 arm10_idcache_wbinv_all, /* idcache_wbinv_all */
458 arm10_idcache_wbinv_range, /* idcache_wbinv_range */
459 cpufunc_nullop, /* l2cache_wbinv_all */
460 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
461 (void *)cpufunc_nullop, /* l2cache_inv_range */
462 (void *)cpufunc_nullop, /* l2cache_wb_range */
464 /* Other functions */
466 cpufunc_nullop, /* flush_prefetchbuf */
467 armv4_drain_writebuf, /* drain_writebuf */
468 cpufunc_nullop, /* flush_brnchtgt_C */
469 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
471 (void *)cpufunc_nullop, /* sleep */
475 cpufunc_null_fixup, /* dataabt_fixup */
476 cpufunc_null_fixup, /* prefetchabt_fixup */
478 arm10_context_switch, /* context_switch */
480 arm10_setup /* cpu setup */
483 #endif /* CPU_ARM10 */
/*
 * Marvell PJ4B in ARMv7 mode.  cpwait is a write-buffer drain; all TLB
 * and cache maintenance uses the generic ARMv7 routines.  There is no
 * I-only "sync all" op, so icache_sync_all is served by the combined
 * I+D wbinv-all.  (The idcache_wbinv_range slot comment below was
 * previously mislabeled "idcache_wbinv_all".)
 */
486 struct cpu_functions pj4bv7_cpufuncs = {
490 arm11_drain_writebuf, /* cpwait */
494 cpufunc_control, /* control */
495 cpufunc_domains, /* Domain */
496 pj4b_setttb, /* Setttb */
497 cpufunc_faultstatus, /* Faultstatus */
498 cpufunc_faultaddress, /* Faultaddress */
502 armv7_tlb_flushID, /* tlb_flushID */
503 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
504 armv7_tlb_flushID, /* tlb_flushI */
505 armv7_tlb_flushID_SE, /* tlb_flushI_SE */
506 armv7_tlb_flushID, /* tlb_flushD */
507 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
509 /* Cache operations */
510 armv7_idcache_wbinv_all, /* icache_sync_all */
511 armv7_icache_sync_range, /* icache_sync_range */
513 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
514 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
515 armv7_dcache_inv_range, /* dcache_inv_range */
516 armv7_dcache_wb_range, /* dcache_wb_range */
518 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
519 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
521 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
522 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
523 (void *)cpufunc_nullop, /* l2cache_inv_range */
524 (void *)cpufunc_nullop, /* l2cache_wb_range */
526 /* Other functions */
528 pj4b_drain_readbuf, /* flush_prefetchbuf */
529 arm11_drain_writebuf, /* drain_writebuf */
530 pj4b_flush_brnchtgt_all, /* flush_brnchtgt_C */
531 pj4b_flush_brnchtgt_va, /* flush_brnchtgt_E */
533 (void *)cpufunc_nullop, /* sleep */
537 cpufunc_null_fixup, /* dataabt_fixup */
538 cpufunc_null_fixup, /* prefetchabt_fixup */
540 arm11_context_switch, /* context_switch */
542 pj4bv7_setup /* cpu setup */
/*
 * Marvell PJ4B in ARMv6 mode.  Uses the ARM11/ARMv6 TLB and "all"
 * cache routines with PJ4B-specific range operations.  (The
 * idcache_wbinv_range slot comment below was previously mislabeled
 * "idcache_wbinv_all".)
 */
545 struct cpu_functions pj4bv6_cpufuncs = {
549 arm11_drain_writebuf, /* cpwait */
553 cpufunc_control, /* control */
554 cpufunc_domains, /* Domain */
555 pj4b_setttb, /* Setttb */
556 cpufunc_faultstatus, /* Faultstatus */
557 cpufunc_faultaddress, /* Faultaddress */
561 arm11_tlb_flushID, /* tlb_flushID */
562 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
563 arm11_tlb_flushI, /* tlb_flushI */
564 arm11_tlb_flushI_SE, /* tlb_flushI_SE */
565 arm11_tlb_flushD, /* tlb_flushD */
566 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
568 /* Cache operations */
569 armv6_icache_sync_all, /* icache_sync_all */
570 pj4b_icache_sync_range, /* icache_sync_range */
572 armv6_dcache_wbinv_all, /* dcache_wbinv_all */
573 pj4b_dcache_wbinv_range, /* dcache_wbinv_range */
574 pj4b_dcache_inv_range, /* dcache_inv_range */
575 pj4b_dcache_wb_range, /* dcache_wb_range */
577 armv6_idcache_wbinv_all, /* idcache_wbinv_all */
578 pj4b_idcache_wbinv_range, /* idcache_wbinv_range */
580 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
581 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
582 (void *)cpufunc_nullop, /* l2cache_inv_range */
583 (void *)cpufunc_nullop, /* l2cache_wb_range */
585 /* Other functions */
587 pj4b_drain_readbuf, /* flush_prefetchbuf */
588 arm11_drain_writebuf, /* drain_writebuf */
589 pj4b_flush_brnchtgt_all, /* flush_brnchtgt_C */
590 pj4b_flush_brnchtgt_va, /* flush_brnchtgt_E */
592 (void *)cpufunc_nullop, /* sleep */
596 cpufunc_null_fixup, /* dataabt_fixup */
597 cpufunc_null_fixup, /* prefetchabt_fixup */
599 arm11_context_switch, /* context_switch */
601 pj4bv6_setup /* cpu setup */
603 #endif /* CPU_MV_PJ4B */
/*
 * Intel/DEC StrongARM SA-110 operation table: ARMv4 TLB routines plus
 * the shared SA-1 cache primitives.  dcache_inv_range falls back to a
 * full purge (write-back + invalidate) — see the XXX — since no
 * invalidate-only range op is wired up.
 */
606 struct cpu_functions sa110_cpufuncs = {
610 cpufunc_nullop, /* cpwait */
614 cpufunc_control, /* control */
615 cpufunc_domains, /* domain */
616 sa1_setttb, /* setttb */
617 cpufunc_faultstatus, /* faultstatus */
618 cpufunc_faultaddress, /* faultaddress */
622 armv4_tlb_flushID, /* tlb_flushID */
623 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
624 armv4_tlb_flushI, /* tlb_flushI */
625 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
626 armv4_tlb_flushD, /* tlb_flushD */
627 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
629 /* Cache operations */
631 sa1_cache_syncI, /* icache_sync_all */
632 sa1_cache_syncI_rng, /* icache_sync_range */
634 sa1_cache_purgeD, /* dcache_wbinv_all */
635 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
636 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
637 sa1_cache_cleanD_rng, /* dcache_wb_range */
639 sa1_cache_purgeID, /* idcache_wbinv_all */
640 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
641 cpufunc_nullop, /* l2cache_wbinv_all */
642 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
643 (void *)cpufunc_nullop, /* l2cache_inv_range */
644 (void *)cpufunc_nullop, /* l2cache_wb_range */
646 /* Other functions */
648 cpufunc_nullop, /* flush_prefetchbuf */
649 armv4_drain_writebuf, /* drain_writebuf */
650 cpufunc_nullop, /* flush_brnchtgt_C */
651 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
653 (void *)cpufunc_nullop, /* sleep */
657 cpufunc_null_fixup, /* dataabt_fixup */
658 cpufunc_null_fixup, /* prefetchabt_fixup */
660 sa110_context_switch, /* context_switch */
662 sa110_setup /* cpu setup */
664 #endif /* CPU_SA110 */
666 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * StrongARM SA-1100/SA-1110 operation table.  Identical to SA-110 for
 * TLB/cache (shared SA-1 primitives, with the same purge-for-invalidate
 * XXX fallback) but adds a read-buffer drain for flush_prefetchbuf and
 * a real low-power sleep hook.
 */
667 struct cpu_functions sa11x0_cpufuncs = {
671 cpufunc_nullop, /* cpwait */
675 cpufunc_control, /* control */
676 cpufunc_domains, /* domain */
677 sa1_setttb, /* setttb */
678 cpufunc_faultstatus, /* faultstatus */
679 cpufunc_faultaddress, /* faultaddress */
683 armv4_tlb_flushID, /* tlb_flushID */
684 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
685 armv4_tlb_flushI, /* tlb_flushI */
686 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
687 armv4_tlb_flushD, /* tlb_flushD */
688 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
690 /* Cache operations */
692 sa1_cache_syncI, /* icache_sync_all */
693 sa1_cache_syncI_rng, /* icache_sync_range */
695 sa1_cache_purgeD, /* dcache_wbinv_all */
696 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
697 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
698 sa1_cache_cleanD_rng, /* dcache_wb_range */
700 sa1_cache_purgeID, /* idcache_wbinv_all */
701 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
702 cpufunc_nullop, /* l2cache_wbinv_all */
703 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
704 (void *)cpufunc_nullop, /* l2cache_inv_range */
705 (void *)cpufunc_nullop, /* l2cache_wb_range */
707 /* Other functions */
709 sa11x0_drain_readbuf, /* flush_prefetchbuf */
710 armv4_drain_writebuf, /* drain_writebuf */
711 cpufunc_nullop, /* flush_brnchtgt_C */
712 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
714 sa11x0_cpu_sleep, /* sleep */
718 cpufunc_null_fixup, /* dataabt_fixup */
719 cpufunc_null_fixup, /* prefetchabt_fixup */
721 sa11x0_context_switch, /* context_switch */
723 sa11x0_setup /* cpu setup */
725 #endif /* CPU_SA1100 || CPU_SA1110 */
/*
 * Intel IXP12x0 (StrongARM-core network processor) operation table.
 * Same SA-1 cache/TLB primitives as the SA-11x0, with an IXP12x0
 * read-buffer drain and its own context-switch/setup entry points.
 */
728 struct cpu_functions ixp12x0_cpufuncs = {
732 cpufunc_nullop, /* cpwait */
736 cpufunc_control, /* control */
737 cpufunc_domains, /* domain */
738 sa1_setttb, /* setttb */
739 cpufunc_faultstatus, /* faultstatus */
740 cpufunc_faultaddress, /* faultaddress */
744 armv4_tlb_flushID, /* tlb_flushID */
745 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
746 armv4_tlb_flushI, /* tlb_flushI */
747 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
748 armv4_tlb_flushD, /* tlb_flushD */
749 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
751 /* Cache operations */
753 sa1_cache_syncI, /* icache_sync_all */
754 sa1_cache_syncI_rng, /* icache_sync_range */
756 sa1_cache_purgeD, /* dcache_wbinv_all */
757 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
758 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
759 sa1_cache_cleanD_rng, /* dcache_wb_range */
761 sa1_cache_purgeID, /* idcache_wbinv_all */
762 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
763 cpufunc_nullop, /* l2cache_wbinv_all */
764 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
765 (void *)cpufunc_nullop, /* l2cache_inv_range */
766 (void *)cpufunc_nullop, /* l2cache_wb_range */
768 /* Other functions */
770 ixp12x0_drain_readbuf, /* flush_prefetchbuf */
771 armv4_drain_writebuf, /* drain_writebuf */
772 cpufunc_nullop, /* flush_brnchtgt_C */
773 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
775 (void *)cpufunc_nullop, /* sleep */
779 cpufunc_null_fixup, /* dataabt_fixup */
780 cpufunc_null_fixup, /* prefetchabt_fixup */
782 ixp12x0_context_switch, /* context_switch */
784 ixp12x0_setup /* cpu setup */
786 #endif /* CPU_IXP12X0 */
788 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
789 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
790 defined(CPU_XSCALE_80219)
/*
 * Intel XScale (80200/80321/80219/PXA2x0/IXP425) operation table.
 * XScale needs a real cpwait (drain of the coprocessor write pipeline)
 * after CP15 writes, uses its own control/setttb routines, and has a
 * genuine invalidate-only D-cache range op (flushD_rng) plus a sleep
 * hook.
 */
792 struct cpu_functions xscale_cpufuncs = {
796 xscale_cpwait, /* cpwait */
800 xscale_control, /* control */
801 cpufunc_domains, /* domain */
802 xscale_setttb, /* setttb */
803 cpufunc_faultstatus, /* faultstatus */
804 cpufunc_faultaddress, /* faultaddress */
808 armv4_tlb_flushID, /* tlb_flushID */
809 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
810 armv4_tlb_flushI, /* tlb_flushI */
811 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
812 armv4_tlb_flushD, /* tlb_flushD */
813 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
815 /* Cache operations */
817 xscale_cache_syncI, /* icache_sync_all */
818 xscale_cache_syncI_rng, /* icache_sync_range */
820 xscale_cache_purgeD, /* dcache_wbinv_all */
821 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
822 xscale_cache_flushD_rng, /* dcache_inv_range */
823 xscale_cache_cleanD_rng, /* dcache_wb_range */
825 xscale_cache_purgeID, /* idcache_wbinv_all */
826 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
827 cpufunc_nullop, /* l2cache_wbinv_all */
828 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
829 (void *)cpufunc_nullop, /* l2cache_inv_range */
830 (void *)cpufunc_nullop, /* l2cache_wb_range */
832 /* Other functions */
834 cpufunc_nullop, /* flush_prefetchbuf */
835 armv4_drain_writebuf, /* drain_writebuf */
836 cpufunc_nullop, /* flush_brnchtgt_C */
837 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
839 xscale_cpu_sleep, /* sleep */
843 cpufunc_null_fixup, /* dataabt_fixup */
844 cpufunc_null_fixup, /* prefetchabt_fixup */
846 xscale_context_switch, /* context_switch */
848 xscale_setup /* cpu setup */
851 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
854 #ifdef CPU_XSCALE_81342
/*
 * XScale core 3 (81342) operation table.  Like the generic XScale
 * table but with core-3 L1 routines and, uniquely in this file's
 * XScale entries, real L2 cache maintenance (xscalec3_l2cache_*)
 * instead of no-ops.
 */
855 struct cpu_functions xscalec3_cpufuncs = {
859 xscale_cpwait, /* cpwait */
863 xscale_control, /* control */
864 cpufunc_domains, /* domain */
865 xscalec3_setttb, /* setttb */
866 cpufunc_faultstatus, /* faultstatus */
867 cpufunc_faultaddress, /* faultaddress */
871 armv4_tlb_flushID, /* tlb_flushID */
872 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
873 armv4_tlb_flushI, /* tlb_flushI */
874 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
875 armv4_tlb_flushD, /* tlb_flushD */
876 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
878 /* Cache operations */
880 xscalec3_cache_syncI, /* icache_sync_all */
881 xscalec3_cache_syncI_rng, /* icache_sync_range */
883 xscalec3_cache_purgeD, /* dcache_wbinv_all */
884 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
885 xscale_cache_flushD_rng, /* dcache_inv_range */
886 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
888 xscalec3_cache_purgeID, /* idcache_wbinv_all */
889 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
890 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
891 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
892 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
893 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
895 /* Other functions */
897 cpufunc_nullop, /* flush_prefetchbuf */
898 armv4_drain_writebuf, /* drain_writebuf */
899 cpufunc_nullop, /* flush_brnchtgt_C */
900 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
902 xscale_cpu_sleep, /* sleep */
906 cpufunc_null_fixup, /* dataabt_fixup */
907 cpufunc_null_fixup, /* prefetchabt_fixup */
909 xscalec3_context_switch, /* context_switch */
911 xscale_setup /* cpu setup */
913 #endif /* CPU_XSCALE_81342 */
916 #if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * Faraday FA526/FA626TE operation table.  ARMv4 TLB routines with
 * FA526-specific single-entry flushes and full FA526 cache, prefetch,
 * branch-target and sleep support.
 */
917 struct cpu_functions fa526_cpufuncs = {
921 cpufunc_nullop, /* cpwait */
925 cpufunc_control, /* control */
926 cpufunc_domains, /* domain */
927 fa526_setttb, /* setttb */
928 cpufunc_faultstatus, /* faultstatus */
929 cpufunc_faultaddress, /* faultaddress */
933 armv4_tlb_flushID, /* tlb_flushID */
934 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
935 armv4_tlb_flushI, /* tlb_flushI */
936 fa526_tlb_flushI_SE, /* tlb_flushI_SE */
937 armv4_tlb_flushD, /* tlb_flushD */
938 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
940 /* Cache operations */
942 fa526_icache_sync_all, /* icache_sync_all */
943 fa526_icache_sync_range, /* icache_sync_range */
945 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
946 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
947 fa526_dcache_inv_range, /* dcache_inv_range */
948 fa526_dcache_wb_range, /* dcache_wb_range */
950 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
951 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
952 cpufunc_nullop, /* l2cache_wbinv_all */
953 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
954 (void *)cpufunc_nullop, /* l2cache_inv_range */
955 (void *)cpufunc_nullop, /* l2cache_wb_range */
957 /* Other functions */
959 fa526_flush_prefetchbuf, /* flush_prefetchbuf */
960 armv4_drain_writebuf, /* drain_writebuf */
961 cpufunc_nullop, /* flush_brnchtgt_C */
962 fa526_flush_brnchtgt_E, /* flush_brnchtgt_E */
964 fa526_cpu_sleep, /* sleep */
968 cpufunc_null_fixup, /* dataabt_fixup */
969 cpufunc_null_fixup, /* prefetchabt_fixup */
971 fa526_context_switch, /* context_switch */
973 fa526_setup /* cpu setup */
975 #endif /* CPU_FA526 || CPU_FA626TE */
977 #if defined(CPU_ARM1136)
/*
 * ARM1136 operation table.  ARM11 TLB/drain routines; cache ops use
 * the arm11x6 "all" routines (which carry the ARM1136 errata
 * workarounds) with generic ARMv6 range operations.
 */
978 struct cpu_functions arm1136_cpufuncs = {
982 cpufunc_nullop, /* cpwait */
986 cpufunc_control, /* control */
987 cpufunc_domains, /* Domain */
988 arm11x6_setttb, /* Setttb */
989 cpufunc_faultstatus, /* Faultstatus */
990 cpufunc_faultaddress, /* Faultaddress */
994 arm11_tlb_flushID, /* tlb_flushID */
995 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
996 arm11_tlb_flushI, /* tlb_flushI */
997 arm11_tlb_flushI_SE, /* tlb_flushI_SE */
998 arm11_tlb_flushD, /* tlb_flushD */
999 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
1001 /* Cache operations */
1003 arm11x6_icache_sync_all, /* icache_sync_all */
1004 arm11x6_icache_sync_range, /* icache_sync_range */
1006 arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
1007 armv6_dcache_wbinv_range, /* dcache_wbinv_range */
1008 armv6_dcache_inv_range, /* dcache_inv_range */
1009 armv6_dcache_wb_range, /* dcache_wb_range */
1011 arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
1012 arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
1014 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
1015 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
1016 (void *)cpufunc_nullop, /* l2cache_inv_range */
1017 (void *)cpufunc_nullop, /* l2cache_wb_range */
1019 /* Other functions */
1021 arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */
1022 arm11_drain_writebuf, /* drain_writebuf */
1023 cpufunc_nullop, /* flush_brnchtgt_C */
1024 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
1026 arm11_sleep, /* sleep */
1028 /* Soft functions */
1030 cpufunc_null_fixup, /* dataabt_fixup */
1031 cpufunc_null_fixup, /* prefetchabt_fixup */
1033 arm11_context_switch, /* context_switch */
1035 arm11x6_setup /* cpu setup */
1037 #endif /* CPU_ARM1136 */
1038 #if defined(CPU_ARM1176)
/*
 * ARM1176 operation table.  Identical layout to the ARM1136 table
 * except for the sleep hook: ARM1176 uses arm11x6_sleep rather than
 * arm11_sleep.
 */
1039 struct cpu_functions arm1176_cpufuncs = {
1042 cpufunc_id, /* id */
1043 cpufunc_nullop, /* cpwait */
1047 cpufunc_control, /* control */
1048 cpufunc_domains, /* Domain */
1049 arm11x6_setttb, /* Setttb */
1050 cpufunc_faultstatus, /* Faultstatus */
1051 cpufunc_faultaddress, /* Faultaddress */
1055 arm11_tlb_flushID, /* tlb_flushID */
1056 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
1057 arm11_tlb_flushI, /* tlb_flushI */
1058 arm11_tlb_flushI_SE, /* tlb_flushI_SE */
1059 arm11_tlb_flushD, /* tlb_flushD */
1060 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
1062 /* Cache operations */
1064 arm11x6_icache_sync_all, /* icache_sync_all */
1065 arm11x6_icache_sync_range, /* icache_sync_range */
1067 arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
1068 armv6_dcache_wbinv_range, /* dcache_wbinv_range */
1069 armv6_dcache_inv_range, /* dcache_inv_range */
1070 armv6_dcache_wb_range, /* dcache_wb_range */
1072 arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
1073 arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
1075 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
1076 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
1077 (void *)cpufunc_nullop, /* l2cache_inv_range */
1078 (void *)cpufunc_nullop, /* l2cache_wb_range */
1080 /* Other functions */
1082 arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */
1083 arm11_drain_writebuf, /* drain_writebuf */
1084 cpufunc_nullop, /* flush_brnchtgt_C */
1085 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
1087 arm11x6_sleep, /* sleep */
1089 /* Soft functions */
1091 cpufunc_null_fixup, /* dataabt_fixup */
1092 cpufunc_null_fixup, /* prefetchabt_fixup */
1094 arm11_context_switch, /* context_switch */
1096 arm11x6_setup /* cpu setup */
1098 #endif /*CPU_ARM1176 */
1100 #if defined(CPU_CORTEXA)
/*
 * Cortex-A operation table.  Generic ARMv7 MMU/cache routines; there
 * is no I-only "sync all" op, so icache_sync_all is served by the
 * combined I+D wbinv-all.  The L2 slots start as no-ops and are
 * replaced at runtime when a PL310 outer cache is enabled (see the
 * note below).
 */
1101 struct cpu_functions cortexa_cpufuncs = {
1104 cpufunc_id, /* id */
1105 cpufunc_nullop, /* cpwait */
1109 cpufunc_control, /* control */
1110 cpufunc_domains, /* Domain */
1111 armv7_setttb, /* Setttb */
1112 cpufunc_faultstatus, /* Faultstatus */
1113 cpufunc_faultaddress, /* Faultaddress */
1117 armv7_tlb_flushID, /* tlb_flushID */
1118 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
1119 arm11_tlb_flushI, /* tlb_flushI */
1120 arm11_tlb_flushI_SE, /* tlb_flushI_SE */
1121 arm11_tlb_flushD, /* tlb_flushD */
1122 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
1124 /* Cache operations */
1126 armv7_idcache_wbinv_all, /* icache_sync_all */
1127 armv7_icache_sync_range, /* icache_sync_range */
1129 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
1130 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
1131 armv7_dcache_inv_range, /* dcache_inv_range */
1132 armv7_dcache_wb_range, /* dcache_wb_range */
1134 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
1135 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
1138 * Note: For CPUs using the PL310 the L2 ops are filled in when the
1139 * L2 cache controller is actually enabled.
1141 cpufunc_nullop, /* l2cache_wbinv_all */
1142 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
1143 (void *)cpufunc_nullop, /* l2cache_inv_range */
1144 (void *)cpufunc_nullop, /* l2cache_wb_range */
1146 /* Other functions */
1148 cpufunc_nullop, /* flush_prefetchbuf */
1149 armv7_drain_writebuf, /* drain_writebuf */
1150 cpufunc_nullop, /* flush_brnchtgt_C */
1151 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
1153 arm11_sleep, /* sleep */
1155 /* Soft functions */
1157 cpufunc_null_fixup, /* dataabt_fixup */
1158 cpufunc_null_fixup, /* prefetchabt_fixup */
1160 armv7_context_switch, /* context_switch */
1162 cortexa_setup /* cpu setup */
1164 #endif /* CPU_CORTEXA */
1167 * Global constants also used by locore.s
/* The active dispatch table; presumably installed at boot by the
 * CPU-identification code (not visible in this chunk — confirm
 * against set_cpufuncs() in the full file). */
1170 struct cpu_functions cpufuncs;
/* NOTE(review): name and the comment suggest this tells the assembly
 * reset path to disable the MMU the ARMv4 way — confirm in locore.s. */
1172 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
1174 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
1175 defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) || \
1176 defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1177 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1178 defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) || \
1179 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1180 defined(CPU_CORTEXA)
/* Probes the CP15 cache-type registers and fills in the arm_*cache*
 * globals above; defined later in this file. */
1182 static void get_cachetype_cp15(void);
1184 /* Additional cache information local to this file. Log2 of some of the
1186 static int arm_dcache_l2_nsets;
1187 static int arm_dcache_l2_assoc;
1188 static int arm_dcache_l2_linesize;
/*
 * Probe cache geometry via CP15 and fill in the arm_* globals
 * (arm_pcache_*, arm_pdcache_*, arm_dcache_align*, arm_dcache_l2_*).
 * On ARMv7-format cache-type registers it walks the cache levels
 * (CLIDR-style level map, selecting each level before reading its size
 * register); otherwise it decodes the classic pre-v7 cache type word.
 * NOTE(review): this extract is missing interleaved lines (declarations,
 * braces, asm operands) — original line numbers are non-contiguous.
 */
1191 get_cachetype_cp15()
1193 u_int ctype, isize, dsize, cpuid;
1194 u_int clevel, csize, i, sel;
/* Read the CP15 cache type register (c0,c0,1). */
1198 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
1201 cpuid = cpufunc_id();
1203 * ...and thus spake the ARM ARM:
1205 * If an <opcode2> value corresponding to an unimplemented or
1206 * reserved ID register is encountered, the System Control
1207 * processor returns the value of the main ID register.
1212 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
/* ARMv7: read the cache level ID word (CPU_CLIDR_LOC decodes it). */
1213 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
1215 arm_cache_level = clevel;
1216 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
/* Walk up to 7 levels; each level's 3-bit type field says what
 * caches exist there. */
1218 while ((type = (clevel & 0x7)) && i < 7) {
1219 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
1220 type == CACHE_SEP_CACHE) {
/* Select the level/D-side, then read its size word. */
1222 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
1224 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
1226 arm_cache_type[sel] = csize;
1227 arm_dcache_align = 1 <<
1228 (CPUV7_CT_xSIZE_LEN(csize) + 4);
1229 arm_dcache_align_mask = arm_dcache_align - 1;
1231 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
1233 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
1235 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
1237 arm_cache_type[sel] = csize;
/* Pre-v7 format: the S bit distinguishes unified vs split caches. */
1243 if ((ctype & CPU_CT_S) == 0)
1244 arm_pcache_unified = 1;
1247 * If you want to know how this code works, go read the ARM ARM.
1250 arm_pcache_type = CPU_CT_CTYPE(ctype);
1252 if (arm_pcache_unified == 0) {
1253 isize = CPU_CT_ISIZE(ctype);
/* M bit selects the 3x (vs 2x) size multiplier encoding. */
1254 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
1255 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
1256 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
1257 if (isize & CPU_CT_xSIZE_M)
1258 arm_picache_line_size = 0; /* not present */
1260 arm_picache_ways = 1;
1262 arm_picache_ways = multiplier <<
1263 (CPU_CT_xSIZE_ASSOC(isize) - 1);
1265 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
/* Same decode for the D (or unified) side. */
1268 dsize = CPU_CT_DSIZE(ctype);
1269 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
1270 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
1271 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
1272 if (dsize & CPU_CT_xSIZE_M)
1273 arm_pdcache_line_size = 0; /* not present */
1275 arm_pdcache_ways = 1;
1277 arm_pdcache_ways = multiplier <<
1278 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
1280 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
1282 arm_dcache_align = arm_pdcache_line_size;
/* Log2 quantities used by the arm9/arm10 set/index cache loops. */
1284 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
1285 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
1286 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
1287 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
1290 arm_dcache_align_mask = arm_dcache_align - 1;
1293 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
1295 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
1296 defined(CPU_IXP12X0)
1297 /* Cache information for CPUs without cache type registers. */
/* One table row per known CPU id; values are hard-coded since these
 * parts cannot be probed via CP15. */
1301 int ct_pcache_unified;
1302 int ct_pdcache_size;
1303 int ct_pdcache_line_size;
1304 int ct_pdcache_ways;
1305 int ct_picache_size;
1306 int ct_picache_line_size;
1307 int ct_picache_ways;
1310 struct cachetab cachetab[] = {
1311 /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
1312 /* XXX is this type right for SA-1? */
1313 { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
1314 { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
1315 { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
1316 { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
/* Zero cpuid terminates the table. */
1317 { 0, 0, 0, 0, 0, 0, 0, 0}
1320 static void get_cachetype_table(void);
/*
 * Look the running CPU up in cachetab[] and copy its fixed cache
 * geometry into the arm_* globals (same outputs as get_cachetype_cp15).
 */
1323 get_cachetype_table()
1326 u_int32_t cpuid = cpufunc_id();
1328 for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
1329 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
1330 arm_pcache_type = cachetab[i].ct_pcache_type;
1331 arm_pcache_unified = cachetab[i].ct_pcache_unified;
1332 arm_pdcache_size = cachetab[i].ct_pdcache_size;
1333 arm_pdcache_line_size =
1334 cachetab[i].ct_pdcache_line_size;
1335 arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
1336 arm_picache_size = cachetab[i].ct_picache_size;
1337 arm_picache_line_size =
1338 cachetab[i].ct_picache_line_size;
1339 arm_picache_ways = cachetab[i].ct_picache_ways;
1342 arm_dcache_align = arm_pdcache_line_size;
1344 arm_dcache_align_mask = arm_dcache_align - 1;
1347 #endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
/*
 * set_cpufuncs() body (the signature line is missing from this extract).
 * Identifies the running CPU from its CP15 ID register, installs the
 * matching cpu_functions table into the global `cpufuncs`, probes cache
 * geometry (get_cachetype_cp15() or get_cachetype_table()), selects the
 * pmap PTE initialisation flavour, and sets per-CPU policy flags
 * (cpu_reset_needs_v4_MMU_disable, cpu_do_powersave).  Falls through to
 * panic() for unrecognised CPU ids.
 */
1350  * Cannot panic here as we may not have a console yet ...
1356 cputype = cpufunc_id();
1357 cputype &= CPU_ID_CPU_MASK;
1360 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
1361 * CPU type where we want to use it by default, then we set it.
/* ARM7TDMI: matched by implementor + v4T architecture field. */
1365 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1366 CPU_ID_IS7(cputype) &&
1367 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
1368 cpufuncs = arm7tdmi_cpufuncs;
1369 cpu_reset_needs_v4_MMU_disable = 0;
1370 get_cachetype_cp15();
1371 pmap_pte_init_generic();
1376 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
1377 (cputype & 0x0000f000) == 0x00008000) {
1378 cpufuncs = arm8_cpufuncs;
1379 cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
1380 get_cachetype_cp15();
1381 pmap_pte_init_arm8();
1384 #endif /* CPU_ARM8 */
1386 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
1387 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
1388 (cputype & 0x0000f000) == 0x00009000) {
1389 cpufuncs = arm9_cpufuncs;
1390 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1391 get_cachetype_cp15();
/* Precompute set/index loop bounds for the arm9 dcache routines
 * from the log2 geometry probed above. */
1392 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1393 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
1394 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
1395 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1396 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
1397 #ifdef ARM9_CACHE_WRITE_THROUGH
1398 pmap_pte_init_arm9();
1400 pmap_pte_init_generic();
1404 #endif /* CPU_ARM9 */
1405 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* Marvell Sheeva cores: program the extended control register first. */
1406 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
1407 cputype == CPU_ID_MV88FR571_41) {
1408 uint32_t sheeva_ctrl;
1410 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
1413 * Workaround for Marvell MV78100 CPU: Cache prefetch
1414 * mechanism may affect the cache coherency validity,
1415 * so it needs to be disabled.
1417 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
1418 * L2 Prefetching Mechanism) for details.
1420 if (cputype == CPU_ID_MV88FR571_VD ||
1421 cputype == CPU_ID_MV88FR571_41)
1422 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
1424 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
1426 cpufuncs = sheeva_cpufuncs;
1427 get_cachetype_cp15();
1428 pmap_pte_init_generic();
1430 } else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
1431 cpufuncs = armv5_ec_cpufuncs;
1432 get_cachetype_cp15();
1433 pmap_pte_init_generic();
1436 #endif /* CPU_ARM9E || CPU_ARM10 */
1438 if (/* cputype == CPU_ID_ARM1020T || */
1439 cputype == CPU_ID_ARM1020E) {
1441 * Select write-through cacheing (this isn't really an
1442 * option on ARM1020T).
1444 cpufuncs = arm10_cpufuncs;
1445 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1446 get_cachetype_cp15();
/* Same loop-bound precomputation as the arm9 case above. */
1447 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1448 arm10_dcache_sets_max =
1449 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1450 arm10_dcache_sets_inc;
1451 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1452 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
1453 pmap_pte_init_generic();
1456 #endif /* CPU_ARM10 */
1457 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1458 if (cputype == CPU_ID_ARM1136JS
1459 || cputype == CPU_ID_ARM1136JSR1
1460 || cputype == CPU_ID_ARM1176JZS) {
1462 if (cputype == CPU_ID_ARM1136JS
1463 || cputype == CPU_ID_ARM1136JSR1)
1464 cpufuncs = arm1136_cpufuncs;
1467 if (cputype == CPU_ID_ARM1176JZS)
1468 cpufuncs = arm1176_cpufuncs;
1470 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1471 get_cachetype_cp15();
1473 pmap_pte_init_mmu_v6();
1477 #endif /* CPU_ARM1136 || CPU_ARM1176 */
1479 if (cputype == CPU_ID_CORTEXA7 ||
1480 cputype == CPU_ID_CORTEXA8R1 ||
1481 cputype == CPU_ID_CORTEXA8R2 ||
1482 cputype == CPU_ID_CORTEXA8R3 ||
1483 cputype == CPU_ID_CORTEXA9R1 ||
1484 cputype == CPU_ID_CORTEXA9R2 ||
1485 cputype == CPU_ID_CORTEXA9R3 ||
1486 cputype == CPU_ID_CORTEXA15 ) {
1487 cpufuncs = cortexa_cpufuncs;
1488 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1489 get_cachetype_cp15();
1491 pmap_pte_init_mmu_v6();
1492 /* Use powersave on this CPU. */
1493 cpu_do_powersave = 1;
1496 #endif /* CPU_CORTEXA */
1498 #if defined(CPU_MV_PJ4B)
1499 if (cputype == CPU_ID_MV88SV581X_V6 ||
1500 cputype == CPU_ID_MV88SV581X_V7 ||
1501 cputype == CPU_ID_MV88SV584X_V7 ||
1502 cputype == CPU_ID_ARM_88SV581X_V6 ||
1503 cputype == CPU_ID_ARM_88SV581X_V7) {
/* Pick the v7 table only when ThumbEE is reported by PFR0. */
1504 if (cpu_pfr(0) & ARM_PFR0_THUMBEE_MASK)
1505 cpufuncs = pj4bv7_cpufuncs;
1507 cpufuncs = pj4bv6_cpufuncs;
1509 get_cachetype_cp15();
1510 pmap_pte_init_mmu_v6();
1512 } else if (cputype == CPU_ID_ARM_88SV584X_V6 ||
1513 cputype == CPU_ID_MV88SV584X_V6) {
1514 cpufuncs = pj4bv6_cpufuncs;
1515 get_cachetype_cp15();
1516 pmap_pte_init_mmu_v6();
1520 #endif /* CPU_MV_PJ4B */
/* StrongARM / IXP12x0 parts have no cache-type register, so the
 * table-driven probe is used instead of CP15. */
1522 if (cputype == CPU_ID_SA110) {
1523 cpufuncs = sa110_cpufuncs;
1524 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1525 get_cachetype_table();
1526 pmap_pte_init_sa1();
1529 #endif /* CPU_SA110 */
1531 if (cputype == CPU_ID_SA1100) {
1532 cpufuncs = sa11x0_cpufuncs;
1533 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1534 get_cachetype_table();
1535 pmap_pte_init_sa1();
1536 /* Use powersave on this CPU. */
1537 cpu_do_powersave = 1;
1541 #endif /* CPU_SA1100 */
1543 if (cputype == CPU_ID_SA1110) {
1544 cpufuncs = sa11x0_cpufuncs;
1545 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1546 get_cachetype_table();
1547 pmap_pte_init_sa1();
1548 /* Use powersave on this CPU. */
1549 cpu_do_powersave = 1;
1553 #endif /* CPU_SA1110 */
1554 #if defined(CPU_FA526) || defined(CPU_FA626TE)
1555 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
1556 cpufuncs = fa526_cpufuncs;
1557 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1558 get_cachetype_cp15();
1559 pmap_pte_init_generic();
1561 /* Use powersave on this CPU. */
1562 cpu_do_powersave = 1;
1566 #endif /* CPU_FA526 || CPU_FA626TE */
1568 if (cputype == CPU_ID_IXP1200) {
1569 cpufuncs = ixp12x0_cpufuncs;
1570 cpu_reset_needs_v4_MMU_disable = 1;
1571 get_cachetype_table();
1572 pmap_pte_init_sa1();
1575 #endif /* CPU_IXP12X0 */
1576 #ifdef CPU_XSCALE_80200
1577 if (cputype == CPU_ID_80200) {
1578 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1582 #if defined(XSCALE_CCLKCFG)
1584 * Crank CCLKCFG to maximum legal value.
1586 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1588 : "r" (XSCALE_CCLKCFG));
1592 * XXX Disable ECC in the Bus Controller Unit; we
1593 * don't really support it, yet.  Clear any pending
1594 * error indications.
1596 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1598 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV))
1600 cpufuncs = xscale_cpufuncs;
1602 * i80200 errata: Step-A0 and A1 have a bug where
1603 * D$ dirty bits are not cleared on "invalidate by
1606 * Workaround: Clean cache line before invalidating.
1608 if (rev == 0 || rev == 1)
1609 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1611 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1612 get_cachetype_cp15();
1613 pmap_pte_init_xscale();
1616 #endif /* CPU_XSCALE_80200 */
1617 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1618 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1619 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1620 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1621 cpufuncs = xscale_cpufuncs;
1622 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1623 get_cachetype_cp15();
1624 pmap_pte_init_xscale();
1627 #endif /* CPU_XSCALE_80321 */
1629 #if defined(CPU_XSCALE_81342)
1630 if (cputype == CPU_ID_81342) {
1631 cpufuncs = xscalec3_cpufuncs;
1632 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1633 get_cachetype_cp15();
1634 pmap_pte_init_xscale();
1637 #endif /* CPU_XSCALE_81342 */
1638 #ifdef CPU_XSCALE_PXA2X0
1639 /* ignore core revision to test PXA2xx CPUs */
1640 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1641 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1642 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1644 cpufuncs = xscale_cpufuncs;
1645 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1646 get_cachetype_cp15();
1647 pmap_pte_init_xscale();
1649 /* Use powersave on this CPU. */
1650 cpu_do_powersave = 1;
1654 #endif /* CPU_XSCALE_PXA2X0 */
1655 #ifdef CPU_XSCALE_IXP425
1656 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1657 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
1659 cpufuncs = xscale_cpufuncs;
1660 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1661 get_cachetype_cp15();
1662 pmap_pte_init_xscale();
1666 #endif /* CPU_XSCALE_IXP425 */
/* No match above: unsupported CPU — cannot continue. */
1668 * Bzzzz. And the answer was ...
1670 panic("No support for this CPU type (%08x) in kernel", cputype);
1671 return(ARCHITECTURE_NOT_PRESENT);
/* Success path: tell UMA about the dcache line mask for allocation
 * alignment. */
1673 uma_set_align(arm_dcache_align_mask);
1678  * Fixup routines for data and prefetch aborts.
1680  * Several compile time symbols are used
1682  * DEBUG_FAULT_CORRECTION - Print debugging information during the
1683  * correction of registers after a fault.
1684  * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1685  * when defined should use late aborts
1690  * Null abort fixup routine.
1691  * For use when no fixup is required.
/* K&R-style definition; the argument is ignored and the routine always
 * reports a successful (no-op) fixup. */
1694 cpufunc_null_fixup(arg)
1697 return(ABORT_FIXUP_OK);
1701 #if defined(CPU_ARM7TDMI)
1703 #ifdef DEBUG_FAULT_CORRECTION
/* Debug hooks compile away to nothing unless DEBUG_FAULT_CORRECTION. */
1704 #define DFC_PRINTF(x)		printf x
1705 #define DFC_DISASSEMBLE(x)	disassemble(x)
1707 #define DFC_PRINTF(x)		/* nothing */
1708 #define DFC_DISASSEMBLE(x)	/* nothing */
1712  * "Early" data abort fixup.
1714  * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1715  * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1717  * In early aborts, we may have to fix up LDM, STM, LDC and STC.
/* K&R definition; `arg` is the trapframe.  Undoes the base-register
 * writeback performed by the faulting instruction so it can be
 * restarted.  Returns ABORT_FIXUP_OK or ABORT_FIXUP_FAILED.
 * NOTE(review): lines are missing from this extract (non-contiguous
 * original line numbers); code left untouched. */
1720 early_abort_fixup(arg)
1723 trapframe_t *frame = arg;
1725 u_int fault_instruction;
1728 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1730 /* Ok an abort in SVC mode */
1733 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1734 * as the fault happened in svc mode but we need it in the
1735 * usr slot so we can treat the registers as an array of ints
1737 * NOTE: This PC is in the position but writeback is not
1739 * Doing it like this is more efficient than trapping this
1740 * case in all possible locations in the following fixup code.
1743 saved_lr = frame->tf_usr_lr;
1744 frame->tf_usr_lr = frame->tf_svc_lr;
1747 * Note the trapframe does not have the SVC r13 so a fault
1748 * from an instruction with writeback to r13 in SVC mode is
1749 * not allowed.  This should not happen as the kstack is
1754 /* Get fault address and status from the CPU */
1756 fault_pc = frame->tf_pc;
1757 fault_instruction = *((volatile unsigned int *)fault_pc);
1759 /* Decode the fault instruction and fix the registers as needed */
/* Opcode bits 27:25 == 100 => LDM/STM class. */
1761 if ((fault_instruction & 0x0e000000) == 0x08000000) {
1765 int *registers = &frame->tf_r0;
1767 DFC_PRINTF(("LDM/STM\n"));
1768 DFC_DISASSEMBLE(fault_pc);
/* Bit 21 = W (writeback): only then does the base need undoing. */
1769 if (fault_instruction & (1 << 21)) {
1770 DFC_PRINTF(("This instruction must be corrected\n"));
1771 base = (fault_instruction >> 16) & 0x0f;
1773 return ABORT_FIXUP_FAILED;
1774 /* Count registers transferred */
1776 for (loop = 0; loop < 16; ++loop) {
1777 if (fault_instruction & (1<<loop))
1780 DFC_PRINTF(("%d registers used\n", count));
1781 DFC_PRINTF(("Corrected r%d by %d bytes ",
/* Bit 23 = U (up/down): direction of the original adjustment. */
1783 if (fault_instruction & (1 << 23)) {
1784 DFC_PRINTF(("down\n"));
1785 registers[base] -= count * 4;
1787 DFC_PRINTF(("up\n"));
1788 registers[base] += count * 4;
/* Opcode bits 27:25 == 110 => LDC/STC class. */
1791 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1794 int *registers = &frame->tf_r0;
1796 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1798 DFC_DISASSEMBLE(fault_pc);
1800 /* Only need to fix registers if write back is turned on */
1802 if ((fault_instruction & (1 << 21)) != 0) {
1803 base = (fault_instruction >> 16) & 0x0f;
1805 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1806 return ABORT_FIXUP_FAILED;
1808 return ABORT_FIXUP_FAILED;
/* LDC/STC immediate is a word offset, scaled by 4. */
1810 offset = (fault_instruction & 0xff) << 2;
1811 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1812 if ((fault_instruction & (1 << 23)) != 0)
1814 registers[base] += offset;
1815 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1817 } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1818 return ABORT_FIXUP_FAILED;
1820 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1822 /* Ok an abort in SVC mode */
1825 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1826 * as the fault happened in svc mode but we need it in the
1827 * usr slot so we can treat the registers as an array of ints
1829 * NOTE: This PC is in the position but writeback is not
1831 * Doing it like this is more efficient than trapping this
1832 * case in all possible locations in the prior fixup code.
/* Restore the lr shuffle performed on entry. */
1835 frame->tf_svc_lr = frame->tf_usr_lr;
1836 frame->tf_usr_lr = saved_lr;
1839 * Note the trapframe does not have the SVC r13 so a fault
1840 * from an instruction with writeback to r13 in SVC mode is
1841 * not allowed.  This should not happen as the kstack is
1846 return(ABORT_FIXUP_OK);
1848 #endif	/* CPU_ARM2/250/3/6/7 */
1851 #if defined(CPU_ARM7TDMI)
1853  * "Late" (base updated) data abort fixup
1855  * For ARM6 (in late-abort mode) and ARM7.
1857  * In this model, all data-transfer instructions need fixing up.  We defer
1858  * LDM, STM, LDC and STC fixup to the early-abort handler.
/* K&R definition; `arg` is the trapframe.  Undoes base writeback for
 * single-register ldr/str (including register-shifted offsets), then
 * chains to early_abort_fixup() for the block/coprocessor forms.
 * NOTE(review): lines are missing from this extract (non-contiguous
 * original line numbers); code left untouched. */
1861 late_abort_fixup(arg)
1864 trapframe_t *frame = arg;
1866 u_int fault_instruction;
1869 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1871 /* Ok an abort in SVC mode */
1874 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1875 * as the fault happened in svc mode but we need it in the
1876 * usr slot so we can treat the registers as an array of ints
1878 * NOTE: This PC is in the position but writeback is not
1880 * Doing it like this is more efficient than trapping this
1881 * case in all possible locations in the following fixup code.
1884 saved_lr = frame->tf_usr_lr;
1885 frame->tf_usr_lr = frame->tf_svc_lr;
1888 * Note the trapframe does not have the SVC r13 so a fault
1889 * from an instruction with writeback to r13 in SVC mode is
1890 * not allowed.  This should not happen as the kstack is
1895 /* Get fault address and status from the CPU */
1897 fault_pc = frame->tf_pc;
1898 fault_instruction = *((volatile unsigned int *)fault_pc);
1900 /* Decode the fault instruction and fix the registers as needed */
1902 /* Was is a swap instruction ? */
/* SWP/SWPB: no base writeback, nothing to undo. */
1904 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1905 DFC_DISASSEMBLE(fault_pc);
1906 } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1908 /* Was is a ldr/str instruction */
1909 /* This is for late abort only */
1913 int *registers = &frame->tf_r0;
1915 DFC_DISASSEMBLE(fault_pc);
1917 /* This is for late abort only */
/* Bit 24 clear = post-indexed; bit 21 set = writeback: either way
 * the base register was updated and must be restored. */
1919 if ((fault_instruction & (1 << 24)) == 0
1920 || (fault_instruction & (1 << 21)) != 0) {
1921 /* postindexed ldr/str with no writeback */
1923 base = (fault_instruction >> 16) & 0x0f;
1925 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1926 return ABORT_FIXUP_FAILED;
1928 return ABORT_FIXUP_FAILED;
1929 DFC_PRINTF(("late abt fix: r%d=%08x : ",
1930 base, registers[base]));
1931 if ((fault_instruction & (1 << 25)) == 0) {
1932 /* Immediate offset - easy */
1934 offset = fault_instruction & 0xfff;
1935 if ((fault_instruction & (1 << 23)))
1937 registers[base] += offset;
1938 DFC_PRINTF(("imm=%08x ", offset));
1940 /* offset is a shifted register */
1943 offset = fault_instruction & 0x0f;
1945 return ABORT_FIXUP_FAILED;
1948 * Register offset - hard we have to
1949 * cope with shifts !
1951 offset = registers[offset];
1953 if ((fault_instruction & (1 << 4)) == 0)
1954 /* shift with amount */
1955 shift = (fault_instruction >> 7) & 0x1f;
1957 /* shift with register */
1958 if ((fault_instruction & (1 << 7)) != 0)
1959 /* undefined for now so bail out */
1960 return ABORT_FIXUP_FAILED;
1961 shift = ((fault_instruction >> 8) & 0xf);
1963 return ABORT_FIXUP_FAILED;
1964 DFC_PRINTF(("shift reg=%d ", shift));
1965 shift = registers[shift];
1967 DFC_PRINTF(("shift=%08x ", shift));
/* Bits 6:5 select the shift type applied to the offset register. */
1968 switch (((fault_instruction >> 5) & 0x3)) {
1969 case 0 : /* Logical left */
1970 offset = (int)(((u_int)offset) << shift);
1972 case 1 : /* Logical Right */
1973 if (shift == 0) shift = 32;
1974 offset = (int)(((u_int)offset) >> shift);
1976 case 2 : /* Arithmetic Right */
1977 if (shift == 0) shift = 32;
1978 offset = (int)(((int)offset) >> shift);
1980 case 3 : /* Rotate right (rol or rxx) */
1981 return ABORT_FIXUP_FAILED;
1985 DFC_PRINTF(("abt: fixed LDR/STR with "
1986 "register offset\n"));
1987 if ((fault_instruction & (1 << 23)))
1989 DFC_PRINTF(("offset=%08x ", offset));
1990 registers[base] += offset;
1992 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1996 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1998 /* Ok an abort in SVC mode */
2001 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2002 * as the fault happened in svc mode but we need it in the
2003 * usr slot so we can treat the registers as an array of ints
2005 * NOTE: This PC is in the position but writeback is not
2007 * Doing it like this is more efficient than trapping this
2008 * case in all possible locations in the prior fixup code.
/* Restore the lr shuffle performed on entry. */
2011 frame->tf_svc_lr = frame->tf_usr_lr;
2012 frame->tf_usr_lr = saved_lr;
2015 * Note the trapframe does not have the SVC r13 so a fault
2016 * from an instruction with writeback to r13 in SVC mode is
2017 * not allowed.  This should not happen as the kstack is
2023 * Now let the early-abort fixup routine have a go, in case it
2024 * was an LDM, STM, LDC or STC that faulted.
2027 return early_abort_fixup(arg);
2029 #endif	/* CPU_ARM7TDMI */
2035 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
2036 defined(CPU_ARM9E) || \
2037 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
2038 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2039 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2040 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
2041 defined(CPU_ARM10) || defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
2042 defined(CPU_FA526) || defined(CPU_FA626TE)
2055 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
/*
 * Walk a NULL-terminated cpu_option table; for each option found in the
 * boot args as a boolean, OR in or BIC out its control-register bits
 * (co_trueop when the option is true, co_falseop otherwise) and return
 * the updated cpuctrl value.
 */
2058 parse_cpu_options(args, optlist, cpuctrl)
2060 struct cpu_option *optlist;
2068 while (optlist->co_name) {
2069 if (get_bootconf_option(args, optlist->co_name,
2070 BOOTOPT_TYPE_BOOLEAN, &integer)) {
2072 if (optlist->co_trueop == OR)
2073 cpuctrl |= optlist->co_value;
2074 else if (optlist->co_trueop == BIC)
2075 cpuctrl &= ~optlist->co_value;
2077 if (optlist->co_falseop == OR)
2078 cpuctrl |= optlist->co_value;
2079 else if (optlist->co_falseop == BIC)
2080 cpuctrl &= ~optlist->co_value;
2087 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
2089 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/* Boot-arg options shared by the ARM7TDMI and ARM8 setup routines:
 * toggle the unified I/D cache and write buffer enable bits. */
2090 struct cpu_option arm678_options[] = {
2092 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
2093 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2094 #endif	/* COMPAT_12 */
2095 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2096 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2097 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2098 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
/* NULL name terminates the table (see parse_cpu_options). */
2099 { NULL, IGN, IGN, 0 }
2102 #endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
/* ARM7TDMI-specific boot-arg options (cache, write buffer, FPA clock). */
2105 struct cpu_option arm7tdmi_options[] = {
2106 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2107 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2108 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2109 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2111 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
2112 #endif	/* COMPAT_12 */
2113 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
2114 { NULL, IGN, IGN, 0 }
/*
 * ARM7TDMI cpu-setup hook (installed via arm7tdmi_cpufuncs).
 * Builds a control-register value from defaults plus boot-arg options,
 * flushes the caches, and writes the full control register.
 */
2118 arm7tdmi_setup(args)
2123 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2124 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2125 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2127 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2128 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2131 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2134 /* Clear out the cache */
2135 cpu_idcache_wbinv_all();
2137 /* Set the control register */
/* Mask 0xffffffff: write every control-register bit. */
2139 cpu_control(0xffffffff, cpuctrl);
2141 #endif	/* CPU_ARM7TDMI */
/* ARM8-specific boot-arg options (cache, write buffer, branch predict). */
2144 struct cpu_option arm8_options[] = {
2145 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
2146 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
2147 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2148 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2150 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2151 #endif	/* COMPAT_12 */
2152 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2153 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2154 { NULL, IGN, IGN, 0 }
/*
 * arm8_setup() body — presumably; the signature line is missing from
 * this extract (the #endif below and arm8_clock_config() calls identify
 * it).  Builds the control-register value from defaults and boot args,
 * applies ARM8-specific clock/test boot options, flushes caches, then
 * writes the control and clock/test registers.
 */
2162 int cpuctrl, cpuctrlmask;
2166 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2167 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2168 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2169 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2170 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2171 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2172 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2173 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2175 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2176 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2179 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2180 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2183 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2186 /* Get clock configuration */
/* Read current clock/test bits (low nibble) without modifying them. */
2187 clocktest = arm8_clock_config(0, 0) & 0x0f;
2189 /* Special ARM8 clock and test configuration */
2190 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2194 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2198 clocktest &= ~(0x01);
2201 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2205 clocktest &= ~(0x02);
2208 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2209 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
2212 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2213 clocktest |= (integer & 7) << 5;
2217 /* Clear out the cache */
2218 cpu_idcache_wbinv_all();
2220 /* Set the control register */
2222 cpu_control(0xffffffff, cpuctrl);
2224 /* Set the clock/test register */
2226 arm8_clock_config(0x7f, clocktest);
2228 #endif	/* CPU_ARM8 */
/* ARM9 boot-arg options: split I/D cache enables plus write buffer. */
2231 struct cpu_option arm9_options[] = {
2232 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2233 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2234 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2235 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2236 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2237 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2238 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2239 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2240 { NULL, IGN, IGN, 0 }
/*
 * arm9_setup() body — presumably; the signature line is missing from
 * this extract (#endif CPU_ARM9 below and arm9_options identify it).
 * Builds the control-register value (late aborts, round-robin
 * replacement), honours boot args and high-vectors relocation, flushes
 * caches, and writes the control register through cpuctrlmask.
 */
2247 int cpuctrl, cpuctrlmask;
2249 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2250 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2251 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2252 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
2253 CPU_CONTROL_ROUNDROBIN;
2254 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2255 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2256 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2257 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2258 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2259 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2260 | CPU_CONTROL_ROUNDROBIN;
2262 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2263 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2266 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2269 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate the vector page to high memory if configured that way. */
2271 if (vector_page == ARM_VECTORS_HIGH)
2272 cpuctrl |= CPU_CONTROL_VECRELOC;
2274 /* Clear out the cache */
2275 cpu_idcache_wbinv_all();
2277 /* Set the control register */
/* Unlike arm7tdmi/arm8, only bits in cpuctrlmask are written. */
2278 cpu_control(cpuctrlmask, cpuctrl);
2282 #endif	/* CPU_ARM9 */
2284 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* ARM9E/ARM10 boot-arg options: split I/D cache enables + write buffer. */
2285 struct cpu_option arm10_options[] = {
2286 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2287 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2288 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2289 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2290 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2291 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2292 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2293 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2294 { NULL, IGN, IGN, 0 }
/*
 * arm10_setup() body — presumably; the signature line is missing from
 * this extract (#endif CPU_ARM9E || CPU_ARM10 below and arm10_options
 * identify it).  Builds the control-register value with branch
 * prediction enabled, flushes caches (twice — see below), and writes
 * the full control register.
 */
2301 int cpuctrl, cpuctrlmask;
2303 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2304 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2305 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2306 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2307 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2308 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2309 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2310 | CPU_CONTROL_BPRD_ENABLE
2311 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2313 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2314 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2317 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2320 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2323 /* Clear out the cache */
2324 cpu_idcache_wbinv_all();
2326 /* Now really make sure they are clean.  */
/* CP15 c7,c7,0: invalidate both caches directly. */
2327 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2329 if (vector_page == ARM_VECTORS_HIGH)
2330 cpuctrl |= CPU_CONTROL_VECRELOC;
2332 /* Set the control register */
2334 cpu_control(0xffffffff, cpuctrl);
/* Flush again after the control-register change takes effect. */
2337 cpu_idcache_wbinv_all();
2339 #endif	/* CPU_ARM9E || CPU_ARM10 */
2341 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
/* ARM1136/ARM1176 boot-arg options: split I/D cache enables only
 * (no write-buffer knob here, unlike the earlier tables). */
2342 struct cpu_option arm11_options[] = {
2343 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2344 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2345 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2346 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2347 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2348 { NULL, IGN, IGN, 0 }
/*
 * arm11x6_setup: configure the system control register (and, for
 * specific ARM1136/ARM1176 revisions, the auxiliary control register
 * with errata workarounds), then flush the caches.
 * NOTE(review): declarations, braces and several #ifdef/assignment
 * head lines are elided in this excerpt; code left exactly as found.
 */
arm11x6_setup(char *args)
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;

	cpuid = cpufunc_id();

	/* Desired control-register bits (the "cpuctrl =" head of this
	 * expression is elided in this excerpt). */
	    CPU_CONTROL_MMU_ENABLE |
	    CPU_CONTROL_DC_ENABLE |
	    CPU_CONTROL_WBUF_ENABLE |
	    CPU_CONTROL_32BP_ENABLE |
	    CPU_CONTROL_32BD_ENABLE |
	    CPU_CONTROL_LABT_ENABLE |
	    CPU_CONTROL_SYST_ENABLE |
	    CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	    (3 << 30) | /* SBZ */
	    (1 << 29) | /* FA */
	    (1 << 28) | /* TR */
	    (3 << 26) | /* SBZ */
	    (3 << 19) | /* SBZ */
	    (1 << 17); /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

	/* Let boot arguments override the defaults. */
	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

	cpuctrl |= CPU_CONTROL_BEND_ENABLE;

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/*
	 * This options enables the workaround for the 364296 ARM1136
	 * r0pX errata (possible cache data corruption with
	 * hit-under-miss enabled). It sets the undocumented bit 31 in
	 * the auxiliary control register and the FI bit in the control
	 * register, thus disabling hit-under-miss without putting the
	 * processor into full low interrupt latency mode. ARM11MPCore
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
		cpuctrl |= CPU_CONTROL_FI_ENABLE;
		auxctrl = ARM1136_AUXCTL_PFI;
		auxctrl_wax = ~ARM1136_AUXCTL_PFI;

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean. */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted. */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register */
	cpu_control(~cpuctrl_wax, cpuctrl);

	/* Read-modify-write the auxiliary control register with
	 * auxctrl/auxctrl_wax; the conditional mcrne writes back only
	 * when needed (NOTE(review): the compare instruction appears
	 * elided in this excerpt). */
	__asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t"
	    "and %1, %0, %2\n\t"
	    "orr %1, %1, %3\n\t"
	    "mcrne p15, 0, %1, c1, c0, 1\n\t"
	    : "=r"(tmp), "=r"(tmp2) :
	    "r"(auxctrl_wax), "r"(auxctrl));

	cpu_idcache_wbinv_all();
2445 #endif /* CPU_ARM1136 || CPU_ARM1176 */
/*
 * pj4bv6_setup: build and install the control-register value for
 * Marvell PJ4B cores in ARMv6 mode, flushing L1 and L2 caches around
 * the write.  NOTE(review): return type, braces and some #ifdef
 * guard lines are elided in this excerpt; code left exactly as found.
 */
pj4bv6_setup(char *args)
	cpuctrl = CPU_CONTROL_MMU_ENABLE;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
	cpuctrl |= CPU_CONTROL_DC_ENABLE;
	cpuctrl |= (0xf << 3);	/* bits 6:3 -- presumably SBO write-buffer bits, as on XScale; confirm */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
	cpuctrl |= CPU_CONTROL_SYST_ENABLE;
	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_IC_ENABLE;
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
	cpuctrl |= (0x5 << 16);	/* bits 16 and 18 (SBO in the ARMv6 SCTLR -- confirm) */
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

	/* cpuctrl |= CPU_CONTROL_L2_ENABLE; */

	/* Make sure caches are clean. */
	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);

	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();
2494 cpuctrl = CPU_CONTROL_MMU_ENABLE;
2495 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2496 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2498 cpuctrl |= CPU_CONTROL_DC_ENABLE;
2499 cpuctrl |= (0xf << 3);
2500 cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
2501 cpuctrl |= CPU_CONTROL_IC_ENABLE;
2502 if (vector_page == ARM_VECTORS_HIGH)
2503 cpuctrl |= CPU_CONTROL_VECRELOC;
2504 cpuctrl |= (0x5 << 16) | (1 < 22);
2505 cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
2507 /* Clear out the cache */
2508 cpu_idcache_wbinv_all();
2510 /* Set the control register */
2512 cpu_control(0xFFFFFFFF, cpuctrl);
2515 cpu_idcache_wbinv_all();
2517 #endif /* CPU_MV_PJ4B */
/*
 * cortexa_setup: configure the SCTLR for Cortex-A class cores, then
 * enable SMP mode and TLB-maintenance broadcasting via the auxiliary
 * control register.  NOTE(review): return type, braces and some
 * #ifdef guard lines are elided in this excerpt.
 */
cortexa_setup(char *args)
	int cpuctrl, cpuctrlmask;

	/* Bits this routine manages; passed as the change mask to
	 * cpu_control() below. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |	/* MMU enable	[0] */
	    CPU_CONTROL_AFLT_ENABLE |	/* Alignment fault	[1] */
	    CPU_CONTROL_DC_ENABLE |	/* DCache enable	[2] */
	    CPU_CONTROL_BPRD_ENABLE |	/* Branch prediction	[11] */
	    CPU_CONTROL_IC_ENABLE |	/* ICache enable	[12] */
	    CPU_CONTROL_VECRELOC;	/* Vector relocation	[13] */

	/* Defaults: MMU, both caches and branch prediction on. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE |
	    CPU_CONTROL_IC_ENABLE |
	    CPU_CONTROL_DC_ENABLE |
	    CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;

	/* Switch to big endian */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;

	/* Check if the vector page is at the high address (0xffff0000) */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(cpuctrlmask, cpuctrl);

	cpu_idcache_wbinv_all();

	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting */
2564 #endif /* CPU_CORTEXA */
/*
 * Boot-argument option table for sa110_setup().  The first two
 * un-namespaced entries are COMPAT_12 legacy spellings (the opening
 * #ifdef COMPAT_12 line is elided in this excerpt).
 */
struct cpu_option sa110_options[] = {
	{ "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif /* COMPAT_12 */
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ NULL, IGN, IGN, 0 }	/* table terminator */
	/*
	 * sa110_setup body (function header elided in this excerpt):
	 * build the control word, flush caches, install it, then enable
	 * SA-110 clock switching via CP15 register 15.
	 */
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE;
	/* Full set of known bits; the all-ones mask is used instead
	 * (see commented-out call below). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;

	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

	cpuctrl |= CPU_CONTROL_BEND_ENABLE;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	/* cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm ("mcr 15, 0, r0, c15, c1, 2");
2626 #endif /* CPU_SA110 */
2628 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Boot-argument option table for sa11x0_setup().  First two entries
 * are COMPAT_12 legacy spellings (opening #ifdef line elided in this
 * excerpt).
 */
struct cpu_option sa11x0_options[] = {
	{ "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif /* COMPAT_12 */
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ NULL, IGN, IGN, 0 }	/* table terminator */
	/*
	 * sa11x0_setup body (function header elided in this excerpt):
	 * same pattern as sa110_setup but with late aborts enabled and
	 * vector relocation supported.
	 */
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
	/* Full set of known bits (not used by the cpu_control() call
	 * below, which passes an all-ones mask). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;

	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);

	cpuctrl |= CPU_CONTROL_BEND_ENABLE;

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
	/* Clear out the cache */
	cpu_idcache_wbinv_all();
	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);
2682 #endif /* CPU_SA1100 || CPU_SA1110 */
2684 #if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * Boot-argument option table for fa526_setup().  First two entries
 * are COMPAT_12 legacy spellings (opening #ifdef line elided in this
 * excerpt).
 */
struct cpu_option fa526_options[] = {
	{ "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE |
	    CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif /* COMPAT_12 */
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE |
	    CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE |
	    CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL, IGN, IGN, 0 }	/* table terminator */
/*
 * fa526_setup: build and install the control-register value for
 * Faraday FA526/FA626TE cores.  NOTE(review): return type, braces
 * and some #ifdef guard lines are elided in this excerpt.
 */
fa526_setup(char *args)
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE;
	/* Full set of known bits (the cpu_control() call below uses an
	 * all-ones mask instead). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;

	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);

	cpuctrl |= CPU_CONTROL_BEND_ENABLE;

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);
2738 #endif /* CPU_FA526 || CPU_FA626TE */
2741 #if defined(CPU_IXP12X0)
/*
 * Boot-argument option table for ixp12x0_setup().
 */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
	{ NULL, IGN, IGN, 0 }	/* table terminator */
	/*
	 * ixp12x0_setup body (function header elided in this excerpt).
	 * Unlike most siblings, the final cpu_control() call here uses
	 * cpuctrlmask rather than an all-ones mask, so only the listed
	 * bits are modified.
	 */
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE;

	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
	    | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;

	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);

	cpuctrl |= CPU_CONTROL_BEND_ENABLE;

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	/* cpu_control(0xffffffff, cpuctrl); */
	cpu_control(cpuctrlmask, cpuctrl);
2792 #endif /* CPU_IXP12X0 */
2794 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2795 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2796 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * Boot-argument option table for xscale_setup().  First two entries
 * are COMPAT_12 legacy spellings (opening #ifdef line elided in this
 * excerpt).
 */
struct cpu_option xscale_options[] = {
	{ "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
	{ "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif /* COMPAT_12 */
	{ "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
	{ NULL, IGN, IGN, 0 }	/* table terminator */
	/*
	 * xscale_setup body (function header and the declaration of
	 * auxctl are elided in this excerpt): build the control word,
	 * install it, then fix up write-coalescing in the auxiliary
	 * control register.
	 */
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled. Our option
	 * is to enable/disable coalescing. Note that bits 6:3
	 * must always be enabled.
	 */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE;
	/* Full set of known bits (the cpu_control() call below uses an
	 * all-ones mask instead). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
	    CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;

	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

	cpuctrl |= CPU_CONTROL_BEND_ENABLE;

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register. Note that bits 6:3 must always
	 */
	/* cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
	auxctl &= ~XSCALE_AUXCTL_K;
#ifdef CPU_XSCALE_CORE3
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
	/* Write the updated auxiliary control register back (operand
	 * list elided in this excerpt). */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2881 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425