1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 * products derived from this software without specific prior written
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * RiscBSD kernel project
43 * C functions for supporting CPU / MMU / TLB specific operations.
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
50 #include <sys/param.h>
51 #include <sys/systm.h>
53 #include <sys/mutex.h>
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/disassem.h>
63 #include <machine/cpuconf.h>
64 #include <machine/cpufunc.h>
65 #include <machine/bootconfig.h>
67 #ifdef CPU_XSCALE_80200
68 #include <arm/xscale/i80200/i80200reg.h>
69 #include <arm/xscale/i80200/i80200var.h>
72 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73 #include <arm/xscale/i80321/i80321reg.h>
74 #include <arm/xscale/i80321/i80321var.h>
77 #if defined(CPU_XSCALE_81342)
78 #include <arm/xscale/i8134x/i81342reg.h>
81 #ifdef CPU_XSCALE_IXP425
82 #include <arm/xscale/ixp425/ixp425reg.h>
83 #include <arm/xscale/ixp425/ixp425var.h>
86 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
87 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
88 #include <arm/xscale/xscalereg.h>
/* Performance-monitor hook table; presumably registered by PMU code — TODO confirm. */
92 struct arm_pmc_funcs *arm_pmc;
95 /* PRIMARY CACHE VARIABLES */
/* L1 I-cache geometry, filled in by get_cachetype_cp15()/get_cachetype_table(). */
97 int arm_picache_line_size;
/* L1 D-cache geometry (also describes a unified cache when there is one). */
100 int arm_pdcache_size; /* and unified */
101 int arm_pdcache_line_size;
102 int arm_pdcache_ways;
/* Non-zero when the CPU reports a single unified I/D cache. */
105 int arm_pcache_unified;
/* D-cache line size and (line size - 1) mask used for range alignment. */
107 int arm_dcache_align;
108 int arm_dcache_align_mask;
110 /* 1 == use cpu_sleep(), 0 == don't */
111 int cpu_do_powersave;
/*
 * cpu_functions dispatch table for the ARM7TDMI core.
 * TLB and cache maintenance all funnel through the shared flushID
 * helpers; the (void *) casts adapt no-argument helpers to slots
 * declared with range arguments.  No L2 cache: those slots are no-ops.
 */
115 struct cpu_functions arm7tdmi_cpufuncs = {
119 cpufunc_nullop, /* cpwait */
123 cpufunc_control, /* control */
124 cpufunc_domains, /* domain */
125 arm7tdmi_setttb, /* setttb */
126 cpufunc_faultstatus, /* faultstatus */
127 cpufunc_faultaddress, /* faultaddress */
131 arm7tdmi_tlb_flushID, /* tlb_flushID */
132 arm7tdmi_tlb_flushID_SE, /* tlb_flushID_SE */
133 arm7tdmi_tlb_flushID, /* tlb_flushI */
134 arm7tdmi_tlb_flushID_SE, /* tlb_flushI_SE */
135 arm7tdmi_tlb_flushID, /* tlb_flushD */
136 arm7tdmi_tlb_flushID_SE, /* tlb_flushD_SE */
138 /* Cache operations */
140 cpufunc_nullop, /* icache_sync_all */
141 (void *)cpufunc_nullop, /* icache_sync_range */
143 arm7tdmi_cache_flushID, /* dcache_wbinv_all */
144 (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range */
145 (void *)arm7tdmi_cache_flushID, /* dcache_inv_range */
146 (void *)cpufunc_nullop, /* dcache_wb_range */
148 arm7tdmi_cache_flushID, /* idcache_wbinv_all */
149 (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range */
150 cpufunc_nullop, /* l2cache_wbinv_all */
151 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
152 (void *)cpufunc_nullop, /* l2cache_inv_range */
153 (void *)cpufunc_nullop, /* l2cache_wb_range */
155 /* Other functions */
157 cpufunc_nullop, /* flush_prefetchbuf */
158 cpufunc_nullop, /* drain_writebuf */
159 cpufunc_nullop, /* flush_brnchtgt_C */
160 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
162 (void *)cpufunc_nullop, /* sleep */
/* Note: the only table here using the late_abort_fixup handler. */
166 late_abort_fixup, /* dataabt_fixup */
167 cpufunc_null_fixup, /* prefetchabt_fixup */
169 arm7tdmi_context_switch, /* context_switch */
171 arm7tdmi_setup /* cpu setup */
174 #endif /* CPU_ARM7TDMI */
/*
 * cpu_functions dispatch table for the ARM8 core.
 * Cache maintenance uses the arm8 purge/clean ID-cache helpers for
 * both D and I/D slots; no L2 cache, those slots are no-ops.
 */
177 struct cpu_functions arm8_cpufuncs = {
181 cpufunc_nullop, /* cpwait */
185 cpufunc_control, /* control */
186 cpufunc_domains, /* domain */
187 arm8_setttb, /* setttb */
188 cpufunc_faultstatus, /* faultstatus */
189 cpufunc_faultaddress, /* faultaddress */
193 arm8_tlb_flushID, /* tlb_flushID */
194 arm8_tlb_flushID_SE, /* tlb_flushID_SE */
195 arm8_tlb_flushID, /* tlb_flushI */
196 arm8_tlb_flushID_SE, /* tlb_flushI_SE */
197 arm8_tlb_flushID, /* tlb_flushD */
198 arm8_tlb_flushID_SE, /* tlb_flushD_SE */
200 /* Cache operations */
202 cpufunc_nullop, /* icache_sync_all */
203 (void *)cpufunc_nullop, /* icache_sync_range */
205 arm8_cache_purgeID, /* dcache_wbinv_all */
206 (void *)arm8_cache_purgeID, /* dcache_wbinv_range */
207 /*XXX*/ (void *)arm8_cache_purgeID, /* dcache_inv_range */
208 (void *)arm8_cache_cleanID, /* dcache_wb_range */
210 arm8_cache_purgeID, /* idcache_wbinv_all */
211 (void *)arm8_cache_purgeID, /* idcache_wbinv_range */
212 cpufunc_nullop, /* l2cache_wbinv_all */
213 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
214 (void *)cpufunc_nullop, /* l2cache_inv_range */
215 (void *)cpufunc_nullop, /* l2cache_wb_range */
217 /* Other functions */
219 cpufunc_nullop, /* flush_prefetchbuf */
220 cpufunc_nullop, /* drain_writebuf */
221 cpufunc_nullop, /* flush_brnchtgt_C */
222 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
224 (void *)cpufunc_nullop, /* sleep */
228 cpufunc_null_fixup, /* dataabt_fixup */
229 cpufunc_null_fixup, /* prefetchabt_fixup */
231 arm8_context_switch, /* context_switch */
233 arm8_setup /* cpu setup */
235 #endif /* CPU_ARM8 */
/*
 * cpu_functions dispatch table for ARM9 cores.
 * Uses dedicated arm9 I/D cache routines and the common ARMv4 TLB
 * flush/write-buffer-drain helpers.  No L2 cache slots are wired up.
 */
238 struct cpu_functions arm9_cpufuncs = {
242 cpufunc_nullop, /* cpwait */
246 cpufunc_control, /* control */
247 cpufunc_domains, /* Domain */
248 arm9_setttb, /* Setttb */
249 cpufunc_faultstatus, /* Faultstatus */
250 cpufunc_faultaddress, /* Faultaddress */
254 armv4_tlb_flushID, /* tlb_flushID */
255 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
256 armv4_tlb_flushI, /* tlb_flushI */
257 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
258 armv4_tlb_flushD, /* tlb_flushD */
259 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
261 /* Cache operations */
263 arm9_icache_sync_all, /* icache_sync_all */
264 arm9_icache_sync_range, /* icache_sync_range */
266 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
267 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
268 /*XXX*/ arm9_dcache_wbinv_range, /* dcache_inv_range */
269 arm9_dcache_wb_range, /* dcache_wb_range */
271 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
272 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
273 cpufunc_nullop, /* l2cache_wbinv_all */
274 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
275 (void *)cpufunc_nullop, /* l2cache_inv_range */
276 (void *)cpufunc_nullop, /* l2cache_wb_range */
278 /* Other functions */
280 cpufunc_nullop, /* flush_prefetchbuf */
281 armv4_drain_writebuf, /* drain_writebuf */
282 cpufunc_nullop, /* flush_brnchtgt_C */
283 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
285 (void *)cpufunc_nullop, /* sleep */
289 cpufunc_null_fixup, /* dataabt_fixup */
290 cpufunc_null_fixup, /* prefetchabt_fixup */
292 arm9_context_switch, /* context_switch */
294 arm9_setup /* cpu setup */
297 #endif /* CPU_ARM9 */
299 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * cpu_functions dispatch table for ARMv5 cores with the "extended
 * cache" (EC) operations (selected by set_cpufuncs() for
 * ARM926EJ-S / ARM1026EJ-S).  Shares the arm10 TLB-by-entry and
 * context-switch/setup routines.
 */
300 struct cpu_functions armv5_ec_cpufuncs = {
304 cpufunc_nullop, /* cpwait */
308 cpufunc_control, /* control */
309 cpufunc_domains, /* Domain */
310 armv5_ec_setttb, /* Setttb */
311 cpufunc_faultstatus, /* Faultstatus */
312 cpufunc_faultaddress, /* Faultaddress */
316 armv4_tlb_flushID, /* tlb_flushID */
317 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
318 armv4_tlb_flushI, /* tlb_flushI */
319 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
320 armv4_tlb_flushD, /* tlb_flushD */
321 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
323 /* Cache operations */
325 armv5_ec_icache_sync_all, /* icache_sync_all */
326 armv5_ec_icache_sync_range, /* icache_sync_range */
328 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
329 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
330 /*XXX*/ armv5_ec_dcache_wbinv_range, /* dcache_inv_range */
331 armv5_ec_dcache_wb_range, /* dcache_wb_range */
333 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
334 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
336 cpufunc_nullop, /* l2cache_wbinv_all */
337 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
338 (void *)cpufunc_nullop, /* l2cache_inv_range */
339 (void *)cpufunc_nullop, /* l2cache_wb_range */
341 /* Other functions */
343 cpufunc_nullop, /* flush_prefetchbuf */
344 armv4_drain_writebuf, /* drain_writebuf */
345 cpufunc_nullop, /* flush_brnchtgt_C */
346 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
348 (void *)cpufunc_nullop, /* sleep */
352 cpufunc_null_fixup, /* dataabt_fixup */
353 cpufunc_null_fixup, /* prefetchabt_fixup */
355 arm10_context_switch, /* context_switch */
357 arm10_setup /* cpu setup */
360 #endif /* CPU_ARM9E || CPU_ARM10 */
/*
 * cpu_functions dispatch table for ARM10 cores.  Unlike most tables
 * here, dcache_inv_range has a real invalidate implementation rather
 * than falling back on wbinv.
 */
363 struct cpu_functions arm10_cpufuncs = {
367 cpufunc_nullop, /* cpwait */
371 cpufunc_control, /* control */
372 cpufunc_domains, /* Domain */
373 arm10_setttb, /* Setttb */
374 cpufunc_faultstatus, /* Faultstatus */
375 cpufunc_faultaddress, /* Faultaddress */
379 armv4_tlb_flushID, /* tlb_flushID */
380 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
381 armv4_tlb_flushI, /* tlb_flushI */
382 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
383 armv4_tlb_flushD, /* tlb_flushD */
384 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
386 /* Cache operations */
388 arm10_icache_sync_all, /* icache_sync_all */
389 arm10_icache_sync_range, /* icache_sync_range */
391 arm10_dcache_wbinv_all, /* dcache_wbinv_all */
392 arm10_dcache_wbinv_range, /* dcache_wbinv_range */
393 arm10_dcache_inv_range, /* dcache_inv_range */
394 arm10_dcache_wb_range, /* dcache_wb_range */
396 arm10_idcache_wbinv_all, /* idcache_wbinv_all */
397 arm10_idcache_wbinv_range, /* idcache_wbinv_range */
398 cpufunc_nullop, /* l2cache_wbinv_all */
399 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
400 (void *)cpufunc_nullop, /* l2cache_inv_range */
401 (void *)cpufunc_nullop, /* l2cache_wb_range */
403 /* Other functions */
405 cpufunc_nullop, /* flush_prefetchbuf */
406 armv4_drain_writebuf, /* drain_writebuf */
407 cpufunc_nullop, /* flush_brnchtgt_C */
408 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
410 (void *)cpufunc_nullop, /* sleep */
414 cpufunc_null_fixup, /* dataabt_fixup */
415 cpufunc_null_fixup, /* prefetchabt_fixup */
417 arm10_context_switch, /* context_switch */
419 arm10_setup /* cpu setup */
422 #endif /* CPU_ARM10 */
/*
 * cpu_functions dispatch table for the StrongARM SA-110.
 * Shares the sa1_* cache/TTB helpers with the SA-11x0 and IXP12x0
 * tables below; only context_switch/setup are SA-110 specific.
 */
425 struct cpu_functions sa110_cpufuncs = {
429 cpufunc_nullop, /* cpwait */
433 cpufunc_control, /* control */
434 cpufunc_domains, /* domain */
435 sa1_setttb, /* setttb */
436 cpufunc_faultstatus, /* faultstatus */
437 cpufunc_faultaddress, /* faultaddress */
441 armv4_tlb_flushID, /* tlb_flushID */
442 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
443 armv4_tlb_flushI, /* tlb_flushI */
444 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
445 armv4_tlb_flushD, /* tlb_flushD */
446 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
448 /* Cache operations */
450 sa1_cache_syncI, /* icache_sync_all */
451 sa1_cache_syncI_rng, /* icache_sync_range */
453 sa1_cache_purgeD, /* dcache_wbinv_all */
454 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
455 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
456 sa1_cache_cleanD_rng, /* dcache_wb_range */
458 sa1_cache_purgeID, /* idcache_wbinv_all */
459 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
460 cpufunc_nullop, /* l2cache_wbinv_all */
461 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
462 (void *)cpufunc_nullop, /* l2cache_inv_range */
463 (void *)cpufunc_nullop, /* l2cache_wb_range */
465 /* Other functions */
467 cpufunc_nullop, /* flush_prefetchbuf */
468 armv4_drain_writebuf, /* drain_writebuf */
469 cpufunc_nullop, /* flush_brnchtgt_C */
470 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
472 (void *)cpufunc_nullop, /* sleep */
476 cpufunc_null_fixup, /* dataabt_fixup */
477 cpufunc_null_fixup, /* prefetchabt_fixup */
479 sa110_context_switch, /* context_switch */
481 sa110_setup /* cpu setup */
483 #endif /* CPU_SA110 */
485 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * cpu_functions dispatch table shared by SA-1100 and SA-1110.
 * Differs from the SA-110 table by providing a real read-buffer
 * drain (flush_prefetchbuf slot) and a real cpu sleep routine.
 */
486 struct cpu_functions sa11x0_cpufuncs = {
490 cpufunc_nullop, /* cpwait */
494 cpufunc_control, /* control */
495 cpufunc_domains, /* domain */
496 sa1_setttb, /* setttb */
497 cpufunc_faultstatus, /* faultstatus */
498 cpufunc_faultaddress, /* faultaddress */
502 armv4_tlb_flushID, /* tlb_flushID */
503 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
504 armv4_tlb_flushI, /* tlb_flushI */
505 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
506 armv4_tlb_flushD, /* tlb_flushD */
507 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
509 /* Cache operations */
511 sa1_cache_syncI, /* icache_sync_all */
512 sa1_cache_syncI_rng, /* icache_sync_range */
514 sa1_cache_purgeD, /* dcache_wbinv_all */
515 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
516 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
517 sa1_cache_cleanD_rng, /* dcache_wb_range */
519 sa1_cache_purgeID, /* idcache_wbinv_all */
520 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
521 cpufunc_nullop, /* l2cache_wbinv_all */
522 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
523 (void *)cpufunc_nullop, /* l2cache_inv_range */
524 (void *)cpufunc_nullop, /* l2cache_wb_range */
526 /* Other functions */
528 sa11x0_drain_readbuf, /* flush_prefetchbuf */
529 armv4_drain_writebuf, /* drain_writebuf */
530 cpufunc_nullop, /* flush_brnchtgt_C */
531 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
533 sa11x0_cpu_sleep, /* sleep */
537 cpufunc_null_fixup, /* dataabt_fixup */
538 cpufunc_null_fixup, /* prefetchabt_fixup */
540 sa11x0_context_switch, /* context_switch */
542 sa11x0_setup /* cpu setup */
544 #endif /* CPU_SA1100 || CPU_SA1110 */
/*
 * cpu_functions dispatch table for the Intel IXP12x0 (StrongARM
 * based): reuses the sa1_* cache/TTB helpers with IXP12x0-specific
 * read-buffer drain, context switch, and setup routines.
 */
547 struct cpu_functions ixp12x0_cpufuncs = {
551 cpufunc_nullop, /* cpwait */
555 cpufunc_control, /* control */
556 cpufunc_domains, /* domain */
557 sa1_setttb, /* setttb */
558 cpufunc_faultstatus, /* faultstatus */
559 cpufunc_faultaddress, /* faultaddress */
563 armv4_tlb_flushID, /* tlb_flushID */
564 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
565 armv4_tlb_flushI, /* tlb_flushI */
566 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
567 armv4_tlb_flushD, /* tlb_flushD */
568 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
570 /* Cache operations */
572 sa1_cache_syncI, /* icache_sync_all */
573 sa1_cache_syncI_rng, /* icache_sync_range */
575 sa1_cache_purgeD, /* dcache_wbinv_all */
576 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
577 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
578 sa1_cache_cleanD_rng, /* dcache_wb_range */
580 sa1_cache_purgeID, /* idcache_wbinv_all */
581 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
582 cpufunc_nullop, /* l2cache_wbinv_all */
583 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
584 (void *)cpufunc_nullop, /* l2cache_inv_range */
585 (void *)cpufunc_nullop, /* l2cache_wb_range */
587 /* Other functions */
589 ixp12x0_drain_readbuf, /* flush_prefetchbuf */
590 armv4_drain_writebuf, /* drain_writebuf */
591 cpufunc_nullop, /* flush_brnchtgt_C */
592 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
594 (void *)cpufunc_nullop, /* sleep */
598 cpufunc_null_fixup, /* dataabt_fixup */
599 cpufunc_null_fixup, /* prefetchabt_fixup */
601 ixp12x0_context_switch, /* context_switch */
603 ixp12x0_setup /* cpu setup */
605 #endif /* CPU_IXP12X0 */
607 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
608 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
609 defined(CPU_XSCALE_80219)
/*
 * cpu_functions dispatch table shared by the XScale family
 * (80200/80321/80219/PXA2x0/IXP425).  Note the non-null cpwait slot:
 * XScale uses xscale_cpwait to wait for CP15 operations to complete.
 * Real sleep support via xscale_cpu_sleep; L1-only cache ops here.
 */
611 struct cpu_functions xscale_cpufuncs = {
615 xscale_cpwait, /* cpwait */
619 xscale_control, /* control */
620 cpufunc_domains, /* domain */
621 xscale_setttb, /* setttb */
622 cpufunc_faultstatus, /* faultstatus */
623 cpufunc_faultaddress, /* faultaddress */
627 armv4_tlb_flushID, /* tlb_flushID */
628 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
629 armv4_tlb_flushI, /* tlb_flushI */
630 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
631 armv4_tlb_flushD, /* tlb_flushD */
632 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
634 /* Cache operations */
636 xscale_cache_syncI, /* icache_sync_all */
637 xscale_cache_syncI_rng, /* icache_sync_range */
639 xscale_cache_purgeD, /* dcache_wbinv_all */
640 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
641 xscale_cache_flushD_rng, /* dcache_inv_range */
642 xscale_cache_cleanD_rng, /* dcache_wb_range */
644 xscale_cache_purgeID, /* idcache_wbinv_all */
645 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
646 cpufunc_nullop, /* l2cache_wbinv_all */
647 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
648 (void *)cpufunc_nullop, /* l2cache_inv_range */
649 (void *)cpufunc_nullop, /* l2cache_wb_range */
651 /* Other functions */
653 cpufunc_nullop, /* flush_prefetchbuf */
654 armv4_drain_writebuf, /* drain_writebuf */
655 cpufunc_nullop, /* flush_brnchtgt_C */
656 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
658 xscale_cpu_sleep, /* sleep */
662 cpufunc_null_fixup, /* dataabt_fixup */
663 cpufunc_null_fixup, /* prefetchabt_fixup */
665 xscale_context_switch, /* context_switch */
667 xscale_setup /* cpu setup */
670 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
673 #ifdef CPU_XSCALE_81342
/*
 * cpu_functions dispatch table for XScale core 3 (81342).  The only
 * table in this file wiring real L2 cache maintenance routines into
 * the l2cache_* slots; otherwise parallels xscale_cpufuncs with
 * core-3-specific L1 cache and setttb/context-switch helpers.
 */
674 struct cpu_functions xscalec3_cpufuncs = {
678 xscale_cpwait, /* cpwait */
682 xscale_control, /* control */
683 cpufunc_domains, /* domain */
684 xscalec3_setttb, /* setttb */
685 cpufunc_faultstatus, /* faultstatus */
686 cpufunc_faultaddress, /* faultaddress */
690 armv4_tlb_flushID, /* tlb_flushID */
691 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
692 armv4_tlb_flushI, /* tlb_flushI */
693 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
694 armv4_tlb_flushD, /* tlb_flushD */
695 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
697 /* Cache operations */
699 xscalec3_cache_syncI, /* icache_sync_all */
700 xscalec3_cache_syncI_rng, /* icache_sync_range */
702 xscalec3_cache_purgeD, /* dcache_wbinv_all */
703 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
704 xscale_cache_flushD_rng, /* dcache_inv_range */
705 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
707 xscalec3_cache_purgeID, /* idcache_wbinv_all */
708 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
709 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
710 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
711 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
712 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
714 /* Other functions */
716 cpufunc_nullop, /* flush_prefetchbuf */
717 armv4_drain_writebuf, /* drain_writebuf */
718 cpufunc_nullop, /* flush_brnchtgt_C */
719 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
721 xscale_cpu_sleep, /* sleep */
725 cpufunc_null_fixup, /* dataabt_fixup */
726 cpufunc_null_fixup, /* prefetchabt_fixup */
728 xscalec3_context_switch, /* context_switch */
730 xscale_setup /* cpu setup */
732 #endif /* CPU_XSCALE_81342 */
/* Global constants also used by locore.s */
/* Active dispatch table; copied from one of the per-CPU tables by set_cpufuncs(). */
737 struct cpu_functions cpufuncs;
739 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
741 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
742 defined (CPU_ARM9E) || defined (CPU_ARM10) || \
743 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
744 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
745 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
747 static void get_cachetype_cp15(void);
749 /* Additional cache information local to this file. Log2 of some of the
/* Log2-encoded D-cache set/assoc/line parameters consumed by the
 * arm9/arm10 set+index cache flush setup in set_cpufuncs(). */
751 static int arm_dcache_l2_nsets;
752 static int arm_dcache_l2_assoc;
753 static int arm_dcache_l2_linesize;
/*
 * get_cachetype_cp15: read the CP15 Cache Type register (c0, c0, 1)
 * and decode the I- and D-cache geometry into the arm_p[id]cache_*
 * globals and the arm_dcache_* alignment/log2 values above.
 */
758 u_int ctype, isize, dsize;
/* Read the Cache Type register. */
761 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
765 * ...and thus spake the ARM ARM:
767 * If an <opcode2> value corresponding to an unimplemented or
768 * reserved ID register is encountered, the System Control
769 * processor returns the value of the main ID register.
/* Cache Type == main ID => register not implemented; keep defaults. */
771 if (ctype == cpufunc_id())
/* S bit clear: a single unified cache rather than split I/D. */
774 if ((ctype & CPU_CT_S) == 0)
775 arm_pcache_unified = 1;
778 * If you want to know how this code works, go read the ARM ARM.
781 arm_pcache_type = CPU_CT_CTYPE(ctype);
/* Split caches: decode the I-cache size field separately. */
783 if (arm_pcache_unified == 0) {
784 isize = CPU_CT_ISIZE(ctype);
/* M bit selects the 3x (vs 2x) size multiplier per the ARM ARM encoding. */
785 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
786 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
787 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
788 if (isize & CPU_CT_xSIZE_M)
789 arm_picache_line_size = 0; /* not present */
791 arm_picache_ways = 1;
793 arm_picache_ways = multiplier <<
794 (CPU_CT_xSIZE_ASSOC(isize) - 1);
796 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
/* D-cache (or unified cache) geometry, same encoding as above. */
799 dsize = CPU_CT_DSIZE(ctype);
800 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
801 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
802 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
803 if (dsize & CPU_CT_xSIZE_M)
804 arm_pdcache_line_size = 0; /* not present */
806 arm_pdcache_ways = 1;
808 arm_pdcache_ways = multiplier <<
809 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
811 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
813 arm_dcache_align = arm_pdcache_line_size;
/* Log2 parameters for set/index-based flush loops. */
815 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
816 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
817 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
818 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
821 arm_dcache_align_mask = arm_dcache_align - 1;
823 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
825 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
827 /* Cache information for CPUs without cache type registers. */
831 int ct_pcache_unified;
833 int ct_pdcache_line_size;
836 int ct_picache_line_size;
/* Hard-coded cache geometry table, keyed on the CP15 CPU ID. */
840 struct cachetab cachetab[] = {
841 /* cpuid, cache type, u, dsiz, ls, wy, isiz, ls, wy */
842 /* XXX is this type right for SA-1? */
843 { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
844 { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
845 { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
846 { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
/* Zero cpuid terminates the search loop below. */
847 { 0, 0, 0, 0, 0, 0, 0, 0}
850 static void get_cachetype_table(void);
/*
 * get_cachetype_table: look the running CPU up in cachetab[] by its
 * masked CPU ID and copy that entry's geometry into the arm_p*cache_*
 * globals; also derives arm_dcache_align(_mask) from the D line size.
 */
853 get_cachetype_table()
856 u_int32_t cpuid = cpufunc_id();
858 for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
859 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
860 arm_pcache_type = cachetab[i].ct_pcache_type;
861 arm_pcache_unified = cachetab[i].ct_pcache_unified;
862 arm_pdcache_size = cachetab[i].ct_pdcache_size;
863 arm_pdcache_line_size =
864 cachetab[i].ct_pdcache_line_size;
865 arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
866 arm_picache_size = cachetab[i].ct_picache_size;
867 arm_picache_line_size =
868 cachetab[i].ct_picache_line_size;
869 arm_picache_ways = cachetab[i].ct_picache_ways;
872 arm_dcache_align = arm_pdcache_line_size;
874 arm_dcache_align_mask = arm_dcache_align - 1;
877 #endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
880 * Cannot panic here as we may not have a console yet ...
/*
 * set_cpufuncs: identify the running CPU from its CP15 ID register
 * and install the matching cpu_functions table, discover cache
 * geometry, set cpu_reset_needs_v4_MMU_disable for locore.s, and
 * select the pmap PTE initialization for the core.  Unrecognized
 * CPUs fall through to panic().
 */
886 cputype = cpufunc_id();
887 cputype &= CPU_ID_CPU_MASK;
890 * NOTE: cpu_do_powersave defaults to off. If we encounter a
891 * CPU type where we want to use it by default, then we set it.
/* ARM7TDMI: ARM Ltd implementor, ARM7 family, V4T architecture. */
895 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
896 CPU_ID_IS7(cputype) &&
897 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
898 cpufuncs = arm7tdmi_cpufuncs;
899 cpu_reset_needs_v4_MMU_disable = 0;
900 get_cachetype_cp15();
901 pmap_pte_init_generic();
/* ARM8: match on the 0x8xxx part-number nibble. */
906 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
907 (cputype & 0x0000f000) == 0x00008000) {
908 cpufuncs = arm8_cpufuncs;
909 cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
910 get_cachetype_cp15();
911 pmap_pte_init_arm8();
914 #endif /* CPU_ARM8 */
/* ARM9: ARM Ltd or TI implementor, 0x9xxx part number. */
916 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
917 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
918 (cputype & 0x0000f000) == 0x00009000) {
919 cpufuncs = arm9_cpufuncs;
920 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
921 get_cachetype_cp15();
/* Precompute set/index iteration bounds for the arm9 D-cache loops
 * from the log2 geometry filled in by get_cachetype_cp15(). */
922 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
923 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
924 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
925 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
926 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
927 #ifdef ARM9_CACHE_WRITE_THROUGH
928 pmap_pte_init_arm9();
930 pmap_pte_init_generic();
934 #endif /* CPU_ARM9 */
935 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
936 if (cputype == CPU_ID_ARM926EJS ||
937 cputype == CPU_ID_ARM1026EJS) {
938 cpufuncs = armv5_ec_cpufuncs;
939 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
940 get_cachetype_cp15();
941 pmap_pte_init_generic();
944 #endif /* CPU_ARM9E || CPU_ARM10 */
946 if (/* cputype == CPU_ID_ARM1020T || */
947 cputype == CPU_ID_ARM1020E) {
949 * Select write-through cacheing (this isn't really an
950 * option on ARM1020T).
952 cpufuncs = arm10_cpufuncs;
953 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
954 get_cachetype_cp15();
/* Same set/index precomputation as the ARM9 case above. */
955 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
956 arm10_dcache_sets_max =
957 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
958 arm10_dcache_sets_inc;
959 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
960 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
961 pmap_pte_init_generic();
964 #endif /* CPU_ARM10 */
/* StrongARM variants use the static cachetab[] instead of CP15. */
966 if (cputype == CPU_ID_SA110) {
967 cpufuncs = sa110_cpufuncs;
968 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
969 get_cachetype_table();
973 #endif /* CPU_SA110 */
975 if (cputype == CPU_ID_SA1100) {
976 cpufuncs = sa11x0_cpufuncs;
977 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
978 get_cachetype_table();
980 /* Use powersave on this CPU. */
981 cpu_do_powersave = 1;
985 #endif /* CPU_SA1100 */
987 if (cputype == CPU_ID_SA1110) {
988 cpufuncs = sa11x0_cpufuncs;
989 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
990 get_cachetype_table();
992 /* Use powersave on this CPU. */
993 cpu_do_powersave = 1;
997 #endif /* CPU_SA1110 */
999 if (cputype == CPU_ID_IXP1200) {
1000 cpufuncs = ixp12x0_cpufuncs;
1001 cpu_reset_needs_v4_MMU_disable = 1;
1002 get_cachetype_table();
1003 pmap_pte_init_sa1();
1006 #endif /* CPU_IXP12X0 */
1007 #ifdef CPU_XSCALE_80200
1008 if (cputype == CPU_ID_80200) {
1009 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1014 * Reset the Performance Monitoring Unit to a
1016 * - CCNT, PMN0, PMN1 reset to 0
1017 * - overflow indications cleared
1018 * - all counters disabled
1020 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1022 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1025 #if defined(XSCALE_CCLKCFG)
1027 * Crank CCLKCFG to maximum legal value.
1029 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1031 : "r" (XSCALE_CCLKCFG));
1035 * XXX Disable ECC in the Bus Controller Unit; we
1036 * don't really support it, yet. Clear any pending
1037 * error indications.
1039 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1041 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1043 cpufuncs = xscale_cpufuncs;
1044 #if defined(PERFCTRS)
1049 * i80200 errata: Step-A0 and A1 have a bug where
1050 * D$ dirty bits are not cleared on "invalidate by
1053 * Workaround: Clean cache line before invalidating.
/* Patch the live table in place for the buggy steppings. */
1055 if (rev == 0 || rev == 1)
1056 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1058 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1059 get_cachetype_cp15();
1060 pmap_pte_init_xscale();
1063 #endif /* CPU_XSCALE_80200 */
1064 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1065 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1066 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1067 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1069 * Reset the Performance Monitoring Unit to a
1071 * - CCNT, PMN0, PMN1 reset to 0
1072 * - overflow indications cleared
1073 * - all counters disabled
1075 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1077 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1080 cpufuncs = xscale_cpufuncs;
1081 #if defined(PERFCTRS)
1085 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1086 get_cachetype_cp15();
1087 pmap_pte_init_xscale();
1090 #endif /* CPU_XSCALE_80321 */
1092 #if defined(CPU_XSCALE_81342)
1093 if (cputype == CPU_ID_81342) {
1094 cpufuncs = xscalec3_cpufuncs;
1095 #if defined(PERFCTRS)
1099 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1100 get_cachetype_cp15();
1101 pmap_pte_init_xscale();
1104 #endif /* CPU_XSCALE_81342 */
1105 #ifdef CPU_XSCALE_PXA2X0
1106 /* ignore core revision to test PXA2xx CPUs */
1107 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1108 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1110 cpufuncs = xscale_cpufuncs;
1111 #if defined(PERFCTRS)
1115 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1116 get_cachetype_cp15();
1117 pmap_pte_init_xscale();
1119 /* Use powersave on this CPU. */
1120 cpu_do_powersave = 1;
1124 #endif /* CPU_XSCALE_PXA2X0 */
1125 #ifdef CPU_XSCALE_IXP425
1126 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1127 cputype == CPU_ID_IXP425_266) {
1129 cpufuncs = xscale_cpufuncs;
1130 #if defined(PERFCTRS)
1134 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1135 get_cachetype_cp15();
1136 pmap_pte_init_xscale();
1140 #endif /* CPU_XSCALE_IXP425 */
/* No compiled-in support matched this CPU ID. */
1142 * Bzzzz. And the answer was ...
1144 panic("No support for this CPU type (%08x) in kernel", cputype);
1145 return(ARCHITECTURE_NOT_PRESENT);
/* Success path: tell UMA about the D-cache alignment requirement. */
1147 uma_set_align(arm_dcache_align_mask);
1152 * Fixup routines for data and prefetch aborts.
1154 * Several compile time symbols are used
1156 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1157 * correction of registers after a fault.
1158 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1159 * when defined should use late aborts
1164 * Null abort fixup routine.
1165 * For use when no fixup is required.
/* K&R-style definition; unconditionally reports that no register fixup was needed. */
1168 cpufunc_null_fixup(arg)
1171 return(ABORT_FIXUP_OK);
1175 #if defined(CPU_ARM7TDMI)
/*
 * Debug helpers for the abort-fixup code: with DEBUG_FAULT_CORRECTION
 * defined they print and disassemble the faulting instruction;
 * otherwise they expand to nothing.
 */
1177 #ifdef DEBUG_FAULT_CORRECTION
1178 #define DFC_PRINTF(x) printf x
1179 #define DFC_DISASSEMBLE(x) disassemble(x)
1181 #define DFC_PRINTF(x) /* nothing */
1182 #define DFC_DISASSEMBLE(x) /* nothing */
1186 * "Early" data abort fixup.
1188 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
1189 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1191 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
/*
 * early_abort_fixup:
 *
 *	Undo the base-register side effects of a faulted LDM/STM or
 *	LDC/STC with writeback, so the instruction can be restarted.
 *	Decodes the instruction at the fault PC and adjusts the base
 *	register in the trapframe; returns ABORT_FIXUP_OK on success or
 *	ABORT_FIXUP_FAILED when the instruction cannot be corrected.
 */
1194 early_abort_fixup(arg)
1197 trapframe_t *frame = arg;
1199 u_int fault_instruction;
1202 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1204 /* Ok an abort in SVC mode */
1207 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1208 * as the fault happened in svc mode but we need it in the
1209 * usr slot so we can treat the registers as an array of ints
1211 * NOTE: This PC is in the position but writeback is not
1213 * Doing it like this is more efficient than trapping this
1214 * case in all possible locations in the following fixup code.
1217 saved_lr = frame->tf_usr_lr;
1218 frame->tf_usr_lr = frame->tf_svc_lr;
1221 * Note the trapframe does not have the SVC r13 so a fault
1222 * from an instruction with writeback to r13 in SVC mode is
1223 * not allowed. This should not happen as the kstack is
1228 /* Get fault address and status from the CPU */
1230 fault_pc = frame->tf_pc;
1231 fault_instruction = *((volatile unsigned int *)fault_pc);
1233 /* Decode the fault instruction and fix the registers as needed */
/* Block data transfer (LDM/STM): opcode bits 27:25 == 100. */
1235 if ((fault_instruction & 0x0e000000) == 0x08000000) {
1239 int *registers = &frame->tf_r0;
1241 DFC_PRINTF(("LDM/STM\n"));
1242 DFC_DISASSEMBLE(fault_pc);
/* Only writeback (W, bit 21) forms modified the base register. */
1243 if (fault_instruction & (1 << 21)) {
1244 DFC_PRINTF(("This instruction must be corrected\n"));
1245 base = (fault_instruction >> 16) & 0x0f;
1247 return ABORT_FIXUP_FAILED;
1248 /* Count registers transferred */
/* One bit per register in the 16-bit register list. */
1250 for (loop = 0; loop < 16; ++loop) {
1251 if (fault_instruction & (1<<loop))
1254 DFC_PRINTF(("%d registers used\n", count));
1255 DFC_PRINTF(("Corrected r%d by %d bytes ",
/* U bit (23): set means the base was incremented; undo it. */
1257 if (fault_instruction & (1 << 23)) {
1258 DFC_PRINTF(("down\n"));
1259 registers[base] -= count * 4;
1261 DFC_PRINTF(("up\n"));
1262 registers[base] += count * 4;
/* Coprocessor data transfer (LDC/STC): opcode bits 27:25 == 110. */
1265 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1268 int *registers = &frame->tf_r0;
1270 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1272 DFC_DISASSEMBLE(fault_pc);
1274 /* Only need to fix registers if write back is turned on */
1276 if ((fault_instruction & (1 << 21)) != 0) {
1277 base = (fault_instruction >> 16) & 0x0f;
1279 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1280 return ABORT_FIXUP_FAILED;
1282 return ABORT_FIXUP_FAILED;
/* LDC/STC immediate is a word offset: 8 bits scaled by 4. */
1284 offset = (fault_instruction & 0xff) << 2;
1285 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1286 if ((fault_instruction & (1 << 23)) != 0)
1288 registers[base] += offset;
1289 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1291 } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1292 return ABORT_FIXUP_FAILED;
/* Restore the SVC/USR lr swap performed on entry. */
1294 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1296 /* Ok an abort in SVC mode */
1299 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1300 * as the fault happened in svc mode but we need it in the
1301 * usr slot so we can treat the registers as an array of ints
1303 * NOTE: This PC is in the position but writeback is not
1305 * Doing it like this is more efficient than trapping this
1306 * case in all possible locations in the prior fixup code.
1309 frame->tf_svc_lr = frame->tf_usr_lr;
1310 frame->tf_usr_lr = saved_lr;
1313 * Note the trapframe does not have the SVC r13 so a fault
1314 * from an instruction with writeback to r13 in SVC mode is
1315 * not allowed. This should not happen as the kstack is
1320 return(ABORT_FIXUP_OK);
1322 #endif /* CPU_ARM2/250/3/6/7 */
1325 #if defined(CPU_ARM7TDMI)
1327 * "Late" (base updated) data abort fixup
1329 * For ARM6 (in late-abort mode) and ARM7.
1331 * In this model, all data-transfer instructions need fixing up. We defer
1332 * LDM, STM, LDC and STC fixup to the early-abort handler.
/*
 * late_abort_fixup:
 *
 *	Base-updated ("late") data abort fixup for ARM6 (late-abort
 *	mode) and ARM7: every faulted single data transfer may have
 *	already updated its base register, so SWP and LDR/STR forms are
 *	decoded here and the base adjustment is undone.  LDM/STM and
 *	LDC/STC are delegated to early_abort_fixup() at the end.
 *	Returns ABORT_FIXUP_OK or ABORT_FIXUP_FAILED.
 */
1335 late_abort_fixup(arg)
1338 trapframe_t *frame = arg;
1340 u_int fault_instruction;
1343 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1345 /* Ok an abort in SVC mode */
1348 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1349 * as the fault happened in svc mode but we need it in the
1350 * usr slot so we can treat the registers as an array of ints
1352 * NOTE: This PC is in the position but writeback is not
1354 * Doing it like this is more efficient than trapping this
1355 * case in all possible locations in the following fixup code.
1358 saved_lr = frame->tf_usr_lr;
1359 frame->tf_usr_lr = frame->tf_svc_lr;
1362 * Note the trapframe does not have the SVC r13 so a fault
1363 * from an instruction with writeback to r13 in SVC mode is
1364 * not allowed. This should not happen as the kstack is
1369 /* Get fault address and status from the CPU */
1371 fault_pc = frame->tf_pc;
1372 fault_instruction = *((volatile unsigned int *)fault_pc);
1374 /* Decode the fault instruction and fix the registers as needed */
1376 /* Was it a swap instruction? */
/* SWP performs no base writeback, so nothing to correct. */
1378 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1379 DFC_DISASSEMBLE(fault_pc);
1380 } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1382 /* Was it a ldr/str instruction? */
1383 /* This is for late abort only */
1387 int *registers = &frame->tf_r0;
1389 DFC_DISASSEMBLE(fault_pc);
1391 /* This is for late abort only */
/* Post-indexed (P bit 24 clear) or writeback (W bit 21 set)
 * forms updated the base register and must be rolled back. */
1393 if ((fault_instruction & (1 << 24)) == 0
1394 || (fault_instruction & (1 << 21)) != 0) {
1395 /* postindexed ldr/str with no writeback */
1397 base = (fault_instruction >> 16) & 0x0f;
1399 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1400 return ABORT_FIXUP_FAILED;
1402 return ABORT_FIXUP_FAILED;
1403 DFC_PRINTF(("late abt fix: r%d=%08x : ",
1404 base, registers[base]));
/* I bit (25) clear: 12-bit immediate offset. */
1405 if ((fault_instruction & (1 << 25)) == 0) {
1406 /* Immediate offset - easy */
1408 offset = fault_instruction & 0xfff;
1409 if ((fault_instruction & (1 << 23)))
1411 registers[base] += offset;
1412 DFC_PRINTF(("imm=%08x ", offset));
1414 /* offset is a shifted register */
1417 offset = fault_instruction & 0x0f;
1419 return ABORT_FIXUP_FAILED;
1422 * Register offset - hard we have to
1423 * cope with shifts !
1425 offset = registers[offset];
/* Bit 4 clear: shift amount is a 5-bit immediate;
 * set: shift amount comes from a register. */
1427 if ((fault_instruction & (1 << 4)) == 0)
1428 /* shift with amount */
1429 shift = (fault_instruction >> 7) & 0x1f;
1431 /* shift with register */
1432 if ((fault_instruction & (1 << 7)) != 0)
1433 /* undefined for now so bail out */
1434 return ABORT_FIXUP_FAILED;
1435 shift = ((fault_instruction >> 8) & 0xf);
1437 return ABORT_FIXUP_FAILED;
1438 DFC_PRINTF(("shift reg=%d ", shift));
1439 shift = registers[shift];
1441 DFC_PRINTF(("shift=%08x ", shift));
/* Apply the shift type from bits 6:5 to the offset. */
1442 switch (((fault_instruction >> 5) & 0x3)) {
1443 case 0 : /* Logical left */
1444 offset = (int)(((u_int)offset) << shift);
1446 case 1 : /* Logical Right */
1447 if (shift == 0) shift = 32;
1448 offset = (int)(((u_int)offset) >> shift);
1450 case 2 : /* Arithmetic Right */
1451 if (shift == 0) shift = 32;
1452 offset = (int)(((int)offset) >> shift);
/* Rotate-right offsets are not corrected here. */
1454 case 3 : /* Rotate right (rol or rxx) */
1455 return ABORT_FIXUP_FAILED;
1459 DFC_PRINTF(("abt: fixed LDR/STR with "
1460 "register offset\n"));
1461 if ((fault_instruction & (1 << 23)))
1463 DFC_PRINTF(("offset=%08x ", offset));
1464 registers[base] += offset;
1466 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
/* Restore the SVC/USR lr swap performed on entry. */
1470 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1472 /* Ok an abort in SVC mode */
1475 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1476 * as the fault happened in svc mode but we need it in the
1477 * usr slot so we can treat the registers as an array of ints
1479 * NOTE: This PC is in the position but writeback is not
1481 * Doing it like this is more efficient than trapping this
1482 * case in all possible locations in the prior fixup code.
1485 frame->tf_svc_lr = frame->tf_usr_lr;
1486 frame->tf_usr_lr = saved_lr;
1489 * Note the trapframe does not have the SVC r13 so a fault
1490 * from an instruction with writeback to r13 in SVC mode is
1491 * not allowed. This should not happen as the kstack is
1497 * Now let the early-abort fixup routine have a go, in case it
1498 * was an LDM, STM, LDC or STC that faulted.
1501 return early_abort_fixup(arg);
1503 #endif /* CPU_ARM7TDMI */
1509 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1510 defined(CPU_ARM9E) || \
1511 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
1512 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1513 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1514 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1515 defined(CPU_ARM10) || defined(CPU_ARM11)
1528 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
/*
 * parse_cpu_options:
 *
 *	Walk a NULL-terminated cpu_option table and apply each boot
 *	option found in `args' to `cpuctrl': the true/false operation
 *	(OR to set bits, BIC to clear them) selects how co_value is
 *	folded in.  Returns the updated control-register value.
 */
1531 parse_cpu_options(args, optlist, cpuctrl)
1533 struct cpu_option *optlist;
1541 while (optlist->co_name) {
1542 if (get_bootconf_option(args, optlist->co_name,
1543 BOOTOPT_TYPE_BOOLEAN, &integer)) {
/* Option present and true: apply co_trueop. */
1545 if (optlist->co_trueop == OR)
1546 cpuctrl |= optlist->co_value;
1547 else if (optlist->co_trueop == BIC)
1548 cpuctrl &= ~optlist->co_value;
/* Option present and false: apply co_falseop. */
1550 if (optlist->co_falseop == OR)
1551 cpuctrl |= optlist->co_value;
1552 else if (optlist->co_falseop == BIC)
1553 cpuctrl &= ~optlist->co_value;
1560 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1562 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot options shared by the ARM6/7/8 family: unified I/D cache and
 * write buffer enables (legacy COMPAT_12 spellings included).
 */
1563 struct cpu_option arm678_options[] = {
1565 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
1566 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1567 #endif /* COMPAT_12 */
1568 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1569 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1570 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1571 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1572 { NULL, IGN, IGN, 0 }
1575 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
/*
 * ARM7TDMI-specific boot options: cache/write-buffer enables plus the
 * FPA coprocessor clock (CPU_CONTROL_CPCLK) control.
 */
1578 struct cpu_option arm7tdmi_options[] = {
1579 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1580 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1581 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1582 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1584 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
1585 #endif /* COMPAT_12 */
1586 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
1587 { NULL, IGN, IGN, 0 }
/*
 * arm7tdmi_setup:
 *
 *	Build the ARM7TDMI control-register value (MMU, 32-bit
 *	program/data space, system protection, unified cache, write
 *	buffer), apply boot options, clean the caches, and write CP15.
 */
1591 arm7tdmi_setup(args)
1596 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1597 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1598 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1600 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1601 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1604 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1607 /* Clear out the cache */
1608 cpu_idcache_wbinv_all();
1610 /* Set the control register */
/* All bits are written: mask 0xffffffff. */
1612 cpu_control(0xffffffff, cpuctrl);
1614 #endif /* CPU_ARM7TDMI */
/*
 * ARM8-specific boot options: cache, write buffer, and branch
 * prediction enables (legacy COMPAT_12 spellings included).
 */
1617 struct cpu_option arm8_options[] = {
1618 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1619 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1620 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1621 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1623 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1624 #endif /* COMPAT_12 */
1625 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1626 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1627 { NULL, IGN, IGN, 0 }
/*
 * arm8_setup body: builds the ARM8 control-register value, applies the
 * shared ARM6/7/8 and ARM8-specific boot options, then programs the
 * ARM8-only clock/test configuration register via arm8_clock_config().
 */
1635 int cpuctrl, cpuctrlmask;
1639 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1640 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1641 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1642 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1643 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1644 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1645 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1646 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1648 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1649 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1652 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1653 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1656 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1659 /* Get clock configuration */
/* Read-modify-write: only the low 4 bits of the current value kept. */
1660 clocktest = arm8_clock_config(0, 0) & 0x0f;
1662 /* Special ARM8 clock and test configuration */
1663 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1667 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1671 clocktest &= ~(0x01);
1674 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1678 clocktest &= ~(0x02);
1681 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
/*
 * NOTE(review): the mask ~0xc0 clears bits 7:6 while the new value
 * lands in bits 3:2 ((integer & 3) << 2) — these don't line up;
 * verify the intended field against the ARM8 clock/test register
 * layout before relying on this.
 */
1682 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1685 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1686 clocktest |= (integer & 7) << 5;
1690 /* Clear out the cache */
1691 cpu_idcache_wbinv_all();
1693 /* Set the control register */
1695 cpu_control(0xffffffff, cpuctrl);
1697 /* Set the clock/test register */
1699 arm8_clock_config(0x7f, clocktest);
1701 #endif /* CPU_ARM8 */
/*
 * ARM9 boot options: separate I/D cache enables and write buffer.
 */
1704 struct cpu_option arm9_options[] = {
1705 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1706 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1707 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1708 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1709 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1710 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1711 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1712 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1713 { NULL, IGN, IGN, 0 }
/*
 * arm9_setup body: builds the ARM9 control-register value (split
 * I/D caches, write buffer, late abort, round-robin replacement),
 * applies boot options, handles high vectors, and writes CP15 using
 * cpuctrlmask so only the listed bits are modified.
 */
1720 int cpuctrl, cpuctrlmask;
1722 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1723 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1724 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1725 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1726 CPU_CONTROL_ROUNDROBIN;
1727 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1728 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1729 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1730 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1731 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1732 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1733 | CPU_CONTROL_ROUNDROBIN;
1735 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1736 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1739 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1742 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate exception vectors to 0xffff0000 when requested. */
1744 if (vector_page == ARM_VECTORS_HIGH)
1745 cpuctrl |= CPU_CONTROL_VECRELOC;
1747 /* Clear out the cache */
1748 cpu_idcache_wbinv_all();
1750 /* Set the control register */
1751 cpu_control(cpuctrlmask, cpuctrl);
1755 #endif /* CPU_ARM9 */
1757 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARM9E/ARM10 boot options: separate I/D cache enables and write buffer.
 */
1758 struct cpu_option arm10_options[] = {
1759 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1760 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1761 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1762 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1763 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1764 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1765 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1766 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1767 { NULL, IGN, IGN, 0 }
/*
 * arm10_setup body (ARM9E/ARM10): builds the control-register value
 * with branch prediction enabled, applies boot options, invalidates
 * the caches (including an explicit CP15 c7 flush), and writes CP15.
 */
1774 int cpuctrl, cpuctrlmask;
1776 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1777 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1778 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1779 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1780 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1781 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1782 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1783 | CPU_CONTROL_BPRD_ENABLE
1784 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1786 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1787 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1790 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1793 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1796 /* Clear out the cache */
1797 cpu_idcache_wbinv_all();
1799 /* Now really make sure they are clean. */
/* CP15 c7 op 0: invalidate entire I+D cache. */
1800 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1802 if (vector_page == ARM_VECTORS_HIGH)
1803 cpuctrl |= CPU_CONTROL_VECRELOC;
1805 /* Set the control register */
1807 cpu_control(0xffffffff, cpuctrl);
1810 cpu_idcache_wbinv_all();
1812 #endif /* CPU_ARM9E || CPU_ARM10 */
/*
 * ARM11 boot options: separate I/D cache enables only.
 */
1815 struct cpu_option arm11_options[] = {
1816 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1817 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1818 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1819 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1820 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1821 { NULL, IGN, IGN, 0 }
/*
 * arm11_setup body: builds the ARM11 control-register value (branch
 * prediction deliberately left disabled — see the commented-out bit),
 * applies boot options, flushes the caches, records the value in
 * curcpu()->ci_ctrl, and writes CP15.
 */
1828 int cpuctrl, cpuctrlmask;
1830 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1831 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1832 /* | CPU_CONTROL_BPRD_ENABLE */;
1833 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1834 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1835 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
1836 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1837 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1839 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1840 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1843 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1846 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1849 /* Clear out the cache */
1850 cpu_idcache_wbinv_all();
1852 /* Now really make sure they are clean. */
/* CP15 c7 op 0: invalidate entire I+D cache. */
1853 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1855 /* Set the control register */
/* Remember the programmed value so it can be restored later. */
1856 curcpu()->ci_ctrl = cpuctrl;
1857 cpu_control(0xffffffff, cpuctrl);
1860 cpu_idcache_wbinv_all();
1862 #endif /* CPU_ARM11 */
/*
 * SA-110 boot options: split I/D cache enables and write buffer
 * (legacy COMPAT_12 spellings included).
 */
1865 struct cpu_option sa110_options[] = {
1867 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1868 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1869 #endif /* COMPAT_12 */
1870 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1871 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1872 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1873 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1874 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1875 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1876 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1877 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1878 { NULL, IGN, IGN, 0 }
/*
 * sa110_setup body: builds the SA-110 control-register value, applies
 * boot options, flushes caches, writes CP15, and enables CPU clock
 * switching via the StrongARM-specific CP15 c15 register.
 */
1885 int cpuctrl, cpuctrlmask;
1887 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1888 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1889 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1890 | CPU_CONTROL_WBUF_ENABLE;
1891 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1892 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1893 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1894 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1895 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1896 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1897 | CPU_CONTROL_CPCLK;
1899 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1900 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1903 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1906 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1909 /* Clear out the cache */
1910 cpu_idcache_wbinv_all();
1912 /* Set the control register */
1914 /* cpu_control(cpuctrlmask, cpuctrl);*/
1915 cpu_control(0xffffffff, cpuctrl);
1918 * enable clockswitching, note that this doesn't read or write to r0,
1919 * r0 is just to make it valid asm
1921 __asm ("mcr 15, 0, r0, c15, c1, 2");
1923 #endif /* CPU_SA110 */
1925 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 boot options: split I/D cache enables and write
 * buffer (legacy COMPAT_12 spellings included).
 */
1926 struct cpu_option sa11x0_options[] = {
1928 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1929 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1930 #endif /* COMPAT_12 */
1931 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1932 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1933 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1934 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1935 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1936 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1937 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1938 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1939 { NULL, IGN, IGN, 0 }
/*
 * sa11x0_setup body: builds the SA-1100/SA-1110 control-register
 * value (late abort model), applies boot options, handles high
 * vectors, cleans caches, and writes CP15.
 */
1946 int cpuctrl, cpuctrlmask;
1948 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1949 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1950 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1951 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1952 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1953 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1954 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1955 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1956 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1957 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1958 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1960 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1961 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1965 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
1968 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate exception vectors to 0xffff0000 when requested. */
1971 if (vector_page == ARM_VECTORS_HIGH)
1972 cpuctrl |= CPU_CONTROL_VECRELOC;
1973 /* Clear out the cache */
1974 cpu_idcache_wbinv_all();
1975 /* Set the control register */
1977 cpu_control(0xffffffff, cpuctrl);
1979 #endif /* CPU_SA1100 || CPU_SA1110 */
1981 #if defined(CPU_IXP12X0)
/*
 * IXP12x0 boot options: split I/D cache enables and write buffer.
 */
1982 struct cpu_option ixp12x0_options[] = {
1983 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1984 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1985 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1986 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1987 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1988 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1989 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1990 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1991 { NULL, IGN, IGN, 0 }
/*
 * ixp12x0_setup body: builds the IXP12x0 control-register value,
 * applies boot options, handles high vectors, cleans caches, and —
 * unlike most siblings — writes CP15 through cpuctrlmask so bits
 * outside the mask are preserved.
 */
1998 int cpuctrl, cpuctrlmask;
2001 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2002 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2003 | CPU_CONTROL_IC_ENABLE;
2005 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2006 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2007 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2008 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2009 | CPU_CONTROL_VECRELOC;
2011 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2012 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2015 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2018 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate exception vectors to 0xffff0000 when requested. */
2021 if (vector_page == ARM_VECTORS_HIGH)
2022 cpuctrl |= CPU_CONTROL_VECRELOC;
2024 /* Clear out the cache */
2025 cpu_idcache_wbinv_all();
2027 /* Set the control register */
2029 /* cpu_control(0xffffffff, cpuctrl); */
2030 cpu_control(cpuctrlmask, cpuctrl);
2032 #endif /* CPU_IXP12X0 */
2034 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2035 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2036 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * XScale boot options: branch prediction plus split I/D cache enables
 * (legacy COMPAT_12 spellings included).
 */
2037 struct cpu_option xscale_options[] = {
2039 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2040 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2041 #endif /* COMPAT_12 */
2042 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2043 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2044 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2045 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2046 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2047 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2048 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2049 { NULL, IGN, IGN, 0 }
/*
 * xscale_setup body: builds the XScale control-register value,
 * applies boot options, optionally enables the L2 cache (core3),
 * cleans caches, writes CP15, and then programs the XScale auxiliary
 * control register (CP15 c1, opcode2 1) to configure write coalescing
 * and, on core3, the LLR/mini-data-cache mode bits.
 */
2057 int cpuctrl, cpuctrlmask;
2060 * The XScale Write Buffer is always enabled. Our option
2061 * is to enable/disable coalescing. Note that bits 6:3
2062 * must always be enabled.
2065 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2066 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2067 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2068 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2069 | CPU_CONTROL_BPRD_ENABLE;
2070 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2071 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2072 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2073 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2074 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2075 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2076 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
2077 CPU_CONTROL_L2_ENABLE;
2079 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2080 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2083 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2086 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate exception vectors to 0xffff0000 when requested. */
2089 if (vector_page == ARM_VECTORS_HIGH)
2090 cpuctrl |= CPU_CONTROL_VECRELOC;
2091 #ifdef CPU_XSCALE_CORE3
2092 cpuctrl |= CPU_CONTROL_L2_ENABLE;
2095 /* Clear out the cache */
2096 cpu_idcache_wbinv_all();
2099 * Set the control register. Note that bits 6:3 must always
2103 /* cpu_control(cpuctrlmask, cpuctrl);*/
2104 cpu_control(0xffffffff, cpuctrl);
2106 /* Make sure write coalescing is turned on */
/* Read the auxiliary control register (CP15 c1, opcode2 1). */
2107 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2109 #ifdef XSCALE_NO_COALESCE_WRITES
2110 auxctl |= XSCALE_AUXCTL_K;
2112 auxctl &= ~XSCALE_AUXCTL_K;
2114 #ifdef CPU_XSCALE_CORE3
2115 auxctl |= XSCALE_AUXCTL_LLR;
2116 auxctl |= XSCALE_AUXCTL_MD_MASK;
/* Write the updated auxiliary control register back. */
2118 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2121 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425