1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 * products derived from this software without specific prior written
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * RiscBSD kernel project
43 * C functions for supporting CPU / MMU / TLB specific operations.
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
50 #include <sys/param.h>
51 #include <sys/systm.h>
53 #include <sys/mutex.h>
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/disassem.h>
63 #include <machine/cpuconf.h>
64 #include <machine/cpufunc.h>
65 #include <machine/bootconfig.h>
67 #ifdef CPU_XSCALE_80200
68 #include <arm/xscale/i80200/i80200reg.h>
69 #include <arm/xscale/i80200/i80200var.h>
72 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73 #include <arm/xscale/i80321/i80321reg.h>
74 #include <arm/xscale/i80321/i80321var.h>
77 #if defined(CPU_XSCALE_81342)
78 #include <arm/xscale/i8134x/i81342reg.h>
81 #ifdef CPU_XSCALE_IXP425
82 #include <arm/xscale/ixp425/ixp425reg.h>
83 #include <arm/xscale/ixp425/ixp425var.h>
86 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
87 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
88 #include <arm/xscale/xscalereg.h>
/* Performance-monitor hook table.  NOTE(review): assignment sites are
 * under #if defined(PERFCTRS) and are not visible in this listing --
 * confirm against upstream. */
92 struct arm_pmc_funcs *arm_pmc;
95 /* PRIMARY CACHE VARIABLES */
/*
 * Primary (L1) cache geometry, filled in at boot either from the CP15
 * cache-type register (get_cachetype_cp15()) or from a static per-CPU
 * table (get_cachetype_table()).
 * NOTE(review): the declarations of arm_pcache_type, arm_picache_size
 * and arm_picache_ways, which are assigned later in this file, appear
 * to have been dropped from this listing.
 */
97 int arm_picache_line_size;
100 int arm_pdcache_size; /* and unified */
101 int arm_pdcache_line_size;
102 int arm_pdcache_ways;
105 int arm_pcache_unified; /* nonzero when I- and D-cache are one cache */
107 int arm_dcache_align; /* == arm_pdcache_line_size; set by get_cachetype_*() */
108 int arm_dcache_align_mask; /* arm_dcache_align - 1 */
110 /* 1 == use cpu_sleep(), 0 == don't */
111 int cpu_do_powersave;
/*
 * ARM7TDMI (ARMv4T) CPU/MMU/TLB/cache dispatch table; installed as the
 * global `cpufuncs' by set_cpufuncs().  The core has a unified cache and
 * TLB, so the ID (combined) flush routines back every flush slot.
 * NOTE(review): listing is garbled -- the leading `cpufunc_id' entry,
 * blank separators and the closing `};' have been dropped; restore from
 * upstream before compiling.
 */
115 struct cpu_functions arm7tdmi_cpufuncs = {
119 cpufunc_nullop, /* cpwait */
123 cpufunc_control, /* control */
124 cpufunc_domains, /* domain */
125 arm7tdmi_setttb, /* setttb */
126 cpufunc_faultstatus, /* faultstatus */
127 cpufunc_faultaddress, /* faultaddress */
131 arm7tdmi_tlb_flushID, /* tlb_flushID */
132 arm7tdmi_tlb_flushID_SE, /* tlb_flushID_SE */
133 arm7tdmi_tlb_flushID, /* tlb_flushI */
134 arm7tdmi_tlb_flushID_SE, /* tlb_flushI_SE */
135 arm7tdmi_tlb_flushID, /* tlb_flushD */
136 arm7tdmi_tlb_flushID_SE, /* tlb_flushD_SE */
138 /* Cache operations */
140 cpufunc_nullop, /* icache_sync_all */
141 (void *)cpufunc_nullop, /* icache_sync_range */
143 arm7tdmi_cache_flushID, /* dcache_wbinv_all */
144 (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range */
145 (void *)arm7tdmi_cache_flushID, /* dcache_inv_range */
146 (void *)cpufunc_nullop, /* dcache_wb_range */
148 arm7tdmi_cache_flushID, /* idcache_wbinv_all */
149 (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range */
150 cpufunc_nullop, /* l2cache_wbinv_all */
151 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
152 (void *)cpufunc_nullop, /* l2cache_inv_range */
153 (void *)cpufunc_nullop, /* l2cache_wb_range */
155 /* Other functions */
157 cpufunc_nullop, /* flush_prefetchbuf */
158 cpufunc_nullop, /* drain_writebuf */
159 cpufunc_nullop, /* flush_brnchtgt_C */
160 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
162 (void *)cpufunc_nullop, /* sleep */
166 late_abort_fixup, /* dataabt_fixup */
167 cpufunc_null_fixup, /* prefetchabt_fixup */
169 arm7tdmi_context_switch, /* context_switch */
171 arm7tdmi_setup /* cpu setup */
174 #endif /* CPU_ARM7TDMI */
/*
 * ARM8 dispatch table; installed by set_cpufuncs().  dcache_inv_range
 * is approximated with a full purge (see the /*XXX*/ entry).
 * NOTE(review): listing is garbled -- `cpufunc_id' entry and closing
 * `};' are missing; restore from upstream before compiling.
 */
177 struct cpu_functions arm8_cpufuncs = {
181 cpufunc_nullop, /* cpwait */
185 cpufunc_control, /* control */
186 cpufunc_domains, /* domain */
187 arm8_setttb, /* setttb */
188 cpufunc_faultstatus, /* faultstatus */
189 cpufunc_faultaddress, /* faultaddress */
193 arm8_tlb_flushID, /* tlb_flushID */
194 arm8_tlb_flushID_SE, /* tlb_flushID_SE */
195 arm8_tlb_flushID, /* tlb_flushI */
196 arm8_tlb_flushID_SE, /* tlb_flushI_SE */
197 arm8_tlb_flushID, /* tlb_flushD */
198 arm8_tlb_flushID_SE, /* tlb_flushD_SE */
200 /* Cache operations */
202 cpufunc_nullop, /* icache_sync_all */
203 (void *)cpufunc_nullop, /* icache_sync_range */
205 arm8_cache_purgeID, /* dcache_wbinv_all */
206 (void *)arm8_cache_purgeID, /* dcache_wbinv_range */
207 /*XXX*/ (void *)arm8_cache_purgeID, /* dcache_inv_range */
208 (void *)arm8_cache_cleanID, /* dcache_wb_range */
210 arm8_cache_purgeID, /* idcache_wbinv_all */
211 (void *)arm8_cache_purgeID, /* idcache_wbinv_range */
212 cpufunc_nullop, /* l2cache_wbinv_all */
213 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
214 (void *)cpufunc_nullop, /* l2cache_inv_range */
215 (void *)cpufunc_nullop, /* l2cache_wb_range */
217 /* Other functions */
219 cpufunc_nullop, /* flush_prefetchbuf */
220 cpufunc_nullop, /* drain_writebuf */
221 cpufunc_nullop, /* flush_brnchtgt_C */
222 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
224 (void *)cpufunc_nullop, /* sleep */
228 cpufunc_null_fixup, /* dataabt_fixup */
229 cpufunc_null_fixup, /* prefetchabt_fixup */
231 arm8_context_switch, /* context_switch */
233 arm8_setup /* cpu setup */
235 #endif /* CPU_ARM8 */
/*
 * ARM9 dispatch table; installed by set_cpufuncs(), which also derives
 * arm9_dcache_sets_* / arm9_dcache_index_* from the CP15 cache geometry.
 * NOTE(review): listing is garbled -- `cpufunc_id' entry and closing
 * `};' are missing; restore from upstream before compiling.
 */
238 struct cpu_functions arm9_cpufuncs = {
242 cpufunc_nullop, /* cpwait */
246 cpufunc_control, /* control */
247 cpufunc_domains, /* Domain */
248 arm9_setttb, /* Setttb */
249 cpufunc_faultstatus, /* Faultstatus */
250 cpufunc_faultaddress, /* Faultaddress */
254 armv4_tlb_flushID, /* tlb_flushID */
255 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
256 armv4_tlb_flushI, /* tlb_flushI */
257 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
258 armv4_tlb_flushD, /* tlb_flushD */
259 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
261 /* Cache operations */
263 arm9_icache_sync_all, /* icache_sync_all */
264 arm9_icache_sync_range, /* icache_sync_range */
266 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
267 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
268 arm9_dcache_inv_range, /* dcache_inv_range */
269 arm9_dcache_wb_range, /* dcache_wb_range */
271 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
272 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
273 cpufunc_nullop, /* l2cache_wbinv_all */
274 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
275 (void *)cpufunc_nullop, /* l2cache_inv_range */
276 (void *)cpufunc_nullop, /* l2cache_wb_range */
278 /* Other functions */
280 cpufunc_nullop, /* flush_prefetchbuf */
281 armv4_drain_writebuf, /* drain_writebuf */
282 cpufunc_nullop, /* flush_brnchtgt_C */
283 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
285 (void *)cpufunc_nullop, /* sleep */
289 cpufunc_null_fixup, /* dataabt_fixup */
290 cpufunc_null_fixup, /* prefetchabt_fixup */
292 arm9_context_switch, /* context_switch */
294 arm9_setup /* cpu setup */
297 #endif /* CPU_ARM9 */
299 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARMv5-EC (ARM926EJ-S / ARM1026EJ-S) dispatch table; installed by
 * set_cpufuncs() for the non-Marvell ARM9E/ARM10 cores.
 * NOTE(review): listing is garbled -- `cpufunc_id' entry and closing
 * `};' are missing; restore from upstream before compiling.
 */
300 struct cpu_functions armv5_ec_cpufuncs = {
304 cpufunc_nullop, /* cpwait */
308 cpufunc_control, /* control */
309 cpufunc_domains, /* Domain */
310 armv5_ec_setttb, /* Setttb */
311 cpufunc_faultstatus, /* Faultstatus */
312 cpufunc_faultaddress, /* Faultaddress */
316 armv4_tlb_flushID, /* tlb_flushID */
317 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
318 armv4_tlb_flushI, /* tlb_flushI */
319 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
320 armv4_tlb_flushD, /* tlb_flushD */
321 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
323 /* Cache operations */
325 armv5_ec_icache_sync_all, /* icache_sync_all */
326 armv5_ec_icache_sync_range, /* icache_sync_range */
328 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
329 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
330 armv5_ec_dcache_inv_range, /* dcache_inv_range */
331 armv5_ec_dcache_wb_range, /* dcache_wb_range */
333 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
334 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
336 cpufunc_nullop, /* l2cache_wbinv_all */
337 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
338 (void *)cpufunc_nullop, /* l2cache_inv_range */
339 (void *)cpufunc_nullop, /* l2cache_wb_range */
341 /* Other functions */
343 cpufunc_nullop, /* flush_prefetchbuf */
344 armv4_drain_writebuf, /* drain_writebuf */
345 cpufunc_nullop, /* flush_brnchtgt_C */
346 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
348 (void *)cpufunc_nullop, /* sleep */
352 cpufunc_null_fixup, /* dataabt_fixup */
353 cpufunc_null_fixup, /* prefetchabt_fixup */
355 arm10_context_switch, /* context_switch */
357 arm10_setup /* cpu setup */
/*
 * Marvell Sheeva (88FR131 / 88FR571) dispatch table: ARMv5-EC I-cache
 * and TLB primitives plus Sheeva-specific D-cache and L2 cache range
 * operations.  Installed by set_cpufuncs(), which additionally disables
 * the L2 prefetch mechanism on 88FR571 per errata MV-S501058.
 * NOTE(review): listing is garbled -- `cpufunc_id' entry and closing
 * `};' are missing; restore from upstream before compiling.
 */
361 struct cpu_functions sheeva_cpufuncs = {
365 cpufunc_nullop, /* cpwait */
369 cpufunc_control, /* control */
370 cpufunc_domains, /* Domain */
371 sheeva_setttb, /* Setttb */
372 cpufunc_faultstatus, /* Faultstatus */
373 cpufunc_faultaddress, /* Faultaddress */
377 armv4_tlb_flushID, /* tlb_flushID */
378 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
379 armv4_tlb_flushI, /* tlb_flushI */
380 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
381 armv4_tlb_flushD, /* tlb_flushD */
382 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
384 /* Cache operations */
386 armv5_ec_icache_sync_all, /* icache_sync_all */
387 armv5_ec_icache_sync_range, /* icache_sync_range */
389 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
390 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
391 sheeva_dcache_inv_range, /* dcache_inv_range */
392 sheeva_dcache_wb_range, /* dcache_wb_range */
394 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
395 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
397 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
398 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
399 sheeva_l2cache_inv_range, /* l2cache_inv_range */
400 sheeva_l2cache_wb_range, /* l2cache_wb_range */
402 /* Other functions */
404 cpufunc_nullop, /* flush_prefetchbuf */
405 armv4_drain_writebuf, /* drain_writebuf */
406 cpufunc_nullop, /* flush_brnchtgt_C */
407 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
409 (void *)cpufunc_nullop, /* sleep */
413 cpufunc_null_fixup, /* dataabt_fixup */
414 cpufunc_null_fixup, /* prefetchabt_fixup */
416 arm10_context_switch, /* context_switch */
418 arm10_setup /* cpu setup */
420 #endif /* CPU_ARM9E || CPU_ARM10 */
/*
 * ARM10 (ARM1020E) dispatch table; installed by set_cpufuncs(), which
 * also derives arm10_dcache_sets_* / arm10_dcache_index_* from CP15.
 * NOTE(review): listing is garbled -- `cpufunc_id' entry and closing
 * `};' are missing; restore from upstream before compiling.
 */
423 struct cpu_functions arm10_cpufuncs = {
427 cpufunc_nullop, /* cpwait */
431 cpufunc_control, /* control */
432 cpufunc_domains, /* Domain */
433 arm10_setttb, /* Setttb */
434 cpufunc_faultstatus, /* Faultstatus */
435 cpufunc_faultaddress, /* Faultaddress */
439 armv4_tlb_flushID, /* tlb_flushID */
440 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
441 armv4_tlb_flushI, /* tlb_flushI */
442 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
443 armv4_tlb_flushD, /* tlb_flushD */
444 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
446 /* Cache operations */
448 arm10_icache_sync_all, /* icache_sync_all */
449 arm10_icache_sync_range, /* icache_sync_range */
451 arm10_dcache_wbinv_all, /* dcache_wbinv_all */
452 arm10_dcache_wbinv_range, /* dcache_wbinv_range */
453 arm10_dcache_inv_range, /* dcache_inv_range */
454 arm10_dcache_wb_range, /* dcache_wb_range */
456 arm10_idcache_wbinv_all, /* idcache_wbinv_all */
457 arm10_idcache_wbinv_range, /* idcache_wbinv_range */
458 cpufunc_nullop, /* l2cache_wbinv_all */
459 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
460 (void *)cpufunc_nullop, /* l2cache_inv_range */
461 (void *)cpufunc_nullop, /* l2cache_wb_range */
463 /* Other functions */
465 cpufunc_nullop, /* flush_prefetchbuf */
466 armv4_drain_writebuf, /* drain_writebuf */
467 cpufunc_nullop, /* flush_brnchtgt_C */
468 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
470 (void *)cpufunc_nullop, /* sleep */
474 cpufunc_null_fixup, /* dataabt_fixup */
475 cpufunc_null_fixup, /* prefetchabt_fixup */
477 arm10_context_switch, /* context_switch */
479 arm10_setup /* cpu setup */
482 #endif /* CPU_ARM10 */
/*
 * StrongARM SA-110 dispatch table; installed by set_cpufuncs().
 * dcache_inv_range is approximated with a purge (see /*XXX*/ entry).
 * NOTE(review): listing is garbled -- `cpufunc_id' entry and closing
 * `};' are missing; restore from upstream before compiling.
 */
485 struct cpu_functions sa110_cpufuncs = {
489 cpufunc_nullop, /* cpwait */
493 cpufunc_control, /* control */
494 cpufunc_domains, /* domain */
495 sa1_setttb, /* setttb */
496 cpufunc_faultstatus, /* faultstatus */
497 cpufunc_faultaddress, /* faultaddress */
501 armv4_tlb_flushID, /* tlb_flushID */
502 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
503 armv4_tlb_flushI, /* tlb_flushI */
504 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
505 armv4_tlb_flushD, /* tlb_flushD */
506 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
508 /* Cache operations */
510 sa1_cache_syncI, /* icache_sync_all */
511 sa1_cache_syncI_rng, /* icache_sync_range */
513 sa1_cache_purgeD, /* dcache_wbinv_all */
514 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
515 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
516 sa1_cache_cleanD_rng, /* dcache_wb_range */
518 sa1_cache_purgeID, /* idcache_wbinv_all */
519 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
520 cpufunc_nullop, /* l2cache_wbinv_all */
521 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
522 (void *)cpufunc_nullop, /* l2cache_inv_range */
523 (void *)cpufunc_nullop, /* l2cache_wb_range */
525 /* Other functions */
527 cpufunc_nullop, /* flush_prefetchbuf */
528 armv4_drain_writebuf, /* drain_writebuf */
529 cpufunc_nullop, /* flush_brnchtgt_C */
530 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
532 (void *)cpufunc_nullop, /* sleep */
536 cpufunc_null_fixup, /* dataabt_fixup */
537 cpufunc_null_fixup, /* prefetchabt_fixup */
539 sa110_context_switch, /* context_switch */
541 sa110_setup /* cpu setup */
543 #endif /* CPU_SA110 */
545 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * StrongARM SA-1100/SA-1110 dispatch table; installed by set_cpufuncs(),
 * which also enables cpu_do_powersave for these parts.  Differs from
 * SA-110 in read-buffer drain and a real sleep routine.
 * NOTE(review): listing is garbled -- `cpufunc_id' entry and closing
 * `};' are missing; restore from upstream before compiling.
 */
546 struct cpu_functions sa11x0_cpufuncs = {
550 cpufunc_nullop, /* cpwait */
554 cpufunc_control, /* control */
555 cpufunc_domains, /* domain */
556 sa1_setttb, /* setttb */
557 cpufunc_faultstatus, /* faultstatus */
558 cpufunc_faultaddress, /* faultaddress */
562 armv4_tlb_flushID, /* tlb_flushID */
563 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
564 armv4_tlb_flushI, /* tlb_flushI */
565 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
566 armv4_tlb_flushD, /* tlb_flushD */
567 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
569 /* Cache operations */
571 sa1_cache_syncI, /* icache_sync_all */
572 sa1_cache_syncI_rng, /* icache_sync_range */
574 sa1_cache_purgeD, /* dcache_wbinv_all */
575 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
576 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
577 sa1_cache_cleanD_rng, /* dcache_wb_range */
579 sa1_cache_purgeID, /* idcache_wbinv_all */
580 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
581 cpufunc_nullop, /* l2cache_wbinv_all */
582 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
583 (void *)cpufunc_nullop, /* l2cache_inv_range */
584 (void *)cpufunc_nullop, /* l2cache_wb_range */
586 /* Other functions */
588 sa11x0_drain_readbuf, /* flush_prefetchbuf */
589 armv4_drain_writebuf, /* drain_writebuf */
590 cpufunc_nullop, /* flush_brnchtgt_C */
591 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
593 sa11x0_cpu_sleep, /* sleep */
597 cpufunc_null_fixup, /* dataabt_fixup */
598 cpufunc_null_fixup, /* prefetchabt_fixup */
600 sa11x0_context_switch, /* context_switch */
602 sa11x0_setup /* cpu setup */
604 #endif /* CPU_SA1100 || CPU_SA1110 */
/*
 * Intel IXP12x0 dispatch table; installed by set_cpufuncs().  Reuses
 * the SA-1 cache/TLB primitives with an IXP12x0 read-buffer drain.
 * NOTE(review): listing is garbled -- `cpufunc_id' entry and closing
 * `};' are missing; restore from upstream before compiling.
 */
607 struct cpu_functions ixp12x0_cpufuncs = {
611 cpufunc_nullop, /* cpwait */
615 cpufunc_control, /* control */
616 cpufunc_domains, /* domain */
617 sa1_setttb, /* setttb */
618 cpufunc_faultstatus, /* faultstatus */
619 cpufunc_faultaddress, /* faultaddress */
623 armv4_tlb_flushID, /* tlb_flushID */
624 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
625 armv4_tlb_flushI, /* tlb_flushI */
626 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
627 armv4_tlb_flushD, /* tlb_flushD */
628 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
630 /* Cache operations */
632 sa1_cache_syncI, /* icache_sync_all */
633 sa1_cache_syncI_rng, /* icache_sync_range */
635 sa1_cache_purgeD, /* dcache_wbinv_all */
636 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
637 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
638 sa1_cache_cleanD_rng, /* dcache_wb_range */
640 sa1_cache_purgeID, /* idcache_wbinv_all */
641 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
642 cpufunc_nullop, /* l2cache_wbinv_all */
643 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
644 (void *)cpufunc_nullop, /* l2cache_inv_range */
645 (void *)cpufunc_nullop, /* l2cache_wb_range */
647 /* Other functions */
649 ixp12x0_drain_readbuf, /* flush_prefetchbuf */
650 armv4_drain_writebuf, /* drain_writebuf */
651 cpufunc_nullop, /* flush_brnchtgt_C */
652 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
654 (void *)cpufunc_nullop, /* sleep */
658 cpufunc_null_fixup, /* dataabt_fixup */
659 cpufunc_null_fixup, /* prefetchabt_fixup */
661 ixp12x0_context_switch, /* context_switch */
663 ixp12x0_setup /* cpu setup */
665 #endif /* CPU_IXP12X0 */
667 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
668 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
669 defined(CPU_XSCALE_80219)
/*
 * Intel XScale (80200/80321/80219/PXA2x0/IXP425) dispatch table;
 * installed by set_cpufuncs().  On i80200 steps A0/A1, set_cpufuncs()
 * overrides cf_dcache_inv_range with xscale_cache_purgeD_rng to work
 * around the dirty-bit erratum.
 * NOTE(review): listing is garbled -- `cpufunc_id' entry and closing
 * `};' are missing; restore from upstream before compiling.
 */
671 struct cpu_functions xscale_cpufuncs = {
675 xscale_cpwait, /* cpwait */
679 xscale_control, /* control */
680 cpufunc_domains, /* domain */
681 xscale_setttb, /* setttb */
682 cpufunc_faultstatus, /* faultstatus */
683 cpufunc_faultaddress, /* faultaddress */
687 armv4_tlb_flushID, /* tlb_flushID */
688 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
689 armv4_tlb_flushI, /* tlb_flushI */
690 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
691 armv4_tlb_flushD, /* tlb_flushD */
692 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
694 /* Cache operations */
696 xscale_cache_syncI, /* icache_sync_all */
697 xscale_cache_syncI_rng, /* icache_sync_range */
699 xscale_cache_purgeD, /* dcache_wbinv_all */
700 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
701 xscale_cache_flushD_rng, /* dcache_inv_range */
702 xscale_cache_cleanD_rng, /* dcache_wb_range */
704 xscale_cache_purgeID, /* idcache_wbinv_all */
705 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
706 cpufunc_nullop, /* l2cache_wbinv_all */
707 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
708 (void *)cpufunc_nullop, /* l2cache_inv_range */
709 (void *)cpufunc_nullop, /* l2cache_wb_range */
711 /* Other functions */
713 cpufunc_nullop, /* flush_prefetchbuf */
714 armv4_drain_writebuf, /* drain_writebuf */
715 cpufunc_nullop, /* flush_brnchtgt_C */
716 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
718 xscale_cpu_sleep, /* sleep */
722 cpufunc_null_fixup, /* dataabt_fixup */
723 cpufunc_null_fixup, /* prefetchabt_fixup */
725 xscale_context_switch, /* context_switch */
727 xscale_setup /* cpu setup */
730 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
733 #ifdef CPU_XSCALE_81342
/*
 * XScale core 3 (81342) dispatch table; installed by set_cpufuncs().
 * Unlike the generic XScale table, this one provides real L2 cache
 * maintenance routines (xscalec3_l2cache_*).
 * NOTE(review): listing is garbled -- `cpufunc_id' entry and closing
 * `};' are missing; restore from upstream before compiling.
 */
734 struct cpu_functions xscalec3_cpufuncs = {
738 xscale_cpwait, /* cpwait */
742 xscale_control, /* control */
743 cpufunc_domains, /* domain */
744 xscalec3_setttb, /* setttb */
745 cpufunc_faultstatus, /* faultstatus */
746 cpufunc_faultaddress, /* faultaddress */
750 armv4_tlb_flushID, /* tlb_flushID */
751 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
752 armv4_tlb_flushI, /* tlb_flushI */
753 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
754 armv4_tlb_flushD, /* tlb_flushD */
755 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
757 /* Cache operations */
759 xscalec3_cache_syncI, /* icache_sync_all */
760 xscalec3_cache_syncI_rng, /* icache_sync_range */
762 xscalec3_cache_purgeD, /* dcache_wbinv_all */
763 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
764 xscale_cache_flushD_rng, /* dcache_inv_range */
765 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
767 xscalec3_cache_purgeID, /* idcache_wbinv_all */
768 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
769 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
770 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
771 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
772 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
774 /* Other functions */
776 cpufunc_nullop, /* flush_prefetchbuf */
777 armv4_drain_writebuf, /* drain_writebuf */
778 cpufunc_nullop, /* flush_brnchtgt_C */
779 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
781 xscale_cpu_sleep, /* sleep */
785 cpufunc_null_fixup, /* dataabt_fixup */
786 cpufunc_null_fixup, /* prefetchabt_fixup */
788 xscalec3_context_switch, /* context_switch */
790 xscale_setup /* cpu setup */
792 #endif /* CPU_XSCALE_81342 */
794 /* Global constants also used by locore.s */
/*
 * The active CPU-function dispatch table.  set_cpufuncs() copies one of
 * the per-core tables above into it at boot.
 */
797 struct cpu_functions cpufuncs;
799 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
801 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
802 defined (CPU_ARM9E) || defined (CPU_ARM10) || \
803 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
804 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
805 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
807 static void get_cachetype_cp15(void);
/*
 * Additional cache information local to this file.  These hold log2
 * values (hence the `l2' in the names) of the primary d-cache set
 * count, associativity and line size, as computed by
 * get_cachetype_cp15(); set_cpufuncs() uses them as shift counts
 * (e.g. 1U << arm_dcache_l2_linesize).
 * NOTE(review): the original introductory comment here was unterminated
 * in this listing and has been reconstructed.
 */
811 static int arm_dcache_l2_nsets;
812 static int arm_dcache_l2_assoc;
813 static int arm_dcache_l2_linesize;
818 u_int ctype, isize, dsize;
821 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
825 * ...and thus spake the ARM ARM:
827 * If an <opcode2> value corresponding to an unimplemented or
828 * reserved ID register is encountered, the System Control
829 * processor returns the value of the main ID register.
831 if (ctype == cpufunc_id())
834 if ((ctype & CPU_CT_S) == 0)
835 arm_pcache_unified = 1;
838 * If you want to know how this code works, go read the ARM ARM.
841 arm_pcache_type = CPU_CT_CTYPE(ctype);
843 if (arm_pcache_unified == 0) {
844 isize = CPU_CT_ISIZE(ctype);
845 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
846 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
847 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
848 if (isize & CPU_CT_xSIZE_M)
849 arm_picache_line_size = 0; /* not present */
851 arm_picache_ways = 1;
853 arm_picache_ways = multiplier <<
854 (CPU_CT_xSIZE_ASSOC(isize) - 1);
856 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
859 dsize = CPU_CT_DSIZE(ctype);
860 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
861 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
862 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
863 if (dsize & CPU_CT_xSIZE_M)
864 arm_pdcache_line_size = 0; /* not present */
866 arm_pdcache_ways = 1;
868 arm_pdcache_ways = multiplier <<
869 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
871 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
873 arm_dcache_align = arm_pdcache_line_size;
875 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
876 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
877 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
878 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
881 arm_dcache_align_mask = arm_dcache_align - 1;
883 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
885 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
887 /* Cache information for CPUs without cache type registers. */
891 int ct_pcache_unified;
893 int ct_pdcache_line_size;
896 int ct_picache_line_size;
/*
 * Static cache-geometry table for CPUs that lack the CP15 cache-type
 * register (SA-110/SA-1100/SA-1110/IXP1200); consumed by
 * get_cachetype_table(), terminated by the all-zero sentinel entry.
 * NOTE(review): closing `};' dropped in this listing; restore from
 * upstream before compiling.
 */
900 struct cachetab cachetab[] = {
901 /* cpuid, cache type, u, dsiz, ls, wy, isiz, ls, wy */
902 /* XXX is this type right for SA-1? */
903 { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
904 { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
905 { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
906 { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
907 { 0, 0, 0, 0, 0, 0, 0, 0}
910 static void get_cachetype_table(void);
913 get_cachetype_table()
916 u_int32_t cpuid = cpufunc_id();
918 for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
919 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
920 arm_pcache_type = cachetab[i].ct_pcache_type;
921 arm_pcache_unified = cachetab[i].ct_pcache_unified;
922 arm_pdcache_size = cachetab[i].ct_pdcache_size;
923 arm_pdcache_line_size =
924 cachetab[i].ct_pdcache_line_size;
925 arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
926 arm_picache_size = cachetab[i].ct_picache_size;
927 arm_picache_line_size =
928 cachetab[i].ct_picache_line_size;
929 arm_picache_ways = cachetab[i].ct_picache_ways;
932 arm_dcache_align = arm_pdcache_line_size;
934 arm_dcache_align_mask = arm_dcache_align - 1;
937 #endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
940 * Cannot panic here as we may not have a console yet ...
946 cputype = cpufunc_id();
947 cputype &= CPU_ID_CPU_MASK;
950 * NOTE: cpu_do_powersave defaults to off. If we encounter a
951 * CPU type where we want to use it by default, then we set it.
955 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
956 CPU_ID_IS7(cputype) &&
957 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
958 cpufuncs = arm7tdmi_cpufuncs;
959 cpu_reset_needs_v4_MMU_disable = 0;
960 get_cachetype_cp15();
961 pmap_pte_init_generic();
966 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
967 (cputype & 0x0000f000) == 0x00008000) {
968 cpufuncs = arm8_cpufuncs;
969 cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
970 get_cachetype_cp15();
971 pmap_pte_init_arm8();
974 #endif /* CPU_ARM8 */
976 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
977 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
978 (cputype & 0x0000f000) == 0x00009000) {
979 cpufuncs = arm9_cpufuncs;
980 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
981 get_cachetype_cp15();
982 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
983 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
984 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
985 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
986 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
987 #ifdef ARM9_CACHE_WRITE_THROUGH
988 pmap_pte_init_arm9();
990 pmap_pte_init_generic();
994 #endif /* CPU_ARM9 */
995 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
996 if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS ||
997 cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
998 cputype == CPU_ID_MV88FR571_41) {
999 if (cputype == CPU_ID_MV88FR131 ||
1000 cputype == CPU_ID_MV88FR571_VD ||
1001 cputype == CPU_ID_MV88FR571_41) {
1003 cpufuncs = sheeva_cpufuncs;
1005 * Workaround for Marvell MV78100 CPU: Cache prefetch
1006 * mechanism may affect the cache coherency validity,
1007 * so it needs to be disabled.
1009 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
1010 * L2 Prefetching Mechanism) for details.
1012 if (cputype == CPU_ID_MV88FR571_VD ||
1013 cputype == CPU_ID_MV88FR571_41) {
1014 sheeva_control_ext(0xffffffff,
1015 FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
1016 FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN |
1019 sheeva_control_ext(0xffffffff,
1020 FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
1021 FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN);
1024 cpufuncs = armv5_ec_cpufuncs;
1026 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1027 get_cachetype_cp15();
1028 pmap_pte_init_generic();
1031 #endif /* CPU_ARM9E || CPU_ARM10 */
1033 if (/* cputype == CPU_ID_ARM1020T || */
1034 cputype == CPU_ID_ARM1020E) {
1036 * Select write-through cacheing (this isn't really an
1037 * option on ARM1020T).
1039 cpufuncs = arm10_cpufuncs;
1040 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1041 get_cachetype_cp15();
1042 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1043 arm10_dcache_sets_max =
1044 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1045 arm10_dcache_sets_inc;
1046 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1047 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
1048 pmap_pte_init_generic();
1051 #endif /* CPU_ARM10 */
1053 if (cputype == CPU_ID_SA110) {
1054 cpufuncs = sa110_cpufuncs;
1055 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1056 get_cachetype_table();
1057 pmap_pte_init_sa1();
1060 #endif /* CPU_SA110 */
1062 if (cputype == CPU_ID_SA1100) {
1063 cpufuncs = sa11x0_cpufuncs;
1064 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1065 get_cachetype_table();
1066 pmap_pte_init_sa1();
1067 /* Use powersave on this CPU. */
1068 cpu_do_powersave = 1;
1072 #endif /* CPU_SA1100 */
1074 if (cputype == CPU_ID_SA1110) {
1075 cpufuncs = sa11x0_cpufuncs;
1076 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1077 get_cachetype_table();
1078 pmap_pte_init_sa1();
1079 /* Use powersave on this CPU. */
1080 cpu_do_powersave = 1;
1084 #endif /* CPU_SA1110 */
1086 if (cputype == CPU_ID_IXP1200) {
1087 cpufuncs = ixp12x0_cpufuncs;
1088 cpu_reset_needs_v4_MMU_disable = 1;
1089 get_cachetype_table();
1090 pmap_pte_init_sa1();
1093 #endif /* CPU_IXP12X0 */
1094 #ifdef CPU_XSCALE_80200
1095 if (cputype == CPU_ID_80200) {
1096 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1101 * Reset the Performance Monitoring Unit to a
1103 * - CCNT, PMN0, PMN1 reset to 0
1104 * - overflow indications cleared
1105 * - all counters disabled
1107 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1109 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1112 #if defined(XSCALE_CCLKCFG)
1114 * Crank CCLKCFG to maximum legal value.
1116 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1118 : "r" (XSCALE_CCLKCFG));
1122 * XXX Disable ECC in the Bus Controller Unit; we
1123 * don't really support it, yet. Clear any pending
1124 * error indications.
1126 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1128 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1130 cpufuncs = xscale_cpufuncs;
1131 #if defined(PERFCTRS)
1136 * i80200 errata: Step-A0 and A1 have a bug where
1137 * D$ dirty bits are not cleared on "invalidate by
1140 * Workaround: Clean cache line before invalidating.
1142 if (rev == 0 || rev == 1)
1143 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1145 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1146 get_cachetype_cp15();
1147 pmap_pte_init_xscale();
1150 #endif /* CPU_XSCALE_80200 */
1151 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1152 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1153 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1154 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1156 * Reset the Performance Monitoring Unit to a
1158 * - CCNT, PMN0, PMN1 reset to 0
1159 * - overflow indications cleared
1160 * - all counters disabled
1162 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1164 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1167 cpufuncs = xscale_cpufuncs;
1168 #if defined(PERFCTRS)
1172 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1173 get_cachetype_cp15();
1174 pmap_pte_init_xscale();
1177 #endif /* CPU_XSCALE_80321 */
1179 #if defined(CPU_XSCALE_81342)
1180 if (cputype == CPU_ID_81342) {
1181 cpufuncs = xscalec3_cpufuncs;
1182 #if defined(PERFCTRS)
1186 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1187 get_cachetype_cp15();
1188 pmap_pte_init_xscale();
1191 #endif /* CPU_XSCALE_81342 */
1192 #ifdef CPU_XSCALE_PXA2X0
1193 /* ignore core revision to test PXA2xx CPUs */
1194 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1195 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1196 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1198 cpufuncs = xscale_cpufuncs;
1199 #if defined(PERFCTRS)
1203 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1204 get_cachetype_cp15();
1205 pmap_pte_init_xscale();
1207 /* Use powersave on this CPU. */
1208 cpu_do_powersave = 1;
1212 #endif /* CPU_XSCALE_PXA2X0 */
1213 #ifdef CPU_XSCALE_IXP425
1214 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1215 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
1217 cpufuncs = xscale_cpufuncs;
1218 #if defined(PERFCTRS)
1222 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1223 get_cachetype_cp15();
1224 pmap_pte_init_xscale();
1228 #endif /* CPU_XSCALE_IXP425 */
1230 * Bzzzz. And the answer was ...
1232 panic("No support for this CPU type (%08x) in kernel", cputype);
1233 return(ARCHITECTURE_NOT_PRESENT);
1235 uma_set_align(arm_dcache_align_mask);
1240 * Fixup routines for data and prefetch aborts.
1242 * Several compile time symbols are used
1244 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1245 * correction of registers after a fault.
1246 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1247 * when defined should use late aborts
/*
 * cpufunc_null_fixup: no-op abort fixup used by CPUs whose aborts need
 * no post-fault register correction; always reports success.
 * (K&R-style definition; surrounding lines are elided in this listing.)
 */
1252 * Null abort fixup routine.
1253 * For use when no fixup is required.
1256 cpufunc_null_fixup(arg)
1259 return(ABORT_FIXUP_OK);
/*
 * Debug plumbing for the abort-fixup code below: with
 * DEBUG_FAULT_CORRECTION defined, DFC_PRINTF()/DFC_DISASSEMBLE() trace
 * each register correction; otherwise both expand to nothing.
 * NOTE(review): the "#else" arm between the two variants is elided in
 * this listing — confirm against the full source.
 */
1263 #if defined(CPU_ARM7TDMI)
1265 #ifdef DEBUG_FAULT_CORRECTION
1266 #define DFC_PRINTF(x) printf x
1267 #define DFC_DISASSEMBLE(x) disassemble(x)
1269 #define DFC_PRINTF(x) /* nothing */
1270 #define DFC_DISASSEMBLE(x) /* nothing */
/*
 * early_abort_fixup: undo the base-register writeback performed by a
 * faulting LDM/STM or LDC/STC so the instruction can be restarted.
 *
 * For SVC-mode aborts the SVC r14 is temporarily copied into the usr
 * r14 slot of the trapframe so the registers can be indexed as a flat
 * int array (&frame->tf_r0); the saved value is restored before
 * returning.  Returns ABORT_FIXUP_OK on success, ABORT_FIXUP_FAILED
 * when the instruction cannot be corrected (e.g. writeback to r13/r15
 * in SVC mode).
 */
1274 * "Early" data abort fixup.
1276 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
1277 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1279 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1282 early_abort_fixup(arg)
1285 trapframe_t *frame = arg;
1287 u_int fault_instruction;
1290 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1292 /* Ok an abort in SVC mode */
1295 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1296 * as the fault happened in svc mode but we need it in the
1297 * usr slot so we can treat the registers as an array of ints
1299 * NOTE: This PC is in the position but writeback is not
1301 * Doing it like this is more efficient than trapping this
1302 * case in all possible locations in the following fixup code.
1305 saved_lr = frame->tf_usr_lr;
1306 frame->tf_usr_lr = frame->tf_svc_lr;
1309 * Note the trapframe does not have the SVC r13 so a fault
1310 * from an instruction with writeback to r13 in SVC mode is
1311 * not allowed. This should not happen as the kstack is
1316 /* Get fault address and status from the CPU */
1318 fault_pc = frame->tf_pc;
1319 fault_instruction = *((volatile unsigned int *)fault_pc);
1321 /* Decode the fault instruction and fix the registers as needed */
/* Opcode mask 0x0e000000 == 0x08000000 selects LDM/STM (block transfer). */
1323 if ((fault_instruction & 0x0e000000) == 0x08000000) {
1327 int *registers = &frame->tf_r0;
1329 DFC_PRINTF(("LDM/STM\n"));
1330 DFC_DISASSEMBLE(fault_pc);
/* Bit 21 is the W (writeback) bit: only then was the base modified. */
1331 if (fault_instruction & (1 << 21)) {
1332 DFC_PRINTF(("This instruction must be corrected\n"));
1333 base = (fault_instruction >> 16) & 0x0f;
1335 return ABORT_FIXUP_FAILED;
1336 /* Count registers transferred */
1338 for (loop = 0; loop < 16; ++loop) {
1339 if (fault_instruction & (1<<loop))
1342 DFC_PRINTF(("%d registers used\n", count));
1343 DFC_PRINTF(("Corrected r%d by %d bytes ",
/* Bit 23 is the U bit; the writeback is undone by moving the base
 * the opposite way (4 bytes per transferred register). */
1345 if (fault_instruction & (1 << 23)) {
1346 DFC_PRINTF(("down\n"));
1347 registers[base] -= count * 4;
1349 DFC_PRINTF(("up\n"));
1350 registers[base] += count * 4;
/* Opcode mask == 0x0c000000 selects LDC/STC (coprocessor transfer). */
1353 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1356 int *registers = &frame->tf_r0;
1358 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1360 DFC_DISASSEMBLE(fault_pc);
1362 /* Only need to fix registers if write back is turned on */
1364 if ((fault_instruction & (1 << 21)) != 0) {
1365 base = (fault_instruction >> 16) & 0x0f;
1367 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1368 return ABORT_FIXUP_FAILED;
1370 return ABORT_FIXUP_FAILED;
/* LDC/STC immediate is a word offset: scale the 8-bit field by 4. */
1372 offset = (fault_instruction & 0xff) << 2;
1373 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1374 if ((fault_instruction & (1 << 23)) != 0)
1376 registers[base] += offset;
1377 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1379 } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1380 return ABORT_FIXUP_FAILED;
/* Restore the borrowed usr r14 slot for SVC-mode aborts. */
1382 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1384 /* Ok an abort in SVC mode */
1387 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1388 * as the fault happened in svc mode but we need it in the
1389 * usr slot so we can treat the registers as an array of ints
1391 * NOTE: This PC is in the position but writeback is not
1393 * Doing it like this is more efficient than trapping this
1394 * case in all possible locations in the prior fixup code.
1397 frame->tf_svc_lr = frame->tf_usr_lr;
1398 frame->tf_usr_lr = saved_lr;
1401 * Note the trapframe does not have the SVC r13 so a fault
1402 * from an instruction with writeback to r13 in SVC mode is
1403 * not allowed. This should not happen as the kstack is
1408 return(ABORT_FIXUP_OK);
1410 #endif /* CPU_ARM2/250/3/6/7 */
/*
 * late_abort_fixup: undo base-register updates for the "base updated"
 * (late) abort model, where every data-transfer instruction may have
 * modified its base register before the abort was taken.
 *
 * Handles SWP (no correction needed) and single LDR/STR with immediate
 * or shifted-register offsets; LDM/STM/LDC/STC are delegated to
 * early_abort_fixup() at the end.  Uses the same "borrow usr r14"
 * trapframe trick as early_abort_fixup().  Returns ABORT_FIXUP_OK or
 * ABORT_FIXUP_FAILED.
 */
1413 #if defined(CPU_ARM7TDMI)
1415 * "Late" (base updated) data abort fixup
1417 * For ARM6 (in late-abort mode) and ARM7.
1419 * In this model, all data-transfer instructions need fixing up. We defer
1420 * LDM, STM, LDC and STC fixup to the early-abort handler.
1423 late_abort_fixup(arg)
1426 trapframe_t *frame = arg;
1428 u_int fault_instruction;
1431 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1433 /* Ok an abort in SVC mode */
1436 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1437 * as the fault happened in svc mode but we need it in the
1438 * usr slot so we can treat the registers as an array of ints
1440 * NOTE: This PC is in the position but writeback is not
1442 * Doing it like this is more efficient than trapping this
1443 * case in all possible locations in the following fixup code.
1446 saved_lr = frame->tf_usr_lr;
1447 frame->tf_usr_lr = frame->tf_svc_lr;
1450 * Note the trapframe does not have the SVC r13 so a fault
1451 * from an instruction with writeback to r13 in SVC mode is
1452 * not allowed. This should not happen as the kstack is
1457 /* Get fault address and status from the CPU */
1459 fault_pc = frame->tf_pc;
1460 fault_instruction = *((volatile unsigned int *)fault_pc);
1462 /* Decode the fault instruction and fix the registers as needed */
1464 /* Was is a swap instruction ? */
/* SWP/SWPB encoding: no base writeback, nothing to correct. */
1466 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1467 DFC_DISASSEMBLE(fault_pc);
1468 } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1470 /* Was is a ldr/str instruction */
1471 /* This is for late abort only */
1475 int *registers = &frame->tf_r0;
1477 DFC_DISASSEMBLE(fault_pc);
1479 /* This is for late abort only */
/* P==0 (post-indexed) or W==1 (writeback): base was updated. */
1481 if ((fault_instruction & (1 << 24)) == 0
1482 || (fault_instruction & (1 << 21)) != 0) {
1483 /* postindexed ldr/str with no writeback */
1485 base = (fault_instruction >> 16) & 0x0f;
1487 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1488 return ABORT_FIXUP_FAILED;
1490 return ABORT_FIXUP_FAILED;
1491 DFC_PRINTF(("late abt fix: r%d=%08x : ",
1492 base, registers[base]));
/* Bit 25 (I bit) clear => 12-bit immediate offset. */
1493 if ((fault_instruction & (1 << 25)) == 0) {
1494 /* Immediate offset - easy */
1496 offset = fault_instruction & 0xfff;
1497 if ((fault_instruction & (1 << 23)))
1499 registers[base] += offset;
1500 DFC_PRINTF(("imm=%08x ", offset));
1502 /* offset is a shifted register */
1505 offset = fault_instruction & 0x0f;
1507 return ABORT_FIXUP_FAILED;
1510 * Register offset - hard we have to
1511 * cope with shifts !
1513 offset = registers[offset];
1515 if ((fault_instruction & (1 << 4)) == 0)
1516 /* shift with amount */
1517 shift = (fault_instruction >> 7) & 0x1f;
1519 /* shift with register */
1520 if ((fault_instruction & (1 << 7)) != 0)
1521 /* undefined for now so bail out */
1522 return ABORT_FIXUP_FAILED;
1523 shift = ((fault_instruction >> 8) & 0xf);
1525 return ABORT_FIXUP_FAILED;
1526 DFC_PRINTF(("shift reg=%d ", shift));
1527 shift = registers[shift];
1529 DFC_PRINTF(("shift=%08x ", shift));
/* Emulate the ARM shifter for LSL/LSR/ASR; ROR/RRX unsupported. */
1530 switch (((fault_instruction >> 5) & 0x3)) {
1531 case 0 : /* Logical left */
1532 offset = (int)(((u_int)offset) << shift);
1534 case 1 : /* Logical Right */
/* NOTE(review): LSR #0 encodes LSR #32 on ARM, but a C shift by 32
 * on a 32-bit type is undefined behavior — confirm intended result. */
1535 if (shift == 0) shift = 32;
1536 offset = (int)(((u_int)offset) >> shift);
1538 case 2 : /* Arithmetic Right */
1539 if (shift == 0) shift = 32;
1540 offset = (int)(((int)offset) >> shift);
1542 case 3 : /* Rotate right (rol or rxx) */
1543 return ABORT_FIXUP_FAILED;
1547 DFC_PRINTF(("abt: fixed LDR/STR with "
1548 "register offset\n"));
1549 if ((fault_instruction & (1 << 23)))
1551 DFC_PRINTF(("offset=%08x ", offset));
1552 registers[base] += offset;
1554 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
/* Restore the borrowed usr r14 slot for SVC-mode aborts. */
1558 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1560 /* Ok an abort in SVC mode */
1563 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1564 * as the fault happened in svc mode but we need it in the
1565 * usr slot so we can treat the registers as an array of ints
1567 * NOTE: This PC is in the position but writeback is not
1569 * Doing it like this is more efficient than trapping this
1570 * case in all possible locations in the prior fixup code.
1573 frame->tf_svc_lr = frame->tf_usr_lr;
1574 frame->tf_usr_lr = saved_lr;
1577 * Note the trapframe does not have the SVC r13 so a fault
1578 * from an instruction with writeback to r13 in SVC mode is
1579 * not allowed. This should not happen as the kstack is
1585 * Now let the early-abort fixup routine have a go, in case it
1586 * was an LDM, STM, LDC or STC that faulted.
1589 return early_abort_fixup(arg);
1591 #endif /* CPU_ARM7TDMI */
/*
 * parse_cpu_options: walk a NULL-terminated table of boot options and
 * fold each recognized boolean option into the CPU control-register
 * value.  For an option set true, co_trueop is applied (OR sets the
 * bit, BIC clears it); for false, co_falseop is applied the same way.
 * Returns the updated cpuctrl (return statement elided in this view).
 */
1597 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1598 defined(CPU_ARM9E) || \
1599 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
1600 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1601 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1602 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1603 defined(CPU_ARM10) || defined(CPU_ARM11)
1616 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1619 parse_cpu_options(args, optlist, cpuctrl)
1621 struct cpu_option *optlist;
1629 while (optlist->co_name) {
1630 if (get_bootconf_option(args, optlist->co_name,
1631 BOOTOPT_TYPE_BOOLEAN, &integer)) {
1633 if (optlist->co_trueop == OR)
1634 cpuctrl |= optlist->co_value;
1635 else if (optlist->co_trueop == BIC)
1636 cpuctrl &= ~optlist->co_value;
1638 if (optlist->co_falseop == OR)
1639 cpuctrl |= optlist->co_value;
1640 else if (optlist->co_falseop == BIC)
1641 cpuctrl &= ~optlist->co_value;
1648 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
/*
 * Boot-option table shared by the ARM6/7/8 setup routines: maps option
 * names to I/D-cache and write-buffer enable bits.  Per entry: op when
 * the option is true, op when false, and the control-register bits.
 * NULL name terminates the table.
 */
1650 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
1651 struct cpu_option arm678_options[] = {
1653 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
1654 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1655 #endif /* COMPAT_12 */
1656 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1657 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1658 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1659 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1660 { NULL, IGN, IGN, 0 }
1663 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
/*
 * ARM7TDMI-specific boot options: cache/write-buffer toggles plus the
 * FPA clock divider (CPU_CONTROL_CPCLK); supplements arm678_options.
 */
1666 struct cpu_option arm7tdmi_options[] = {
1667 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1668 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1669 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1670 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1672 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
1673 #endif /* COMPAT_12 */
1674 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
1675 { NULL, IGN, IGN, 0 }
/*
 * arm7tdmi_setup: build the ARM7TDMI control-register value (MMU,
 * 32-bit P/D spaces, system protection, unified cache, write buffer),
 * apply boot-option overrides, then flush the caches and write the
 * control register with an all-ones mask.
 */
1679 arm7tdmi_setup(args)
1684 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1685 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1686 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1688 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1689 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
/* Big-endian kernels also set the BEND bit (guard elided here). */
1692 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1695 /* Clear out the cache */
1696 cpu_idcache_wbinv_all();
1698 /* Set the control register */
1700 cpu_control(0xffffffff, cpuctrl);
1702 #endif /* CPU_ARM7TDMI */
/*
 * ARM8-specific boot options: cache/write-buffer toggles plus branch
 * prediction (CPU_CONTROL_BPRD_ENABLE); supplements arm678_options.
 */
1705 struct cpu_option arm8_options[] = {
1706 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1707 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1708 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1709 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1711 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1712 #endif /* COMPAT_12 */
1713 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1714 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1715 { NULL, IGN, IGN, 0 }
/*
 * arm8_setup (body; the function header is elided in this listing):
 * build the ARM8 control-register value, apply boot-option overrides,
 * read and adjust the ARM8 clock/test configuration from the
 * "arm8.clock.*" / "arm8.test" boot options, then flush the caches and
 * program both the control and clock/test registers.
 */
1723 int cpuctrl, cpuctrlmask;
1727 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1728 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1729 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1730 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1731 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1732 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1733 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1734 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1736 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1737 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1740 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1741 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1744 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1747 /* Get clock configuration */
1748 clocktest = arm8_clock_config(0, 0) & 0x0f;
1750 /* Special ARM8 clock and test configuration */
1751 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1755 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1759 clocktest &= ~(0x01);
1762 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1766 clocktest &= ~(0x02);
1769 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
/* NOTE(review): mask ~0xc0 clears bits 7:6 but the value is shifted
 * into bits 3:2 — the mask and shift look inconsistent; confirm
 * against the ARM810 clock/test register layout. */
1770 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1773 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1774 clocktest |= (integer & 7) << 5;
1778 /* Clear out the cache */
1779 cpu_idcache_wbinv_all();
1781 /* Set the control register */
1783 cpu_control(0xffffffff, cpuctrl);
1785 /* Set the clock/test register */
1787 arm8_clock_config(0x7f, clocktest);
1789 #endif /* CPU_ARM8 */
/*
 * ARM9 boot options: split I-cache/D-cache controls (CPU_CONTROL_IC/
 * DC_ENABLE) replace the unified IDC bit used on earlier cores.
 */
1792 struct cpu_option arm9_options[] = {
1793 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1794 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1795 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1796 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1797 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1798 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1799 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1800 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1801 { NULL, IGN, IGN, 0 }
/*
 * arm9_setup (body; function header elided): build the ARM9 control
 * word (split I/D caches, late aborts, round-robin replacement), apply
 * options, relocate vectors high if requested, flush caches, and write
 * the control register using cpuctrlmask (only managed bits touched).
 */
1808 int cpuctrl, cpuctrlmask;
1810 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1811 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1812 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1813 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1814 CPU_CONTROL_ROUNDROBIN;
1815 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1816 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1817 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1818 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1819 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1820 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1821 | CPU_CONTROL_ROUNDROBIN;
1823 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1824 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1827 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1830 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Map the exception vectors at the high address when requested. */
1832 if (vector_page == ARM_VECTORS_HIGH)
1833 cpuctrl |= CPU_CONTROL_VECRELOC;
1835 /* Clear out the cache */
1836 cpu_idcache_wbinv_all();
1838 /* Set the control register */
1839 cpu_control(cpuctrlmask, cpuctrl);
1843 #endif /* CPU_ARM9 */
/*
 * ARM9E/ARM10 boot options: split I/D cache and write-buffer controls,
 * shared by the arm10 setup path.
 */
1845 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
1846 struct cpu_option arm10_options[] = {
1847 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1848 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1849 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1850 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1851 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1852 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1853 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1854 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1855 { NULL, IGN, IGN, 0 }
/*
 * arm10_setup (body; function header elided): build the ARM9E/ARM10
 * control word (branch prediction enabled by default), apply options,
 * flush and fully invalidate the caches via CP15 c7, relocate vectors
 * if requested, write the control register, then flush again.
 */
1862 int cpuctrl, cpuctrlmask;
1864 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1865 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1866 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1867 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1868 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1869 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1870 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1871 | CPU_CONTROL_BPRD_ENABLE
1872 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1874 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1875 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1878 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1881 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1884 /* Clear out the cache */
1885 cpu_idcache_wbinv_all();
1887 /* Now really make sure they are clean. */
/* CP15 c7, c7, 0: invalidate both I and D caches in one shot. */
1888 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1890 if (vector_page == ARM_VECTORS_HIGH)
1891 cpuctrl |= CPU_CONTROL_VECRELOC;
1893 /* Set the control register */
1895 cpu_control(0xffffffff, cpuctrl);
1898 cpu_idcache_wbinv_all();
1900 #endif /* CPU_ARM9E || CPU_ARM10 */
/*
 * ARM11 boot options: cache enables only (no write-buffer control on
 * this core's table).
 */
1903 struct cpu_option arm11_options[] = {
1904 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1905 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1906 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1907 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1908 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1909 { NULL, IGN, IGN, 0 }
/*
 * arm11_setup (body; function header elided): build the ARM11 control
 * word (branch prediction deliberately left commented out), apply
 * options, flush and invalidate caches, record the value in
 * curcpu()->ci_ctrl, write the control register, then flush again.
 */
1916 int cpuctrl, cpuctrlmask;
1918 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1919 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1920 /* | CPU_CONTROL_BPRD_ENABLE */;
1921 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1922 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1923 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
1924 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1925 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1927 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1928 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1931 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1934 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1937 /* Clear out the cache */
1938 cpu_idcache_wbinv_all();
1940 /* Now really make sure they are clean. */
1941 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1943 /* Set the control register */
/* Cache the programmed value so other code can consult it later. */
1944 curcpu()->ci_ctrl = cpuctrl;
1945 cpu_control(0xffffffff, cpuctrl);
1948 cpu_idcache_wbinv_all();
1950 #endif /* CPU_ARM11 */
/*
 * StrongARM SA-110 boot options: split I/D cache and write-buffer
 * controls, including legacy COMPAT_12 names.
 */
1953 struct cpu_option sa110_options[] = {
1955 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1956 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1957 #endif /* COMPAT_12 */
1958 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1959 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1960 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1961 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1962 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1963 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1964 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1965 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1966 { NULL, IGN, IGN, 0 }
/*
 * sa110_setup (body; function header elided): build the SA-110 control
 * word, apply options, flush caches, write the control register, and
 * finally enable clock switching via a CP15 c15 operation.
 */
1973 int cpuctrl, cpuctrlmask;
1975 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1976 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1977 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1978 | CPU_CONTROL_WBUF_ENABLE;
1979 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1980 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1981 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1982 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1983 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1984 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1985 | CPU_CONTROL_CPCLK;
1987 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1988 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1991 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1994 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1997 /* Clear out the cache */
1998 cpu_idcache_wbinv_all();
2000 /* Set the control register */
2002 /* cpu_control(cpuctrlmask, cpuctrl);*/
2003 cpu_control(0xffffffff, cpuctrl);
2006 * enable clockswitching, note that this doesn't read or write to r0,
2007 * r0 is just to make it valid asm
2009 __asm ("mcr 15, 0, r0, c15, c1, 2");
2011 #endif /* CPU_SA110 */
/*
 * StrongARM SA-1100/SA-1110 boot options: same structure as the
 * SA-110 table but under the "sa11x0." prefix.
 */
2013 #if defined(CPU_SA1100) || defined(CPU_SA1110)
2014 struct cpu_option sa11x0_options[] = {
2016 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2017 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2018 #endif /* COMPAT_12 */
2019 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2020 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2021 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2022 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2023 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2024 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2025 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2026 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2027 { NULL, IGN, IGN, 0 }
/*
 * sa11x0_setup (body; function header elided): build the SA-11x0
 * control word (late aborts enabled by default), apply options,
 * relocate vectors high if requested, flush caches, and write the
 * control register with an all-ones mask.
 */
2034 int cpuctrl, cpuctrlmask;
2036 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2037 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2038 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2039 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2040 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2041 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2042 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2043 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2044 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2045 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2046 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2048 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2049 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2053 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2056 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2059 if (vector_page == ARM_VECTORS_HIGH)
2060 cpuctrl |= CPU_CONTROL_VECRELOC;
2061 /* Clear out the cache */
2062 cpu_idcache_wbinv_all();
2063 /* Set the control register */
2065 cpu_control(0xffffffff, cpuctrl);
2067 #endif /* CPU_SA1100 || CPU_SA1110 */
/*
 * Intel IXP12x0 boot options: split I/D cache and write-buffer
 * controls under the "ixp12x0." prefix.
 */
2069 #if defined(CPU_IXP12X0)
2070 struct cpu_option ixp12x0_options[] = {
2071 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2072 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2073 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2074 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2075 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2076 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2077 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2078 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2079 { NULL, IGN, IGN, 0 }
/*
 * ixp12x0_setup (body; function header elided): build the IXP12x0
 * control word, apply options, relocate vectors high if requested,
 * flush caches, and write the control register — note this path uses
 * cpuctrlmask (not 0xffffffff), leaving unmanaged bits untouched.
 */
2086 int cpuctrl, cpuctrlmask;
2089 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2090 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2091 | CPU_CONTROL_IC_ENABLE;
2093 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2094 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2095 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2096 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2097 | CPU_CONTROL_VECRELOC;
2099 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2100 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2103 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2106 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2109 if (vector_page == ARM_VECTORS_HIGH)
2110 cpuctrl |= CPU_CONTROL_VECRELOC;
2112 /* Clear out the cache */
2113 cpu_idcache_wbinv_all();
2115 /* Set the control register */
2117 /* cpu_control(0xffffffff, cpuctrl); */
2118 cpu_control(cpuctrlmask, cpuctrl);
2120 #endif /* CPU_IXP12X0 */
/*
 * XScale boot options: branch prediction plus split I/D cache enables
 * under both the "cpu." and "xscale." prefixes.
 */
2122 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2123 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2124 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
2125 struct cpu_option xscale_options[] = {
2127 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2128 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2129 #endif /* COMPAT_12 */
2130 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2131 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2132 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2133 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2134 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2135 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2136 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2137 { NULL, IGN, IGN, 0 }
/*
 * xscale_setup (body; function header elided): build the XScale
 * control word, apply options, optionally enable the L2 cache
 * (CPU_XSCALE_CORE3), flush caches, write the control register, and
 * finally program the CP15 auxiliary control register so write
 * coalescing matches the XSCALE_NO_COALESCE_WRITES kernel option
 * (plus low-locality-reference / mini-data-cache bits on core3).
 */
2145 int cpuctrl, cpuctrlmask;
2148 * The XScale Write Buffer is always enabled. Our option
2149 * is to enable/disable coalescing. Note that bits 6:3
2150 * must always be enabled.
2153 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2154 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2155 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2156 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2157 | CPU_CONTROL_BPRD_ENABLE;
2158 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2159 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2160 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2161 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2162 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2163 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2164 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
2165 CPU_CONTROL_L2_ENABLE;
2167 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2168 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2171 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2174 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2177 if (vector_page == ARM_VECTORS_HIGH)
2178 cpuctrl |= CPU_CONTROL_VECRELOC;
2179 #ifdef CPU_XSCALE_CORE3
2180 cpuctrl |= CPU_CONTROL_L2_ENABLE;
2183 /* Clear out the cache */
2184 cpu_idcache_wbinv_all();
2187 * Set the control register. Note that bits 6:3 must always
2191 /* cpu_control(cpuctrlmask, cpuctrl);*/
2192 cpu_control(0xffffffff, cpuctrl);
2194 /* Make sure write coalescing is turned on */
/* Read-modify-write of the CP15 c1 auxiliary control register. */
2195 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2197 #ifdef XSCALE_NO_COALESCE_WRITES
2198 auxctl |= XSCALE_AUXCTL_K;
2200 auxctl &= ~XSCALE_AUXCTL_K;
2202 #ifdef CPU_XSCALE_CORE3
2203 auxctl |= XSCALE_AUXCTL_LLR;
2204 auxctl |= XSCALE_AUXCTL_MD_MASK;
2206 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2209 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425