1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 * products derived from this software without specific prior written
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * RiscBSD kernel project
43 * C functions for supporting CPU / MMU / TLB specific operations.
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
50 #include <sys/param.h>
51 #include <sys/systm.h>
53 #include <sys/mutex.h>
55 #include <machine/bus.h>
56 #include <machine/cpu.h>
57 #include <machine/disassem.h>
63 #include <machine/cpuconf.h>
64 #include <machine/cpufunc.h>
65 #include <machine/bootconfig.h>
67 #ifdef CPU_XSCALE_80200
68 #include <arm/xscale/i80200/i80200reg.h>
69 #include <arm/xscale/i80200/i80200var.h>
72 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73 #include <arm/xscale/i80321/i80321reg.h>
74 #include <arm/xscale/i80321/i80321var.h>
77 #if defined(CPU_XSCALE_81342)
78 #include <arm/xscale/i8134x/i81342reg.h>
81 #ifdef CPU_XSCALE_IXP425
82 #include <arm/xscale/ixp425/ixp425reg.h>
83 #include <arm/xscale/ixp425/ixp425var.h>
86 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
87 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
88 #include <arm/xscale/xscalereg.h>
/* Hook for performance-monitor support; presumably installed by a PMU
 * driver elsewhere — it is only declared, never assigned, in this file. */
92 struct arm_pmc_funcs *arm_pmc;
95 /* PRIMARY CACHE VARIABLES */
/*
 * Geometry of the CPU-integrated (primary) caches.  These are filled in
 * at boot by get_cachetype_cp15() or get_cachetype_table() below and are
 * consumed by cache-maintenance code elsewhere in the kernel.
 * NOTE(review): this listing elides some lines; a few sibling globals
 * (e.g. arm_picache_size/ways, arm_pcache_type) are declared in the gaps.
 */
97 int arm_picache_line_size;
100 int arm_pdcache_size; /* and unified */
101 int arm_pdcache_line_size;
102 int arm_pdcache_ways;
105 int arm_pcache_unified;
/* D-cache line size and the matching mask (line_size - 1), used for
 * aligning addresses to cache-line boundaries. */
107 int arm_dcache_align;
108 int arm_dcache_align_mask;
110 /* 1 == use cpu_sleep(), 0 == don't */
111 int cpu_do_powersave;
/*
 * Dispatch table for ARM7TDMI cores.  The core's TLB and cache ops are
 * unified: all six TLB slots reuse arm7tdmi_tlb_flushID{,_SE}, the
 * D/ID cache slots all reuse arm7tdmi_cache_flushID, and the separate
 * I-cache, L2 and "other" slots are no-ops.
 */
115 struct cpu_functions arm7tdmi_cpufuncs = {
119 cpufunc_nullop, /* cpwait */
123 cpufunc_control, /* control */
124 cpufunc_domains, /* domain */
125 arm7tdmi_setttb, /* setttb */
126 cpufunc_faultstatus, /* faultstatus */
127 cpufunc_faultaddress, /* faultaddress */
131 arm7tdmi_tlb_flushID, /* tlb_flushID */
132 arm7tdmi_tlb_flushID_SE, /* tlb_flushID_SE */
133 arm7tdmi_tlb_flushID, /* tlb_flushI */
134 arm7tdmi_tlb_flushID_SE, /* tlb_flushI_SE */
135 arm7tdmi_tlb_flushID, /* tlb_flushD */
136 arm7tdmi_tlb_flushID_SE, /* tlb_flushD_SE */
138 /* Cache operations */
140 cpufunc_nullop, /* icache_sync_all */
141 (void *)cpufunc_nullop, /* icache_sync_range */
143 arm7tdmi_cache_flushID, /* dcache_wbinv_all */
144 (void *)arm7tdmi_cache_flushID, /* dcache_wbinv_range */
145 (void *)arm7tdmi_cache_flushID, /* dcache_inv_range */
146 (void *)cpufunc_nullop, /* dcache_wb_range */
148 arm7tdmi_cache_flushID, /* idcache_wbinv_all */
149 (void *)arm7tdmi_cache_flushID, /* idcache_wbinv_range */
150 cpufunc_nullop, /* l2cache_wbinv_all */
151 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
152 (void *)cpufunc_nullop, /* l2cache_inv_range */
153 (void *)cpufunc_nullop, /* l2cache_wb_range */
155 /* Other functions */
157 cpufunc_nullop, /* flush_prefetchbuf */
158 cpufunc_nullop, /* drain_writebuf */
159 cpufunc_nullop, /* flush_brnchtgt_C */
160 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
162 (void *)cpufunc_nullop, /* sleep */
166 late_abort_fixup, /* dataabt_fixup */
167 cpufunc_null_fixup, /* prefetchabt_fixup */
169 arm7tdmi_context_switch, /* context_switch */
171 arm7tdmi_setup /* cpu setup */
174 #endif /* CPU_ARM7TDMI */
/*
 * Dispatch table for ARM8 cores.  Unified cache: purge/clean-ID routines
 * back all D/ID cache slots; I-cache, L2 and write-buffer slots are no-ops.
 * dcache_inv_range falls back to a full purge (see the XXX below).
 */
177 struct cpu_functions arm8_cpufuncs = {
181 cpufunc_nullop, /* cpwait */
185 cpufunc_control, /* control */
186 cpufunc_domains, /* domain */
187 arm8_setttb, /* setttb */
188 cpufunc_faultstatus, /* faultstatus */
189 cpufunc_faultaddress, /* faultaddress */
193 arm8_tlb_flushID, /* tlb_flushID */
194 arm8_tlb_flushID_SE, /* tlb_flushID_SE */
195 arm8_tlb_flushID, /* tlb_flushI */
196 arm8_tlb_flushID_SE, /* tlb_flushI_SE */
197 arm8_tlb_flushID, /* tlb_flushD */
198 arm8_tlb_flushID_SE, /* tlb_flushD_SE */
200 /* Cache operations */
202 cpufunc_nullop, /* icache_sync_all */
203 (void *)cpufunc_nullop, /* icache_sync_range */
205 arm8_cache_purgeID, /* dcache_wbinv_all */
206 (void *)arm8_cache_purgeID, /* dcache_wbinv_range */
207 /*XXX*/ (void *)arm8_cache_purgeID, /* dcache_inv_range */
208 (void *)arm8_cache_cleanID, /* dcache_wb_range */
210 arm8_cache_purgeID, /* idcache_wbinv_all */
211 (void *)arm8_cache_purgeID, /* idcache_wbinv_range */
212 cpufunc_nullop, /* l2cache_wbinv_all */
213 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
214 (void *)cpufunc_nullop, /* l2cache_inv_range */
215 (void *)cpufunc_nullop, /* l2cache_wb_range */
217 /* Other functions */
219 cpufunc_nullop, /* flush_prefetchbuf */
220 cpufunc_nullop, /* drain_writebuf */
221 cpufunc_nullop, /* flush_brnchtgt_C */
222 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
224 (void *)cpufunc_nullop, /* sleep */
228 cpufunc_null_fixup, /* dataabt_fixup */
229 cpufunc_null_fixup, /* prefetchabt_fixup */
231 arm8_context_switch, /* context_switch */
233 arm8_setup /* cpu setup */
235 #endif /* CPU_ARM8 */
/*
 * Dispatch table for ARM9 cores.  Uses the generic armv4 TLB routines
 * plus ARM9-specific cache maintenance; dcache_inv_range conservatively
 * reuses wbinv (see XXX).  No L2 cache on these cores.
 */
238 struct cpu_functions arm9_cpufuncs = {
242 cpufunc_nullop, /* cpwait */
246 cpufunc_control, /* control */
247 cpufunc_domains, /* Domain */
248 arm9_setttb, /* Setttb */
249 cpufunc_faultstatus, /* Faultstatus */
250 cpufunc_faultaddress, /* Faultaddress */
254 armv4_tlb_flushID, /* tlb_flushID */
255 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
256 armv4_tlb_flushI, /* tlb_flushI */
257 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
258 armv4_tlb_flushD, /* tlb_flushD */
259 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
261 /* Cache operations */
263 arm9_icache_sync_all, /* icache_sync_all */
264 arm9_icache_sync_range, /* icache_sync_range */
266 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
267 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
268 /*XXX*/ arm9_dcache_wbinv_range, /* dcache_inv_range */
269 arm9_dcache_wb_range, /* dcache_wb_range */
271 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
272 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
273 cpufunc_nullop, /* l2cache_wbinv_all */
274 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
275 (void *)cpufunc_nullop, /* l2cache_inv_range */
276 (void *)cpufunc_nullop, /* l2cache_wb_range */
278 /* Other functions */
280 cpufunc_nullop, /* flush_prefetchbuf */
281 armv4_drain_writebuf, /* drain_writebuf */
282 cpufunc_nullop, /* flush_brnchtgt_C */
283 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
285 (void *)cpufunc_nullop, /* sleep */
289 cpufunc_null_fixup, /* dataabt_fixup */
290 cpufunc_null_fixup, /* prefetchabt_fixup */
292 arm9_context_switch, /* context_switch */
294 arm9_setup /* cpu setup */
297 #endif /* CPU_ARM9 */
299 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Dispatch table for ARMv5 "EC" (ARM9E/ARM10-class) cores: armv4 TLB
 * routines, ARMv5-EC cache maintenance, no L2.  dcache_inv_range reuses
 * wbinv (see XXX).  Shares the arm10 context-switch/setup entry points.
 */
300 struct cpu_functions armv5_ec_cpufuncs = {
304 cpufunc_nullop, /* cpwait */
308 cpufunc_control, /* control */
309 cpufunc_domains, /* Domain */
310 armv5_ec_setttb, /* Setttb */
311 cpufunc_faultstatus, /* Faultstatus */
312 cpufunc_faultaddress, /* Faultaddress */
316 armv4_tlb_flushID, /* tlb_flushID */
317 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
318 armv4_tlb_flushI, /* tlb_flushI */
319 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
320 armv4_tlb_flushD, /* tlb_flushD */
321 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
323 /* Cache operations */
325 armv5_ec_icache_sync_all, /* icache_sync_all */
326 armv5_ec_icache_sync_range, /* icache_sync_range */
328 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
329 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
330 /*XXX*/ armv5_ec_dcache_wbinv_range, /* dcache_inv_range */
331 armv5_ec_dcache_wb_range, /* dcache_wb_range */
333 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
334 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
336 cpufunc_nullop, /* l2cache_wbinv_all */
337 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
338 (void *)cpufunc_nullop, /* l2cache_inv_range */
339 (void *)cpufunc_nullop, /* l2cache_wb_range */
341 /* Other functions */
343 cpufunc_nullop, /* flush_prefetchbuf */
344 armv4_drain_writebuf, /* drain_writebuf */
345 cpufunc_nullop, /* flush_brnchtgt_C */
346 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
348 (void *)cpufunc_nullop, /* sleep */
352 cpufunc_null_fixup, /* dataabt_fixup */
353 cpufunc_null_fixup, /* prefetchabt_fixup */
355 arm10_context_switch, /* context_switch */
357 arm10_setup /* cpu setup */
/*
 * Dispatch table for Marvell Feroceon cores.  Mostly the armv5_ec table,
 * but with Feroceon-specific ranged D-cache ops and — uniquely among the
 * tables in this file — real L2 cache maintenance routines.
 */
361 struct cpu_functions feroceon_cpufuncs = {
365 cpufunc_nullop, /* cpwait */
369 cpufunc_control, /* control */
370 cpufunc_domains, /* Domain */
371 feroceon_setttb, /* Setttb */
372 cpufunc_faultstatus, /* Faultstatus */
373 cpufunc_faultaddress, /* Faultaddress */
377 armv4_tlb_flushID, /* tlb_flushID */
378 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
379 armv4_tlb_flushI, /* tlb_flushI */
380 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
381 armv4_tlb_flushD, /* tlb_flushD */
382 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
384 /* Cache operations */
386 armv5_ec_icache_sync_all, /* icache_sync_all */
387 armv5_ec_icache_sync_range, /* icache_sync_range */
389 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
390 feroceon_dcache_wbinv_range, /* dcache_wbinv_range */
391 feroceon_dcache_inv_range, /* dcache_inv_range */
392 feroceon_dcache_wb_range, /* dcache_wb_range */
394 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
395 feroceon_idcache_wbinv_range, /* idcache_wbinv_range */
397 feroceon_l2cache_wbinv_all, /* l2cache_wbinv_all */
398 feroceon_l2cache_wbinv_range, /* l2cache_wbinv_range */
399 feroceon_l2cache_inv_range, /* l2cache_inv_range */
400 feroceon_l2cache_wb_range, /* l2cache_wb_range */
402 /* Other functions */
404 cpufunc_nullop, /* flush_prefetchbuf */
405 armv4_drain_writebuf, /* drain_writebuf */
406 cpufunc_nullop, /* flush_brnchtgt_C */
407 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
409 (void *)cpufunc_nullop, /* sleep */
413 cpufunc_null_fixup, /* dataabt_fixup */
414 cpufunc_null_fixup, /* prefetchabt_fixup */
416 arm10_context_switch, /* context_switch */
418 arm10_setup /* cpu setup */
420 #endif /* CPU_ARM9E || CPU_ARM10 */
/*
 * Dispatch table for ARM10 (ARM1020E) cores: armv4 TLB routines with
 * ARM10-specific single-entry variants, full ARM10 cache maintenance
 * including a true dcache_inv_range, and no L2.
 */
423 struct cpu_functions arm10_cpufuncs = {
427 cpufunc_nullop, /* cpwait */
431 cpufunc_control, /* control */
432 cpufunc_domains, /* Domain */
433 arm10_setttb, /* Setttb */
434 cpufunc_faultstatus, /* Faultstatus */
435 cpufunc_faultaddress, /* Faultaddress */
439 armv4_tlb_flushID, /* tlb_flushID */
440 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
441 armv4_tlb_flushI, /* tlb_flushI */
442 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
443 armv4_tlb_flushD, /* tlb_flushD */
444 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
446 /* Cache operations */
448 arm10_icache_sync_all, /* icache_sync_all */
449 arm10_icache_sync_range, /* icache_sync_range */
451 arm10_dcache_wbinv_all, /* dcache_wbinv_all */
452 arm10_dcache_wbinv_range, /* dcache_wbinv_range */
453 arm10_dcache_inv_range, /* dcache_inv_range */
454 arm10_dcache_wb_range, /* dcache_wb_range */
456 arm10_idcache_wbinv_all, /* idcache_wbinv_all */
457 arm10_idcache_wbinv_range, /* idcache_wbinv_range */
458 cpufunc_nullop, /* l2cache_wbinv_all */
459 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
460 (void *)cpufunc_nullop, /* l2cache_inv_range */
461 (void *)cpufunc_nullop, /* l2cache_wb_range */
463 /* Other functions */
465 cpufunc_nullop, /* flush_prefetchbuf */
466 armv4_drain_writebuf, /* drain_writebuf */
467 cpufunc_nullop, /* flush_brnchtgt_C */
468 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
470 (void *)cpufunc_nullop, /* sleep */
474 cpufunc_null_fixup, /* dataabt_fixup */
475 cpufunc_null_fixup, /* prefetchabt_fixup */
477 arm10_context_switch, /* context_switch */
479 arm10_setup /* cpu setup */
482 #endif /* CPU_ARM10 */
/*
 * Dispatch table for StrongARM SA-110: armv4 TLB routines plus the
 * shared SA-1 cache maintenance (sa1_cache_*); dcache_inv_range reuses
 * the purge routine (see XXX).  No L2 cache.
 */
485 struct cpu_functions sa110_cpufuncs = {
489 cpufunc_nullop, /* cpwait */
493 cpufunc_control, /* control */
494 cpufunc_domains, /* domain */
495 sa1_setttb, /* setttb */
496 cpufunc_faultstatus, /* faultstatus */
497 cpufunc_faultaddress, /* faultaddress */
501 armv4_tlb_flushID, /* tlb_flushID */
502 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
503 armv4_tlb_flushI, /* tlb_flushI */
504 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
505 armv4_tlb_flushD, /* tlb_flushD */
506 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
508 /* Cache operations */
510 sa1_cache_syncI, /* icache_sync_all */
511 sa1_cache_syncI_rng, /* icache_sync_range */
513 sa1_cache_purgeD, /* dcache_wbinv_all */
514 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
515 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
516 sa1_cache_cleanD_rng, /* dcache_wb_range */
518 sa1_cache_purgeID, /* idcache_wbinv_all */
519 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
520 cpufunc_nullop, /* l2cache_wbinv_all */
521 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
522 (void *)cpufunc_nullop, /* l2cache_inv_range */
523 (void *)cpufunc_nullop, /* l2cache_wb_range */
525 /* Other functions */
527 cpufunc_nullop, /* flush_prefetchbuf */
528 armv4_drain_writebuf, /* drain_writebuf */
529 cpufunc_nullop, /* flush_brnchtgt_C */
530 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
532 (void *)cpufunc_nullop, /* sleep */
536 cpufunc_null_fixup, /* dataabt_fixup */
537 cpufunc_null_fixup, /* prefetchabt_fixup */
539 sa110_context_switch, /* context_switch */
541 sa110_setup /* cpu setup */
543 #endif /* CPU_SA110 */
545 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Dispatch table for StrongARM SA-1100/SA-1110.  Same SA-1 cache and
 * armv4 TLB routines as SA-110, but with a real read-buffer drain for
 * flush_prefetchbuf and a working sleep hook (sa11x0_cpu_sleep).
 */
546 struct cpu_functions sa11x0_cpufuncs = {
550 cpufunc_nullop, /* cpwait */
554 cpufunc_control, /* control */
555 cpufunc_domains, /* domain */
556 sa1_setttb, /* setttb */
557 cpufunc_faultstatus, /* faultstatus */
558 cpufunc_faultaddress, /* faultaddress */
562 armv4_tlb_flushID, /* tlb_flushID */
563 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
564 armv4_tlb_flushI, /* tlb_flushI */
565 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
566 armv4_tlb_flushD, /* tlb_flushD */
567 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
569 /* Cache operations */
571 sa1_cache_syncI, /* icache_sync_all */
572 sa1_cache_syncI_rng, /* icache_sync_range */
574 sa1_cache_purgeD, /* dcache_wbinv_all */
575 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
576 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
577 sa1_cache_cleanD_rng, /* dcache_wb_range */
579 sa1_cache_purgeID, /* idcache_wbinv_all */
580 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
581 cpufunc_nullop, /* l2cache_wbinv_all */
582 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
583 (void *)cpufunc_nullop, /* l2cache_inv_range */
584 (void *)cpufunc_nullop, /* l2cache_wb_range */
586 /* Other functions */
588 sa11x0_drain_readbuf, /* flush_prefetchbuf */
589 armv4_drain_writebuf, /* drain_writebuf */
590 cpufunc_nullop, /* flush_brnchtgt_C */
591 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
593 sa11x0_cpu_sleep, /* sleep */
597 cpufunc_null_fixup, /* dataabt_fixup */
598 cpufunc_null_fixup, /* prefetchabt_fixup */
600 sa11x0_context_switch, /* context_switch */
602 sa11x0_setup /* cpu setup */
604 #endif /* CPU_SA1100 || CPU_SA1110 */
/*
 * Dispatch table for Intel IXP12x0 (StrongARM-derived): SA-1 cache and
 * armv4 TLB routines, with an IXP12x0-specific read-buffer drain and
 * context switch.  No sleep support; no L2 cache.
 */
607 struct cpu_functions ixp12x0_cpufuncs = {
611 cpufunc_nullop, /* cpwait */
615 cpufunc_control, /* control */
616 cpufunc_domains, /* domain */
617 sa1_setttb, /* setttb */
618 cpufunc_faultstatus, /* faultstatus */
619 cpufunc_faultaddress, /* faultaddress */
623 armv4_tlb_flushID, /* tlb_flushID */
624 sa1_tlb_flushID_SE, /* tlb_flushID_SE */
625 armv4_tlb_flushI, /* tlb_flushI */
626 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
627 armv4_tlb_flushD, /* tlb_flushD */
628 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
630 /* Cache operations */
632 sa1_cache_syncI, /* icache_sync_all */
633 sa1_cache_syncI_rng, /* icache_sync_range */
635 sa1_cache_purgeD, /* dcache_wbinv_all */
636 sa1_cache_purgeD_rng, /* dcache_wbinv_range */
637 /*XXX*/ sa1_cache_purgeD_rng, /* dcache_inv_range */
638 sa1_cache_cleanD_rng, /* dcache_wb_range */
640 sa1_cache_purgeID, /* idcache_wbinv_all */
641 sa1_cache_purgeID_rng, /* idcache_wbinv_range */
642 cpufunc_nullop, /* l2cache_wbinv_all */
643 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
644 (void *)cpufunc_nullop, /* l2cache_inv_range */
645 (void *)cpufunc_nullop, /* l2cache_wb_range */
647 /* Other functions */
649 ixp12x0_drain_readbuf, /* flush_prefetchbuf */
650 armv4_drain_writebuf, /* drain_writebuf */
651 cpufunc_nullop, /* flush_brnchtgt_C */
652 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
654 (void *)cpufunc_nullop, /* sleep */
658 cpufunc_null_fixup, /* dataabt_fixup */
659 cpufunc_null_fixup, /* prefetchabt_fixup */
661 ixp12x0_context_switch, /* context_switch */
663 ixp12x0_setup /* cpu setup */
665 #endif /* CPU_IXP12X0 */
667 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
668 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
669 defined(CPU_XSCALE_80219)
/*
 * Shared dispatch table for first/second-generation XScale cores
 * (80200/80321/80219/PXA2x0/IXP425): a real cpwait, XScale control and
 * cache routines, a genuine dcache_inv_range, and a working sleep hook.
 * No L2 cache on these parts (contrast with xscalec3_cpufuncs below).
 */
671 struct cpu_functions xscale_cpufuncs = {
675 xscale_cpwait, /* cpwait */
679 xscale_control, /* control */
680 cpufunc_domains, /* domain */
681 xscale_setttb, /* setttb */
682 cpufunc_faultstatus, /* faultstatus */
683 cpufunc_faultaddress, /* faultaddress */
687 armv4_tlb_flushID, /* tlb_flushID */
688 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
689 armv4_tlb_flushI, /* tlb_flushI */
690 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
691 armv4_tlb_flushD, /* tlb_flushD */
692 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
694 /* Cache operations */
696 xscale_cache_syncI, /* icache_sync_all */
697 xscale_cache_syncI_rng, /* icache_sync_range */
699 xscale_cache_purgeD, /* dcache_wbinv_all */
700 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
701 xscale_cache_flushD_rng, /* dcache_inv_range */
702 xscale_cache_cleanD_rng, /* dcache_wb_range */
704 xscale_cache_purgeID, /* idcache_wbinv_all */
705 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
706 cpufunc_nullop, /* l2cache_wbinv_all */
707 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
708 (void *)cpufunc_nullop, /* l2cache_inv_range */
709 (void *)cpufunc_nullop, /* l2cache_wb_range */
711 /* Other functions */
713 cpufunc_nullop, /* flush_prefetchbuf */
714 armv4_drain_writebuf, /* drain_writebuf */
715 cpufunc_nullop, /* flush_brnchtgt_C */
716 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
718 xscale_cpu_sleep, /* sleep */
722 cpufunc_null_fixup, /* dataabt_fixup */
723 cpufunc_null_fixup, /* prefetchabt_fixup */
725 xscale_context_switch, /* context_switch */
727 xscale_setup /* cpu setup */
730 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
733 #ifdef CPU_XSCALE_81342
/*
 * Dispatch table for third-generation XScale (81342, "XScale core 3"):
 * like xscale_cpufuncs but with core-3 cache routines and real L2
 * maintenance via the xscalec3_l2cache_* entries.
 */
734 struct cpu_functions xscalec3_cpufuncs = {
738 xscale_cpwait, /* cpwait */
742 xscale_control, /* control */
743 cpufunc_domains, /* domain */
744 xscalec3_setttb, /* setttb */
745 cpufunc_faultstatus, /* faultstatus */
746 cpufunc_faultaddress, /* faultaddress */
750 armv4_tlb_flushID, /* tlb_flushID */
751 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
752 armv4_tlb_flushI, /* tlb_flushI */
753 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
754 armv4_tlb_flushD, /* tlb_flushD */
755 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
757 /* Cache operations */
759 xscalec3_cache_syncI, /* icache_sync_all */
760 xscalec3_cache_syncI_rng, /* icache_sync_range */
762 xscalec3_cache_purgeD, /* dcache_wbinv_all */
763 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
764 xscale_cache_flushD_rng, /* dcache_inv_range */
765 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
767 xscalec3_cache_purgeID, /* idcache_wbinv_all */
768 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
769 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
770 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
771 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
772 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
774 /* Other functions */
776 cpufunc_nullop, /* flush_prefetchbuf */
777 armv4_drain_writebuf, /* drain_writebuf */
778 cpufunc_nullop, /* flush_brnchtgt_C */
779 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
781 xscale_cpu_sleep, /* sleep */
785 cpufunc_null_fixup, /* dataabt_fixup */
786 cpufunc_null_fixup, /* prefetchabt_fixup */
788 xscalec3_context_switch, /* context_switch */
790 xscale_setup /* cpu setup */
792 #endif /* CPU_XSCALE_81342 */
794 * Global constants also used by locore.s
/* The active dispatch table; set_cpufuncs() copies one of the per-CPU
 * tables above into it at boot. */
797 struct cpu_functions cpufuncs;
799 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
801 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
802 defined (CPU_ARM9E) || defined (CPU_ARM10) || \
803 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
804 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
805 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
807 static void get_cachetype_cp15(void);
809 /* Additional cache information local to this file. Log2 of some of the
811 static int arm_dcache_l2_nsets;
812 static int arm_dcache_l2_assoc;
813 static int arm_dcache_l2_linesize;
/*
 * get_cachetype_cp15: read the CP15 Cache Type register (c0, c0, 1) and
 * decode the I/D cache geometry into the arm_p[id]cache_* globals above.
 * NOTE(review): the listing elides the function's signature and several
 * body lines (braces, the "multiplier" declaration, else/goto paths).
 */
818 u_int ctype, isize, dsize;
821 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
825 * ...and thus spake the ARM ARM:
827 * If an <opcode2> value corresponding to an unimplemented or
828 * reserved ID register is encountered, the System Control
829 * processor returns the value of the main ID register.
/* Cache Type reg unimplemented: MRC returned the main ID register. */
831 if (ctype == cpufunc_id())
/* S bit clear => unified (not separate I/D) caches. */
834 if ((ctype & CPU_CT_S) == 0)
835 arm_pcache_unified = 1;
838 * If you want to know how this code works, go read the ARM ARM.
841 arm_pcache_type = CPU_CT_CTYPE(ctype);
/* Separate caches: decode the I-cache geometry field first. */
843 if (arm_pcache_unified == 0) {
844 isize = CPU_CT_ISIZE(ctype);
845 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
846 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
847 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
848 if (isize & CPU_CT_xSIZE_M)
849 arm_picache_line_size = 0; /* not present */
851 arm_picache_ways = 1;
853 arm_picache_ways = multiplier <<
854 (CPU_CT_xSIZE_ASSOC(isize) - 1);
856 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
/* D-cache (or unified cache) geometry, decoded the same way. */
859 dsize = CPU_CT_DSIZE(ctype);
860 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
861 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
862 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
863 if (dsize & CPU_CT_xSIZE_M)
864 arm_pdcache_line_size = 0; /* not present */
866 arm_pdcache_ways = 1;
868 arm_pdcache_ways = multiplier <<
869 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
871 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
873 arm_dcache_align = arm_pdcache_line_size;
/* Log2 values used by the arm9/arm10 set/index loop setup below. */
875 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
876 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
877 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
878 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
881 arm_dcache_align_mask = arm_dcache_align - 1;
883 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
885 #if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
887 /* Cache information for CPUs without cache type registers. */
/* NOTE(review): the listing elides some struct cachetab fields
 * (ct_cpuid, ct_pcache_type, ct_pdcache_size/ways, ct_picache_size/ways
 * referenced below) — confirm against the full source. */
891 int ct_pcache_unified;
893 int ct_pdcache_line_size;
896 int ct_picache_line_size;
/* Hard-coded cache geometry per CPU id, for StrongARM-class parts. */
900 struct cachetab cachetab[] = {
901 /* cpuid, cache type, u, dsiz, ls, wy, isiz, ls, wy */
902 /* XXX is this type right for SA-1? */
903 { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
904 { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
905 { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 },
906 { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
907 { 0, 0, 0, 0, 0, 0, 0, 0} /* zero cpuid terminates the table */
910 static void get_cachetype_table(void);
/*
 * get_cachetype_table: look the CPU id up in cachetab[] and copy its
 * fixed cache geometry into the arm_p[id]cache_* globals.
 */
913 get_cachetype_table()
916 u_int32_t cpuid = cpufunc_id();
918 for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
919 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
920 arm_pcache_type = cachetab[i].ct_pcache_type;
921 arm_pcache_unified = cachetab[i].ct_pcache_unified;
922 arm_pdcache_size = cachetab[i].ct_pdcache_size;
923 arm_pdcache_line_size =
924 cachetab[i].ct_pdcache_line_size;
925 arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
926 arm_picache_size = cachetab[i].ct_picache_size;
927 arm_picache_line_size =
928 cachetab[i].ct_picache_line_size;
929 arm_picache_ways = cachetab[i].ct_picache_ways;
932 arm_dcache_align = arm_pdcache_line_size;
934 arm_dcache_align_mask = arm_dcache_align - 1;
937 #endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
940 * Cannot panic here as we may not have a console yet ...
/*
 * set_cpufuncs (body fragment): identify the CPU from the main ID
 * register, install the matching dispatch table into the global
 * `cpufuncs`, probe the cache geometry, and select the pmap PTE flavor.
 * NOTE(review): the listing elides the function signature, most closing
 * braces and the `return 0;`-style exits after each match.
 */
946 cputype = cpufunc_id();
947 cputype &= CPU_ID_CPU_MASK;
950 * NOTE: cpu_do_powersave defaults to off. If we encounter a
951 * CPU type where we want to use it by default, then we set it.
/* ARM7TDMI: ARM Ltd implementor, ARM7 family, v4T architecture. */
955 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
956 CPU_ID_IS7(cputype) &&
957 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
958 cpufuncs = arm7tdmi_cpufuncs;
959 cpu_reset_needs_v4_MMU_disable = 0;
960 get_cachetype_cp15();
961 pmap_pte_init_generic();
/* ARM8: match on the 0x8xxx part-number nibble. */
966 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
967 (cputype & 0x0000f000) == 0x00008000) {
968 cpufuncs = arm8_cpufuncs;
969 cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */
970 get_cachetype_cp15();
971 pmap_pte_init_arm8();
974 #endif /* CPU_ARM8 */
/* ARM9: ARM Ltd or TI implementor, 0x9xxx part number. */
976 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
977 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
978 (cputype & 0x0000f000) == 0x00009000) {
979 cpufuncs = arm9_cpufuncs;
980 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
981 get_cachetype_cp15();
/* Precompute set/index iteration constants for the arm9 D-cache
 * loops from the log2 geometry found by get_cachetype_cp15(). */
982 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
983 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
984 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
985 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
986 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
987 #ifdef ARM9_CACHE_WRITE_THROUGH
988 pmap_pte_init_arm9();
990 pmap_pte_init_generic();
994 #endif /* CPU_ARM9 */
995 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* ARM9E family: ARM926EJ-S/ARM1026EJ-S use the generic armv5_ec
 * table; the Marvell (MV88FR*) parts get the Feroceon table. */
996 if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS ||
997 cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
998 cputype == CPU_ID_MV88FR571_41) {
999 if (cputype == CPU_ID_MV88FR131 ||
1000 cputype == CPU_ID_MV88FR571_VD ||
1001 cputype == CPU_ID_MV88FR571_41) {
1003 cpufuncs = feroceon_cpufuncs;
1005 * Workaround for Marvell MV78100 CPU: Cache prefetch
1006 * mechanism may affect the cache coherency validity,
1007 * so it needs to be disabled.
1009 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
1010 * L2 Prefetching Mechanism) for details.
1012 if (cputype == CPU_ID_MV88FR571_VD ||
1013 cputype == CPU_ID_MV88FR571_41) {
1014 feroceon_control_ext(0xffffffff,
1015 FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
1016 FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN |
1019 feroceon_control_ext(0xffffffff,
1020 FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
1021 FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN);
1024 cpufuncs = armv5_ec_cpufuncs;
1026 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1027 get_cachetype_cp15();
1028 pmap_pte_init_generic();
1031 #endif /* CPU_ARM9E || CPU_ARM10 */
1033 if (/* cputype == CPU_ID_ARM1020T || */
1034 cputype == CPU_ID_ARM1020E) {
1036 * Select write-through cacheing (this isn't really an
1037 * option on ARM1020T).
1039 cpufuncs = arm10_cpufuncs;
1040 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
1041 get_cachetype_cp15();
/* Same set/index precomputation as the arm9 case above. */
1042 arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1043 arm10_dcache_sets_max =
1044 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1045 arm10_dcache_sets_inc;
1046 arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1047 arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
1048 pmap_pte_init_generic();
1051 #endif /* CPU_ARM10 */
/* StrongARM and IXP1200 parts: fixed cache tables, SA-1 pmap PTEs. */
1053 if (cputype == CPU_ID_SA110) {
1054 cpufuncs = sa110_cpufuncs;
1055 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1056 get_cachetype_table();
1057 pmap_pte_init_sa1();
1060 #endif /* CPU_SA110 */
1062 if (cputype == CPU_ID_SA1100) {
1063 cpufuncs = sa11x0_cpufuncs;
1064 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1065 get_cachetype_table();
1066 pmap_pte_init_sa1();
1067 /* Use powersave on this CPU. */
1068 cpu_do_powersave = 1;
1072 #endif /* CPU_SA1100 */
1074 if (cputype == CPU_ID_SA1110) {
1075 cpufuncs = sa11x0_cpufuncs;
1076 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
1077 get_cachetype_table();
1078 pmap_pte_init_sa1();
1079 /* Use powersave on this CPU. */
1080 cpu_do_powersave = 1;
1084 #endif /* CPU_SA1110 */
1086 if (cputype == CPU_ID_IXP1200) {
1087 cpufuncs = ixp12x0_cpufuncs;
1088 cpu_reset_needs_v4_MMU_disable = 1;
1089 get_cachetype_table();
1090 pmap_pte_init_sa1();
1093 #endif /* CPU_IXP12X0 */
1094 #ifdef CPU_XSCALE_80200
/* XScale i80200: extra board-level init (PMU reset, clock config, BCU
 * ECC disable) before installing the shared xscale table. */
1095 if (cputype == CPU_ID_80200) {
1096 int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1101 * Reset the Performance Monitoring Unit to a
1103 * - CCNT, PMN0, PMN1 reset to 0
1104 * - overflow indications cleared
1105 * - all counters disabled
1107 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1109 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1112 #if defined(XSCALE_CCLKCFG)
1114 * Crank CCLKCFG to maximum legal value.
1116 __asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1118 : "r" (XSCALE_CCLKCFG));
1122 * XXX Disable ECC in the Bus Controller Unit; we
1123 * don't really support it, yet. Clear any pending
1124 * error indications.
1126 __asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1128 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1130 cpufuncs = xscale_cpufuncs;
1131 #if defined(PERFCTRS)
1136 * i80200 errata: Step-A0 and A1 have a bug where
1137 * D$ dirty bits are not cleared on "invalidate by
1140 * Workaround: Clean cache line before invalidating.
/* Steppings A0/A1: patch the already-installed table so that
 * dcache_inv_range writes back (purges) before invalidating. */
1142 if (rev == 0 || rev == 1)
1143 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1145 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1146 get_cachetype_cp15();
1147 pmap_pte_init_xscale();
1150 #endif /* CPU_XSCALE_80200 */
1151 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1152 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1153 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1154 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1156 * Reset the Performance Monitoring Unit to a
1158 * - CCNT, PMN0, PMN1 reset to 0
1159 * - overflow indications cleared
1160 * - all counters disabled
1162 __asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1164 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1167 cpufuncs = xscale_cpufuncs;
1168 #if defined(PERFCTRS)
1172 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1173 get_cachetype_cp15();
1174 pmap_pte_init_xscale();
1177 #endif /* CPU_XSCALE_80321 */
1179 #if defined(CPU_XSCALE_81342)
1180 if (cputype == CPU_ID_81342) {
1181 cpufuncs = xscalec3_cpufuncs;
1182 #if defined(PERFCTRS)
1186 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1187 get_cachetype_cp15();
1188 pmap_pte_init_xscale();
1191 #endif /* CPU_XSCALE_81342 */
1192 #ifdef CPU_XSCALE_PXA2X0
1193 /* ignore core revision to test PXA2xx CPUs */
1194 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1195 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1197 cpufuncs = xscale_cpufuncs;
1198 #if defined(PERFCTRS)
1202 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1203 get_cachetype_cp15();
1204 pmap_pte_init_xscale();
1206 /* Use powersave on this CPU. */
1207 cpu_do_powersave = 1;
1211 #endif /* CPU_XSCALE_PXA2X0 */
1212 #ifdef CPU_XSCALE_IXP425
1213 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1214 cputype == CPU_ID_IXP425_266) {
1216 cpufuncs = xscale_cpufuncs;
1217 #if defined(PERFCTRS)
1221 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1222 get_cachetype_cp15();
1223 pmap_pte_init_xscale();
1227 #endif /* CPU_XSCALE_IXP425 */
/* Fell through every probe: CPU is not supported by this kernel. */
1229 * Bzzzz. And the answer was ...
1231 panic("No support for this CPU type (%08x) in kernel", cputype);
1232 return(ARCHITECTURE_NOT_PRESENT);
/* Reached on success (after a match jumps here); tell UMA the
 * cache-line alignment so allocations are cache-line friendly. */
1234 uma_set_align(arm_dcache_align_mask);
1239 * Fixup routines for data and prefetch aborts.
1241 * Several compile time symbols are used
1243 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1244 * correction of registers after a fault.
1245 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1246 * when defined should use late aborts
1251 * Null abort fixup routine.
1252 * For use when no fixup is required.
/*
 * NOTE(review): intervening source lines (K&R parameter declaration and
 * braces) are elided in this excerpt; comments only added here.
 */
1255 cpufunc_null_fixup(arg)
/* No register correction needed on this CPU; report success unconditionally. */
1258 return(ABORT_FIXUP_OK);
1262 #if defined(CPU_ARM7TDMI)
/*
 * Debug helpers for the abort-fixup code: when DEBUG_FAULT_CORRECTION is
 * defined they expand to printf/disassemble calls, otherwise to nothing,
 * so the fixup routines can trace their decisions at no cost in
 * production kernels.
 */
1264 #ifdef DEBUG_FAULT_CORRECTION
1265 #define DFC_PRINTF(x) printf x
1266 #define DFC_DISASSEMBLE(x) disassemble(x)
1268 #define DFC_PRINTF(x) /* nothing */
1269 #define DFC_DISASSEMBLE(x) /* nothing */
1273 * "Early" data abort fixup.
1275 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used
1276 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1278 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
/*
 * NOTE(review): intervening source lines are elided in this excerpt
 * (function header, braces, some declarations); code left byte-identical,
 * comments only added.
 */
1281 early_abort_fixup(arg)
1284 trapframe_t *frame = arg;
1286 u_int fault_instruction;
/*
 * If the abort came from SVC mode, temporarily park the SVC lr in the
 * usr lr slot so the register block can be indexed as a flat int array;
 * it is restored before returning (see the mirror image near the end).
 */
1289 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1291 /* Ok an abort in SVC mode */
1294 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1295 * as the fault happened in svc mode but we need it in the
1296 * usr slot so we can treat the registers as an array of ints
1298 * NOTE: This PC is in the position but writeback is not
1300 * Doing it like this is more efficient than trapping this
1301 * case in all possible locations in the following fixup code.
1304 saved_lr = frame->tf_usr_lr;
1305 frame->tf_usr_lr = frame->tf_svc_lr;
1308 * Note the trapframe does not have the SVC r13 so a fault
1309 * from an instruction with writeback to r13 in SVC mode is
1310 * not allowed. This should not happen as the kstack is
1315 /* Get fault address and status from the CPU */
1317 fault_pc = frame->tf_pc;
1318 fault_instruction = *((volatile unsigned int *)fault_pc);
1320 /* Decode the fault instruction and fix the registers as needed */
/* Block transfer (LDM/STM) encoding group. */
1322 if ((fault_instruction & 0x0e000000) == 0x08000000) {
1326 int *registers = &frame->tf_r0;
1328 DFC_PRINTF(("LDM/STM\n"));
1329 DFC_DISASSEMBLE(fault_pc);
/* Bit 21 = writeback: the base register was modified and must be undone. */
1330 if (fault_instruction & (1 << 21)) {
1331 DFC_PRINTF(("This instruction must be corrected\n"));
1332 base = (fault_instruction >> 16) & 0x0f;
1334 return ABORT_FIXUP_FAILED;
1335 /* Count registers transferred */
1337 for (loop = 0; loop < 16; ++loop) {
1338 if (fault_instruction & (1<<loop))
1341 DFC_PRINTF(("%d registers used\n", count));
1342 DFC_PRINTF(("Corrected r%d by %d bytes ",
/* Bit 23 = U bit: 0 means the base was decremented, 1 incremented. */
1344 if (fault_instruction & (1 << 23)) {
1345 DFC_PRINTF(("down\n"));
1346 registers[base] -= count * 4;
1348 DFC_PRINTF(("up\n"));
1349 registers[base] += count * 4;
/* Coprocessor transfer (LDC/STC) encoding group. */
1352 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1355 int *registers = &frame->tf_r0;
1357 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1359 DFC_DISASSEMBLE(fault_pc);
1361 /* Only need to fix registers if write back is turned on */
1363 if ((fault_instruction & (1 << 21)) != 0) {
1364 base = (fault_instruction >> 16) & 0x0f;
1366 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1367 return ABORT_FIXUP_FAILED;
1369 return ABORT_FIXUP_FAILED;
/* LDC/STC immediate is an 8-bit word offset, hence the << 2. */
1371 offset = (fault_instruction & 0xff) << 2;
1372 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1373 if ((fault_instruction & (1 << 23)) != 0)
1375 registers[base] += offset;
1376 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1378 } else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1379 return ABORT_FIXUP_FAILED;
/* Restore the SVC lr parked in the usr slot at the top of this routine. */
1381 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1383 /* Ok an abort in SVC mode */
1386 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1387 * as the fault happened in svc mode but we need it in the
1388 * usr slot so we can treat the registers as an array of ints
1390 * NOTE: This PC is in the position but writeback is not
1392 * Doing it like this is more efficient than trapping this
1393 * case in all possible locations in the prior fixup code.
1396 frame->tf_svc_lr = frame->tf_usr_lr;
1397 frame->tf_usr_lr = saved_lr;
1400 * Note the trapframe does not have the SVC r13 so a fault
1401 * from an instruction with writeback to r13 in SVC mode is
1402 * not allowed. This should not happen as the kstack is
1407 return(ABORT_FIXUP_OK);
1409 #endif /* CPU_ARM2/250/3/6/7 */
1412 #if defined(CPU_ARM7TDMI)
1414 * "Late" (base updated) data abort fixup
1416 * For ARM6 (in late-abort mode) and ARM7.
1418 * In this model, all data-transfer instructions need fixing up. We defer
1419 * LDM, STM, LDC and STC fixup to the early-abort handler.
/*
 * NOTE(review): intervening source lines are elided in this excerpt;
 * code left byte-identical, comments only added.
 */
1422 late_abort_fixup(arg)
1425 trapframe_t *frame = arg;
1427 u_int fault_instruction;
/*
 * Same SVC-mode lr shuffle as early_abort_fixup: park SVC lr in the usr
 * slot so registers can be indexed as a flat array, undone at the end.
 */
1430 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1432 /* Ok an abort in SVC mode */
1435 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1436 * as the fault happened in svc mode but we need it in the
1437 * usr slot so we can treat the registers as an array of ints
1439 * NOTE: This PC is in the position but writeback is not
1441 * Doing it like this is more efficient than trapping this
1442 * case in all possible locations in the following fixup code.
1445 saved_lr = frame->tf_usr_lr;
1446 frame->tf_usr_lr = frame->tf_svc_lr;
1449 * Note the trapframe does not have the SVC r13 so a fault
1450 * from an instruction with writeback to r13 in SVC mode is
1451 * not allowed. This should not happen as the kstack is
1456 /* Get fault address and status from the CPU */
1458 fault_pc = frame->tf_pc;
1459 fault_instruction = *((volatile unsigned int *)fault_pc);
1461 /* Decode the fault instruction and fix the registers as needed */
1463 /* Was is a swap instruction ? */
/* SWP/SWPB never update the base register, so no correction is needed. */
1465 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1466 DFC_DISASSEMBLE(fault_pc);
1467 } else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1469 /* Was is a ldr/str instruction */
1470 /* This is for late abort only */
1474 int *registers = &frame->tf_r0;
1476 DFC_DISASSEMBLE(fault_pc);
1478 /* This is for late abort only */
/* Post-indexed (P=0) or writeback (W=1): base was modified, undo it. */
1480 if ((fault_instruction & (1 << 24)) == 0
1481 || (fault_instruction & (1 << 21)) != 0) {
1482 /* postindexed ldr/str with no writeback */
1484 base = (fault_instruction >> 16) & 0x0f;
1486 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1487 return ABORT_FIXUP_FAILED;
1489 return ABORT_FIXUP_FAILED;
1490 DFC_PRINTF(("late abt fix: r%d=%08x : ",
1491 base, registers[base]));
/* Bit 25 clear = 12-bit immediate offset form. */
1492 if ((fault_instruction & (1 << 25)) == 0) {
1493 /* Immediate offset - easy */
1495 offset = fault_instruction & 0xfff;
1496 if ((fault_instruction & (1 << 23)))
1498 registers[base] += offset;
1499 DFC_PRINTF(("imm=%08x ", offset));
1501 /* offset is a shifted register */
1504 offset = fault_instruction & 0x0f;
1506 return ABORT_FIXUP_FAILED;
1509 * Register offset - hard we have to
1510 * cope with shifts !
1512 offset = registers[offset];
/* Bit 4 selects immediate shift amount vs. shift-by-register. */
1514 if ((fault_instruction & (1 << 4)) == 0)
1515 /* shift with amount */
1516 shift = (fault_instruction >> 7) & 0x1f;
1518 /* shift with register */
1519 if ((fault_instruction & (1 << 7)) != 0)
1520 /* undefined for now so bail out */
1521 return ABORT_FIXUP_FAILED;
1522 shift = ((fault_instruction >> 8) & 0xf);
1524 return ABORT_FIXUP_FAILED;
1525 DFC_PRINTF(("shift reg=%d ", shift));
1526 shift = registers[shift];
1528 DFC_PRINTF(("shift=%08x ", shift));
/* Apply the shift type encoded in bits 6:5 (LSL/LSR/ASR; ROR unsupported). */
1529 switch (((fault_instruction >> 5) & 0x3)) {
1530 case 0 : /* Logical left */
1531 offset = (int)(((u_int)offset) << shift);
1533 case 1 : /* Logical Right */
1534 if (shift == 0) shift = 32;
1535 offset = (int)(((u_int)offset) >> shift);
1537 case 2 : /* Arithmetic Right */
1538 if (shift == 0) shift = 32;
1539 offset = (int)(((int)offset) >> shift);
1541 case 3 : /* Rotate right (rol or rxx) */
1542 return ABORT_FIXUP_FAILED;
1546 DFC_PRINTF(("abt: fixed LDR/STR with "
1547 "register offset\n"));
1548 if ((fault_instruction & (1 << 23)))
1550 DFC_PRINTF(("offset=%08x ", offset));
1551 registers[base] += offset;
1553 DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
/* Restore the SVC lr parked in the usr slot at the top of this routine. */
1557 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1559 /* Ok an abort in SVC mode */
1562 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1563 * as the fault happened in svc mode but we need it in the
1564 * usr slot so we can treat the registers as an array of ints
1566 * NOTE: This PC is in the position but writeback is not
1568 * Doing it like this is more efficient than trapping this
1569 * case in all possible locations in the prior fixup code.
1572 frame->tf_svc_lr = frame->tf_usr_lr;
1573 frame->tf_usr_lr = saved_lr;
1576 * Note the trapframe does not have the SVC r13 so a fault
1577 * from an instruction with writeback to r13 in SVC mode is
1578 * not allowed. This should not happen as the kstack is
1584 * Now let the early-abort fixup routine have a go, in case it
1585 * was an LDM, STM, LDC or STC that faulted.
1588 return early_abort_fixup(arg);
1590 #endif /* CPU_ARM7TDMI */
1596 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1597 defined(CPU_ARM9E) || \
1598 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
1599 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1600 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1601 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1602 defined(CPU_ARM10) || defined(CPU_ARM11)
1615 static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
/*
 * Walk a NULL-terminated cpu_option table, querying each named boolean
 * boot option; apply the option's true-op or false-op (OR to set bits,
 * BIC to clear them) to the accumulated control-register value.
 * NOTE(review): intervening source lines (braces, the false branch's
 * "else", the final return) are elided in this excerpt.
 */
1618 parse_cpu_options(args, optlist, cpuctrl)
1620 struct cpu_option *optlist;
1628 while (optlist->co_name) {
1629 if (get_bootconf_option(args, optlist->co_name,
1630 BOOTOPT_TYPE_BOOLEAN, &integer)) {
/* Option present and true: apply co_trueop. */
1632 if (optlist->co_trueop == OR)
1633 cpuctrl |= optlist->co_value;
1634 else if (optlist->co_trueop == BIC)
1635 cpuctrl &= ~optlist->co_value;
/* Option present and false: apply co_falseop. */
1637 if (optlist->co_falseop == OR)
1638 cpuctrl |= optlist->co_value;
1639 else if (optlist->co_falseop == BIC)
1640 cpuctrl &= ~optlist->co_value;
1647 #endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1649 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot options shared by the ARM6/7/8 family: unified I/D cache and
 * write buffer enables, applied by parse_cpu_options() (OR sets the
 * control bits, BIC clears them).
 */
1650 struct cpu_option arm678_options[] = {
1652 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE },
1653 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1654 #endif /* COMPAT_12 */
1655 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1656 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1657 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1658 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1659 { NULL, IGN, IGN, 0 }
1662 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
/*
 * ARM7TDMI-specific boot options: cache/write-buffer enables plus the
 * FPA coprocessor clock (CPCLK) control.
 */
1665 struct cpu_option arm7tdmi_options[] = {
1666 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1667 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1668 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1669 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1671 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK },
1672 #endif /* COMPAT_12 */
1673 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK },
1674 { NULL, IGN, IGN, 0 }
/*
 * Configure an ARM7TDMI: build the default control-register value,
 * apply boot options, flush the caches and write the control register.
 * NOTE(review): intervening source lines (function header, braces,
 * declarations) are elided in this excerpt.
 */
1678 arm7tdmi_setup(args)
1683 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1684 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1685 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1687 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1688 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1691 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1694 /* Clear out the cache */
1695 cpu_idcache_wbinv_all();
1697 /* Set the control register */
1699 cpu_control(0xffffffff, cpuctrl);
/*
 * ARM8-specific boot options: cache/write-buffer enables plus branch
 * prediction control (BPRD).
 */
1704 struct cpu_option arm8_options[] = {
1705 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE },
1706 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE },
1707 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1708 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1710 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1711 #endif /* COMPAT_12 */
1712 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1713 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
1714 { NULL, IGN, IGN, 0 }
/*
 * Configure an ARM8: build the control-register value, apply boot
 * options, then program the ARM8-specific clock/test register from
 * "arm8.clock.*" and "arm8.test" boot options before writing the
 * control register. NOTE(review): intervening source lines (function
 * header, braces, clocktest/integer declarations, the bodies of the
 * clock-option branches) are elided in this excerpt.
 */
1722 int cpuctrl, cpuctrlmask;
1726 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1727 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1728 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1729 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1730 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1731 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1732 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1733 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1735 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1736 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1739 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1740 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1743 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1746 /* Get clock configuration */
1747 clocktest = arm8_clock_config(0, 0) & 0x0f;
1749 /* Special ARM8 clock and test configuration */
1750 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1754 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1758 clocktest &= ~(0x01);
1761 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1765 clocktest &= ~(0x02);
1768 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1769 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1772 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1773 clocktest |= (integer & 7) << 5;
1777 /* Clear out the cache */
1778 cpu_idcache_wbinv_all();
1780 /* Set the control register */
1782 cpu_control(0xffffffff, cpuctrl);
1784 /* Set the clock/test register */
1786 arm8_clock_config(0x7f, clocktest);
/*
 * ARM9 boot options: separate I-cache/D-cache enables (IC/DC rather
 * than the unified IDC of earlier cores) and write-buffer control.
 */
1791 struct cpu_option arm9_options[] = {
1792 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1793 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1794 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1795 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1796 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1797 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1798 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1799 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1800 { NULL, IGN, IGN, 0 }
/*
 * Configure an ARM9: build the control-register value (late aborts and
 * round-robin cache replacement by default), apply boot options, handle
 * big-endian and high-vectors, then flush caches and write the control
 * register using the explicit mask. NOTE(review): intervening source
 * lines (function header, braces) are elided in this excerpt.
 */
1807 int cpuctrl, cpuctrlmask;
1809 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1810 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1811 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1812 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1813 CPU_CONTROL_ROUNDROBIN;
1814 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1815 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1816 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1817 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1818 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1819 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1820 | CPU_CONTROL_ROUNDROBIN;
1822 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1823 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1826 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1829 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate exception vectors to 0xffff0000 when requested. */
1831 if (vector_page == ARM_VECTORS_HIGH)
1832 cpuctrl |= CPU_CONTROL_VECRELOC;
1834 /* Clear out the cache */
1835 cpu_idcache_wbinv_all();
1837 /* Set the control register */
1838 cpu_control(cpuctrlmask, cpuctrl);
1842 #endif /* CPU_ARM9 */
1844 #if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARM9E/ARM10 boot options: split I/D cache and write-buffer enables.
 */
1845 struct cpu_option arm10_options[] = {
1846 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1847 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1848 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1849 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1850 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1851 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1852 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1853 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1854 { NULL, IGN, IGN, 0 }
/*
 * Configure an ARM9E/ARM10: build the control-register value (branch
 * prediction on by default), apply boot options, invalidate the caches
 * with a direct CP15 c7 op, then write the control register and flush
 * again. NOTE(review): intervening source lines (function header,
 * braces) are elided in this excerpt.
 */
1861 int cpuctrl, cpuctrlmask;
1863 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1864 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1865 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1866 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1867 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1868 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1869 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1870 | CPU_CONTROL_BPRD_ENABLE
1871 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1873 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1874 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1877 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1880 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1883 /* Clear out the cache */
1884 cpu_idcache_wbinv_all();
1886 /* Now really make sure they are clean. */
1887 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1889 if (vector_page == ARM_VECTORS_HIGH)
1890 cpuctrl |= CPU_CONTROL_VECRELOC;
1892 /* Set the control register */
1894 cpu_control(0xffffffff, cpuctrl);
1897 cpu_idcache_wbinv_all();
1899 #endif /* CPU_ARM9E || CPU_ARM10 */
/*
 * ARM11 boot options: split I/D cache enables only (no write-buffer
 * entries here, unlike the earlier cores' tables).
 */
1902 struct cpu_option arm11_options[] = {
1903 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1904 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1905 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1906 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1907 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1908 { NULL, IGN, IGN, 0 }
/*
 * Configure an ARM11: build the control-register value (branch
 * prediction deliberately commented out), apply boot options, flush and
 * invalidate caches, record the value in curcpu()->ci_ctrl, then write
 * the control register. NOTE(review): intervening source lines
 * (function header, braces) are elided in this excerpt.
 */
1915 int cpuctrl, cpuctrlmask;
1917 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1918 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1919 /* | CPU_CONTROL_BPRD_ENABLE */;
1920 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1921 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1922 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
1923 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1924 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1926 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1927 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1930 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1933 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1936 /* Clear out the cache */
1937 cpu_idcache_wbinv_all();
1939 /* Now really make sure they are clean. */
1940 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1942 /* Set the control register */
1943 curcpu()->ci_ctrl = cpuctrl;
1944 cpu_control(0xffffffff, cpuctrl);
1947 cpu_idcache_wbinv_all();
1949 #endif /* CPU_ARM11 */
/*
 * StrongARM SA-110 boot options: split I/D cache and write-buffer
 * enables, including COMPAT_12 legacy spellings.
 */
1952 struct cpu_option sa110_options[] = {
1954 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1955 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1956 #endif /* COMPAT_12 */
1957 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1958 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1959 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1960 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
1961 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
1962 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1963 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
1964 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
1965 { NULL, IGN, IGN, 0 }
/*
 * Configure an SA-110: build the control-register value, apply boot
 * options, flush caches, write the control register, then enable clock
 * switching via an SA-110-specific CP15 c15 operation. NOTE(review):
 * intervening source lines (function header, braces) are elided in this
 * excerpt.
 */
1972 int cpuctrl, cpuctrlmask;
1974 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1975 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1976 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1977 | CPU_CONTROL_WBUF_ENABLE;
1978 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1979 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1980 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1981 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1982 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1983 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1984 | CPU_CONTROL_CPCLK;
1986 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1987 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1990 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1993 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1996 /* Clear out the cache */
1997 cpu_idcache_wbinv_all();
1999 /* Set the control register */
2001 /* cpu_control(cpuctrlmask, cpuctrl);*/
2002 cpu_control(0xffffffff, cpuctrl);
2005 * enable clockswitching, note that this doesn't read or write to r0,
2006 * r0 is just to make it valid asm
2008 __asm ("mcr 15, 0, r0, c15, c1, 2");
2010 #endif /* CPU_SA110 */
2012 #if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * StrongARM SA-1100/SA-1110 boot options: split I/D cache and
 * write-buffer enables, including COMPAT_12 legacy spellings.
 */
2013 struct cpu_option sa11x0_options[] = {
2015 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2016 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
2017 #endif /* COMPAT_12 */
2018 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2019 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2020 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2021 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2022 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2023 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2024 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2025 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2026 { NULL, IGN, IGN, 0 }
/*
 * Configure an SA-1100/SA-1110: build the control-register value (late
 * aborts enabled by default), apply boot options, handle big-endian and
 * high-vectors, flush caches and write the control register.
 * NOTE(review): intervening source lines (function header, braces) are
 * elided in this excerpt.
 */
2033 int cpuctrl, cpuctrlmask;
2035 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2036 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2037 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2038 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2039 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2040 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2041 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2042 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2043 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2044 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2045 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2047 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2048 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2052 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2055 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate exception vectors to 0xffff0000 when requested. */
2058 if (vector_page == ARM_VECTORS_HIGH)
2059 cpuctrl |= CPU_CONTROL_VECRELOC;
2060 /* Clear out the cache */
2061 cpu_idcache_wbinv_all();
2062 /* Set the control register */
2064 cpu_control(0xffffffff, cpuctrl);
2066 #endif /* CPU_SA1100 || CPU_SA1110 */
2068 #if defined(CPU_IXP12X0)
/*
 * Intel IXP12x0 boot options: split I/D cache and write-buffer enables.
 */
2069 struct cpu_option ixp12x0_options[] = {
2070 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2071 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2072 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2073 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2074 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2075 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2076 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE },
2077 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE },
2078 { NULL, IGN, IGN, 0 }
/*
 * Configure an IXP12x0: build the control-register value, apply boot
 * options, handle big-endian and high-vectors, flush caches, then write
 * the control register — notably using the explicit cpuctrlmask rather
 * than 0xffffffff (the all-ones call is left commented out).
 * NOTE(review): intervening source lines (function header, braces) are
 * elided in this excerpt.
 */
2085 int cpuctrl, cpuctrlmask;
2088 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2089 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2090 | CPU_CONTROL_IC_ENABLE;
2092 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2093 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2094 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2095 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2096 | CPU_CONTROL_VECRELOC;
2098 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2099 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2102 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2105 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2108 if (vector_page == ARM_VECTORS_HIGH)
2109 cpuctrl |= CPU_CONTROL_VECRELOC;
2111 /* Clear out the cache */
2112 cpu_idcache_wbinv_all();
2114 /* Set the control register */
2116 /* cpu_control(0xffffffff, cpuctrl); */
2117 cpu_control(cpuctrlmask, cpuctrl);
2119 #endif /* CPU_IXP12X0 */
2121 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2122 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2123 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * XScale boot options: branch prediction and split I/D cache enables,
 * including COMPAT_12 legacy spellings.
 */
2124 struct cpu_option xscale_options[] = {
2126 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2127 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2128 #endif /* COMPAT_12 */
2129 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2130 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2131 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2132 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
2133 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
2134 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
2135 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
2136 { NULL, IGN, IGN, 0 }
/*
 * Configure an XScale core: build the control-register value, apply
 * boot options, flush caches and write the control register, then
 * adjust the CP15 auxiliary control register (c1, c0, 1) to control
 * write coalescing (XSCALE_AUXCTL_K) and, on core-3 parts, L2/LLR/MD
 * settings. NOTE(review): intervening source lines (function header,
 * braces, auxctl declaration, #else/#endif partners of the visible
 * conditionals) are elided in this excerpt.
 */
2144 int cpuctrl, cpuctrlmask;
2147 * The XScale Write Buffer is always enabled. Our option
2148 * is to enable/disable coalescing. Note that bits 6:3
2149 * must always be enabled.
2152 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2153 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2154 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2155 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2156 | CPU_CONTROL_BPRD_ENABLE;
2157 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2158 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2159 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2160 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2161 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2162 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2163 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
2164 CPU_CONTROL_L2_ENABLE;
2166 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2167 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2170 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2173 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2176 if (vector_page == ARM_VECTORS_HIGH)
2177 cpuctrl |= CPU_CONTROL_VECRELOC;
2178 #ifdef CPU_XSCALE_CORE3
2179 cpuctrl |= CPU_CONTROL_L2_ENABLE;
2182 /* Clear out the cache */
2183 cpu_idcache_wbinv_all();
2186 * Set the control register. Note that bits 6:3 must always
2190 /* cpu_control(cpuctrlmask, cpuctrl);*/
2191 cpu_control(0xffffffff, cpuctrl);
2193 /* Make sure write coalescing is turned on */
2194 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2196 #ifdef XSCALE_NO_COALESCE_WRITES
2197 auxctl |= XSCALE_AUXCTL_K;
2199 auxctl &= ~XSCALE_AUXCTL_K;
2201 #ifdef CPU_XSCALE_CORE3
2202 auxctl |= XSCALE_AUXCTL_LLR;
2203 auxctl |= XSCALE_AUXCTL_MD_MASK;
2205 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2208 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425