1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 * products derived from this software without specific prior written
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * RiscBSD kernel project
40 * C functions for supporting CPU / MMU / TLB specific operations.
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/systm.h>
50 #include <sys/mutex.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
63 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
64 #include <arm/xscale/i80321/i80321reg.h>
65 #include <arm/xscale/i80321/i80321var.h>
/*
 * Some definitions in i81342reg.h clash with i80321reg.h.
 * This only happens for the LINT kernel.  As it happens,
 * we don't need anything from i81342reg.h that we already
 * got from somewhere else during a LINT compile.
 */
74 #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
75 #include <arm/xscale/i8134x/i81342reg.h>
78 #ifdef CPU_XSCALE_IXP425
79 #include <arm/xscale/ixp425/ixp425reg.h>
80 #include <arm/xscale/ixp425/ixp425var.h>
83 /* PRIMARY CACHE VARIABLES */
85 int arm_picache_line_size;
88 int arm_pdcache_size; /* and unified */
89 int arm_pdcache_line_size;
93 int arm_pcache_unified;
96 int arm_dcache_align_mask;
98 u_int arm_cache_level;
99 u_int arm_cache_type[14];
102 /* 1 == use cpu_sleep(), 0 == don't */
103 int cpu_do_powersave;
107 struct cpu_functions arm9_cpufuncs = {
111 cpufunc_nullop, /* cpwait */
115 cpufunc_control, /* control */
116 cpufunc_domains, /* Domain */
117 arm9_setttb, /* Setttb */
118 cpufunc_faultstatus, /* Faultstatus */
119 cpufunc_faultaddress, /* Faultaddress */
123 armv4_tlb_flushID, /* tlb_flushID */
124 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
125 armv4_tlb_flushI, /* tlb_flushI */
126 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
127 armv4_tlb_flushD, /* tlb_flushD */
128 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
130 /* Cache operations */
132 arm9_icache_sync_all, /* icache_sync_all */
133 arm9_icache_sync_range, /* icache_sync_range */
135 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
136 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
137 arm9_dcache_inv_range, /* dcache_inv_range */
138 arm9_dcache_wb_range, /* dcache_wb_range */
140 armv4_idcache_inv_all, /* idcache_inv_all */
141 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
142 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
143 cpufunc_nullop, /* l2cache_wbinv_all */
144 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
145 (void *)cpufunc_nullop, /* l2cache_inv_range */
146 (void *)cpufunc_nullop, /* l2cache_wb_range */
147 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
149 /* Other functions */
151 cpufunc_nullop, /* flush_prefetchbuf */
152 armv4_drain_writebuf, /* drain_writebuf */
153 cpufunc_nullop, /* flush_brnchtgt_C */
154 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
156 (void *)cpufunc_nullop, /* sleep */
160 cpufunc_null_fixup, /* dataabt_fixup */
161 cpufunc_null_fixup, /* prefetchabt_fixup */
163 arm9_context_switch, /* context_switch */
165 arm9_setup /* cpu setup */
168 #endif /* CPU_ARM9 */
170 #if defined(CPU_ARM9E)
171 struct cpu_functions armv5_ec_cpufuncs = {
175 cpufunc_nullop, /* cpwait */
179 cpufunc_control, /* control */
180 cpufunc_domains, /* Domain */
181 armv5_ec_setttb, /* Setttb */
182 cpufunc_faultstatus, /* Faultstatus */
183 cpufunc_faultaddress, /* Faultaddress */
187 armv4_tlb_flushID, /* tlb_flushID */
188 arm10_tlb_flushID_SE, /* tlb_flushID_SE */
189 armv4_tlb_flushI, /* tlb_flushI */
190 arm10_tlb_flushI_SE, /* tlb_flushI_SE */
191 armv4_tlb_flushD, /* tlb_flushD */
192 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
194 /* Cache operations */
196 armv5_ec_icache_sync_all, /* icache_sync_all */
197 armv5_ec_icache_sync_range, /* icache_sync_range */
199 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
200 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
201 armv5_ec_dcache_inv_range, /* dcache_inv_range */
202 armv5_ec_dcache_wb_range, /* dcache_wb_range */
204 armv4_idcache_inv_all, /* idcache_inv_all */
205 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
206 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
208 cpufunc_nullop, /* l2cache_wbinv_all */
209 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
210 (void *)cpufunc_nullop, /* l2cache_inv_range */
211 (void *)cpufunc_nullop, /* l2cache_wb_range */
212 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
214 /* Other functions */
216 cpufunc_nullop, /* flush_prefetchbuf */
217 armv4_drain_writebuf, /* drain_writebuf */
218 cpufunc_nullop, /* flush_brnchtgt_C */
219 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
221 (void *)cpufunc_nullop, /* sleep */
225 cpufunc_null_fixup, /* dataabt_fixup */
226 cpufunc_null_fixup, /* prefetchabt_fixup */
228 arm10_context_switch, /* context_switch */
230 arm10_setup /* cpu setup */
/* Cpu-functions dispatch table for Marvell Sheeva (ARMv5TE) cores. */
struct cpu_functions sheeva_cpufuncs = {
	cpufunc_nullop,			/* cpwait		*/

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	sheeva_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all	*/
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
	sheeva_l2cache_inv_range,	/* l2cache_inv_range	*/
	sheeva_l2cache_wb_range,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sheeva_cpu_sleep,		/* sleep		*/

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
295 #endif /* CPU_ARM9E */
/* Cpu-functions dispatch table for Marvell PJ4B (ARMv7) cores. */
struct cpu_functions pj4bv7_cpufuncs = {
	armv7_drain_writebuf,		/* cpwait		*/

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv7_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/*
	 * TLB functions: ARMv7 uses a unified TLB model, so the combined
	 * 'ID' flush routines serve the I-only and D-only slots as well.
	 */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	/* A full icache sync is done with a combined I+D writeback+invalidate. */
	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv7_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	armv7_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
358 #endif /* CPU_MV_PJ4B */
360 #if defined(CPU_XSCALE_80321) || \
361 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
362 defined(CPU_XSCALE_80219)
364 struct cpu_functions xscale_cpufuncs = {
368 xscale_cpwait, /* cpwait */
372 xscale_control, /* control */
373 cpufunc_domains, /* domain */
374 xscale_setttb, /* setttb */
375 cpufunc_faultstatus, /* faultstatus */
376 cpufunc_faultaddress, /* faultaddress */
380 armv4_tlb_flushID, /* tlb_flushID */
381 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
382 armv4_tlb_flushI, /* tlb_flushI */
383 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
384 armv4_tlb_flushD, /* tlb_flushD */
385 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
387 /* Cache operations */
389 xscale_cache_syncI, /* icache_sync_all */
390 xscale_cache_syncI_rng, /* icache_sync_range */
392 xscale_cache_purgeD, /* dcache_wbinv_all */
393 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
394 xscale_cache_flushD_rng, /* dcache_inv_range */
395 xscale_cache_cleanD_rng, /* dcache_wb_range */
397 xscale_cache_flushID, /* idcache_inv_all */
398 xscale_cache_purgeID, /* idcache_wbinv_all */
399 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
400 cpufunc_nullop, /* l2cache_wbinv_all */
401 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
402 (void *)cpufunc_nullop, /* l2cache_inv_range */
403 (void *)cpufunc_nullop, /* l2cache_wb_range */
404 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
406 /* Other functions */
408 cpufunc_nullop, /* flush_prefetchbuf */
409 armv4_drain_writebuf, /* drain_writebuf */
410 cpufunc_nullop, /* flush_brnchtgt_C */
411 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
413 xscale_cpu_sleep, /* sleep */
417 cpufunc_null_fixup, /* dataabt_fixup */
418 cpufunc_null_fixup, /* prefetchabt_fixup */
420 xscale_context_switch, /* context_switch */
422 xscale_setup /* cpu setup */
/* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
   CPU_XSCALE_80219 */
428 #ifdef CPU_XSCALE_81342
429 struct cpu_functions xscalec3_cpufuncs = {
433 xscale_cpwait, /* cpwait */
437 xscale_control, /* control */
438 cpufunc_domains, /* domain */
439 xscalec3_setttb, /* setttb */
440 cpufunc_faultstatus, /* faultstatus */
441 cpufunc_faultaddress, /* faultaddress */
445 armv4_tlb_flushID, /* tlb_flushID */
446 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
447 armv4_tlb_flushI, /* tlb_flushI */
448 (void *)armv4_tlb_flushI, /* tlb_flushI_SE */
449 armv4_tlb_flushD, /* tlb_flushD */
450 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
452 /* Cache operations */
454 xscalec3_cache_syncI, /* icache_sync_all */
455 xscalec3_cache_syncI_rng, /* icache_sync_range */
457 xscalec3_cache_purgeD, /* dcache_wbinv_all */
458 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
459 xscale_cache_flushD_rng, /* dcache_inv_range */
460 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
462 xscale_cache_flushID, /* idcache_inv_all */
463 xscalec3_cache_purgeID, /* idcache_wbinv_all */
464 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
465 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
466 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
467 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
468 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
469 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
471 /* Other functions */
473 cpufunc_nullop, /* flush_prefetchbuf */
474 armv4_drain_writebuf, /* drain_writebuf */
475 cpufunc_nullop, /* flush_brnchtgt_C */
476 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
478 xscale_cpu_sleep, /* sleep */
482 cpufunc_null_fixup, /* dataabt_fixup */
483 cpufunc_null_fixup, /* prefetchabt_fixup */
485 xscalec3_context_switch, /* context_switch */
487 xscale_setup /* cpu setup */
489 #endif /* CPU_XSCALE_81342 */
492 #if defined(CPU_FA526)
493 struct cpu_functions fa526_cpufuncs = {
497 cpufunc_nullop, /* cpwait */
501 cpufunc_control, /* control */
502 cpufunc_domains, /* domain */
503 fa526_setttb, /* setttb */
504 cpufunc_faultstatus, /* faultstatus */
505 cpufunc_faultaddress, /* faultaddress */
509 armv4_tlb_flushID, /* tlb_flushID */
510 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
511 armv4_tlb_flushI, /* tlb_flushI */
512 fa526_tlb_flushI_SE, /* tlb_flushI_SE */
513 armv4_tlb_flushD, /* tlb_flushD */
514 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
516 /* Cache operations */
518 fa526_icache_sync_all, /* icache_sync_all */
519 fa526_icache_sync_range, /* icache_sync_range */
521 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
522 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
523 fa526_dcache_inv_range, /* dcache_inv_range */
524 fa526_dcache_wb_range, /* dcache_wb_range */
526 armv4_idcache_inv_all, /* idcache_inv_all */
527 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
528 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
529 cpufunc_nullop, /* l2cache_wbinv_all */
530 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
531 (void *)cpufunc_nullop, /* l2cache_inv_range */
532 (void *)cpufunc_nullop, /* l2cache_wb_range */
533 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
535 /* Other functions */
537 fa526_flush_prefetchbuf, /* flush_prefetchbuf */
538 armv4_drain_writebuf, /* drain_writebuf */
539 cpufunc_nullop, /* flush_brnchtgt_C */
540 fa526_flush_brnchtgt_E, /* flush_brnchtgt_E */
542 fa526_cpu_sleep, /* sleep */
546 cpufunc_null_fixup, /* dataabt_fixup */
547 cpufunc_null_fixup, /* prefetchabt_fixup */
549 fa526_context_switch, /* context_switch */
551 fa526_setup /* cpu setup */
553 #endif /* CPU_FA526 */
555 #if defined(CPU_ARM1176)
556 struct cpu_functions arm1176_cpufuncs = {
560 cpufunc_nullop, /* cpwait */
564 cpufunc_control, /* control */
565 cpufunc_domains, /* Domain */
566 arm11x6_setttb, /* Setttb */
567 cpufunc_faultstatus, /* Faultstatus */
568 cpufunc_faultaddress, /* Faultaddress */
572 arm11_tlb_flushID, /* tlb_flushID */
573 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
574 arm11_tlb_flushI, /* tlb_flushI */
575 arm11_tlb_flushI_SE, /* tlb_flushI_SE */
576 arm11_tlb_flushD, /* tlb_flushD */
577 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
579 /* Cache operations */
581 arm11x6_icache_sync_all, /* icache_sync_all */
582 arm11x6_icache_sync_range, /* icache_sync_range */
584 arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
585 armv6_dcache_wbinv_range, /* dcache_wbinv_range */
586 armv6_dcache_inv_range, /* dcache_inv_range */
587 armv6_dcache_wb_range, /* dcache_wb_range */
589 armv6_idcache_inv_all, /* idcache_inv_all */
590 arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
591 arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
593 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
594 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
595 (void *)cpufunc_nullop, /* l2cache_inv_range */
596 (void *)cpufunc_nullop, /* l2cache_wb_range */
597 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
599 /* Other functions */
601 arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */
602 arm11_drain_writebuf, /* drain_writebuf */
603 cpufunc_nullop, /* flush_brnchtgt_C */
604 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
606 arm11x6_sleep, /* sleep */
610 cpufunc_null_fixup, /* dataabt_fixup */
611 cpufunc_null_fixup, /* prefetchabt_fixup */
613 arm11_context_switch, /* context_switch */
615 arm11x6_setup /* cpu setup */
617 #endif /*CPU_ARM1176 */
619 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
620 struct cpu_functions cortexa_cpufuncs = {
624 cpufunc_nullop, /* cpwait */
628 cpufunc_control, /* control */
629 cpufunc_domains, /* Domain */
630 armv7_setttb, /* Setttb */
631 cpufunc_faultstatus, /* Faultstatus */
632 cpufunc_faultaddress, /* Faultaddress */
635 * TLB functions. ARMv7 does all TLB ops based on a unified TLB model
636 * whether the hardware implements separate I+D or not, so we use the
637 * same 'ID' functions for all 3 variations.
640 armv7_tlb_flushID, /* tlb_flushID */
641 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
642 armv7_tlb_flushID, /* tlb_flushI */
643 armv7_tlb_flushID_SE, /* tlb_flushI_SE */
644 armv7_tlb_flushID, /* tlb_flushD */
645 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
647 /* Cache operations */
649 armv7_icache_sync_all, /* icache_sync_all */
650 armv7_icache_sync_range, /* icache_sync_range */
652 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
653 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
654 armv7_dcache_inv_range, /* dcache_inv_range */
655 armv7_dcache_wb_range, /* dcache_wb_range */
657 armv7_idcache_inv_all, /* idcache_inv_all */
658 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
659 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
662 * Note: For CPUs using the PL310 the L2 ops are filled in when the
663 * L2 cache controller is actually enabled.
665 cpufunc_nullop, /* l2cache_wbinv_all */
666 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
667 (void *)cpufunc_nullop, /* l2cache_inv_range */
668 (void *)cpufunc_nullop, /* l2cache_wb_range */
669 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
671 /* Other functions */
673 cpufunc_nullop, /* flush_prefetchbuf */
674 armv7_drain_writebuf, /* drain_writebuf */
675 cpufunc_nullop, /* flush_brnchtgt_C */
676 (void *)cpufunc_nullop, /* flush_brnchtgt_E */
678 armv7_sleep, /* sleep */
682 cpufunc_null_fixup, /* dataabt_fixup */
683 cpufunc_null_fixup, /* prefetchabt_fixup */
685 armv7_context_switch, /* context_switch */
687 cortexa_setup /* cpu setup */
689 #endif /* CPU_CORTEXA */
692 * Global constants also used by locore.s
695 struct cpu_functions cpufuncs;
697 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
699 #if defined(CPU_ARM9) || \
700 defined (CPU_ARM9E) || \
701 defined(CPU_ARM1176) || defined(CPU_XSCALE_80321) || \
702 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
703 defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
704 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
705 defined(CPU_CORTEXA) || defined(CPU_KRAIT)
707 /* Global cache line sizes, use 32 as default */
708 int arm_dcache_min_line_size = 32;
709 int arm_icache_min_line_size = 32;
710 int arm_idcache_min_line_size = 32;
712 static void get_cachetype_cp15(void);
714 /* Additional cache information local to this file. Log2 of some of the
716 static int arm_dcache_l2_nsets;
717 static int arm_dcache_l2_assoc;
718 static int arm_dcache_l2_linesize;
723 u_int ctype, isize, dsize, cpuid;
724 u_int clevel, csize, i, sel;
728 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
731 cpuid = cpufunc_id();
733 * ...and thus spake the ARM ARM:
735 * If an <opcode2> value corresponding to an unimplemented or
736 * reserved ID register is encountered, the System Control
737 * processor returns the value of the main ID register.
742 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
743 /* Resolve minimal cache line sizes */
744 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
745 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
746 arm_idcache_min_line_size =
747 min(arm_icache_min_line_size, arm_dcache_min_line_size);
749 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
751 arm_cache_level = clevel;
752 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
754 while ((type = (clevel & 0x7)) && i < 7) {
755 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
756 type == CACHE_SEP_CACHE) {
758 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
760 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
762 arm_cache_type[sel] = csize;
763 arm_dcache_align = 1 <<
764 (CPUV7_CT_xSIZE_LEN(csize) + 4);
765 arm_dcache_align_mask = arm_dcache_align - 1;
767 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
769 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
771 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
773 arm_cache_type[sel] = csize;
779 if ((ctype & CPU_CT_S) == 0)
780 arm_pcache_unified = 1;
783 * If you want to know how this code works, go read the ARM ARM.
786 arm_pcache_type = CPU_CT_CTYPE(ctype);
788 if (arm_pcache_unified == 0) {
789 isize = CPU_CT_ISIZE(ctype);
790 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
791 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
792 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
793 if (isize & CPU_CT_xSIZE_M)
794 arm_picache_line_size = 0; /* not present */
796 arm_picache_ways = 1;
798 arm_picache_ways = multiplier <<
799 (CPU_CT_xSIZE_ASSOC(isize) - 1);
801 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
804 dsize = CPU_CT_DSIZE(ctype);
805 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
806 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
807 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
808 if (dsize & CPU_CT_xSIZE_M)
809 arm_pdcache_line_size = 0; /* not present */
811 arm_pdcache_ways = 1;
813 arm_pdcache_ways = multiplier <<
814 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
816 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
818 arm_dcache_align = arm_pdcache_line_size;
820 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
821 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
822 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
823 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
826 arm_dcache_align_mask = arm_dcache_align - 1;
829 #endif /* ARM9 || XSCALE */
832 * Cannot panic here as we may not have a console yet ...
838 cputype = cpufunc_id();
839 cputype &= CPU_ID_CPU_MASK;
842 * NOTE: cpu_do_powersave defaults to off. If we encounter a
843 * CPU type where we want to use it by default, then we set it.
847 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
848 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
849 (cputype & 0x0000f000) == 0x00009000) {
850 cpufuncs = arm9_cpufuncs;
851 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
852 get_cachetype_cp15();
853 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
854 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
855 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
856 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
857 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
858 pmap_pte_init_generic();
861 #endif /* CPU_ARM9 */
862 #if defined(CPU_ARM9E)
863 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
864 cputype == CPU_ID_MV88FR571_41) {
865 uint32_t sheeva_ctrl;
867 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
870 * Workaround for Marvell MV78100 CPU: Cache prefetch
871 * mechanism may affect the cache coherency validity,
872 * so it needs to be disabled.
874 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
875 * L2 Prefetching Mechanism) for details.
877 if (cputype == CPU_ID_MV88FR571_VD ||
878 cputype == CPU_ID_MV88FR571_41)
879 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
881 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
883 cpufuncs = sheeva_cpufuncs;
884 get_cachetype_cp15();
885 pmap_pte_init_generic();
887 } else if (cputype == CPU_ID_ARM926EJS) {
888 cpufuncs = armv5_ec_cpufuncs;
889 get_cachetype_cp15();
890 pmap_pte_init_generic();
893 #endif /* CPU_ARM9E */
894 #if defined(CPU_ARM1176)
895 if (cputype == CPU_ID_ARM1176JZS) {
896 cpufuncs = arm1176_cpufuncs;
897 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
898 get_cachetype_cp15();
900 pmap_pte_init_mmu_v6();
904 #endif /* CPU_ARM1176 */
905 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
906 if (cputype == CPU_ID_CORTEXA5 ||
907 cputype == CPU_ID_CORTEXA7 ||
908 cputype == CPU_ID_CORTEXA8R1 ||
909 cputype == CPU_ID_CORTEXA8R2 ||
910 cputype == CPU_ID_CORTEXA8R3 ||
911 cputype == CPU_ID_CORTEXA9R1 ||
912 cputype == CPU_ID_CORTEXA9R2 ||
913 cputype == CPU_ID_CORTEXA9R3 ||
914 cputype == CPU_ID_CORTEXA12R0 ||
915 cputype == CPU_ID_CORTEXA15R0 ||
916 cputype == CPU_ID_CORTEXA15R1 ||
917 cputype == CPU_ID_CORTEXA15R2 ||
918 cputype == CPU_ID_CORTEXA15R3 ||
919 cputype == CPU_ID_KRAIT ) {
920 cpufuncs = cortexa_cpufuncs;
921 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
922 get_cachetype_cp15();
924 pmap_pte_init_mmu_v6();
925 /* Use powersave on this CPU. */
926 cpu_do_powersave = 1;
929 #endif /* CPU_CORTEXA */
931 #if defined(CPU_MV_PJ4B)
932 if (cputype == CPU_ID_MV88SV581X_V7 ||
933 cputype == CPU_ID_MV88SV584X_V7 ||
934 cputype == CPU_ID_ARM_88SV581X_V7) {
935 cpufuncs = pj4bv7_cpufuncs;
936 get_cachetype_cp15();
937 pmap_pte_init_mmu_v6();
940 #endif /* CPU_MV_PJ4B */
942 #if defined(CPU_FA526)
943 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
944 cpufuncs = fa526_cpufuncs;
945 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
946 get_cachetype_cp15();
947 pmap_pte_init_generic();
949 /* Use powersave on this CPU. */
950 cpu_do_powersave = 1;
954 #endif /* CPU_FA526 */
956 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
957 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
958 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
959 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
960 cpufuncs = xscale_cpufuncs;
961 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
962 get_cachetype_cp15();
963 pmap_pte_init_xscale();
966 #endif /* CPU_XSCALE_80321 */
968 #if defined(CPU_XSCALE_81342)
969 if (cputype == CPU_ID_81342) {
970 cpufuncs = xscalec3_cpufuncs;
971 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
972 get_cachetype_cp15();
973 pmap_pte_init_xscale();
976 #endif /* CPU_XSCALE_81342 */
977 #ifdef CPU_XSCALE_PXA2X0
978 /* ignore core revision to test PXA2xx CPUs */
979 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
980 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
981 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
983 cpufuncs = xscale_cpufuncs;
984 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
985 get_cachetype_cp15();
986 pmap_pte_init_xscale();
988 /* Use powersave on this CPU. */
989 cpu_do_powersave = 1;
993 #endif /* CPU_XSCALE_PXA2X0 */
994 #ifdef CPU_XSCALE_IXP425
995 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
996 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
998 cpufuncs = xscale_cpufuncs;
999 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
1000 get_cachetype_cp15();
1001 pmap_pte_init_xscale();
1005 #endif /* CPU_XSCALE_IXP425 */
1007 * Bzzzz. And the answer was ...
1009 panic("No support for this CPU type (%08x) in kernel", cputype);
1010 return(ARCHITECTURE_NOT_PRESENT);
1012 uma_set_align(arm_dcache_align_mask);
1017 * Fixup routines for data and prefetch aborts.
1019 * Several compile time symbols are used
1021 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1022 * correction of registers after a fault.
1027 * Null abort fixup routine.
1028 * For use when no fixup is required.
1031 cpufunc_null_fixup(arg)
1034 return(ABORT_FIXUP_OK);
1045 int cpuctrl, cpuctrlmask;
1047 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1048 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1049 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1050 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1051 CPU_CONTROL_ROUNDROBIN;
1052 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1053 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1054 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1055 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1056 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1057 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1058 | CPU_CONTROL_ROUNDROBIN;
1060 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1061 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1065 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1067 if (vector_page == ARM_VECTORS_HIGH)
1068 cpuctrl |= CPU_CONTROL_VECRELOC;
1070 /* Clear out the cache */
1071 cpu_idcache_wbinv_all();
1073 /* Set the control register */
1074 cpu_control(cpuctrlmask, cpuctrl);
1078 #endif /* CPU_ARM9 */
1080 #if defined(CPU_ARM9E)
1084 int cpuctrl, cpuctrlmask;
1086 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1087 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1088 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1089 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1090 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1091 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1092 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1093 | CPU_CONTROL_BPRD_ENABLE
1094 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1096 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1097 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1101 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1104 /* Clear out the cache */
1105 cpu_idcache_wbinv_all();
1107 /* Now really make sure they are clean. */
1108 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1110 if (vector_page == ARM_VECTORS_HIGH)
1111 cpuctrl |= CPU_CONTROL_VECRELOC;
1113 /* Set the control register */
1115 cpu_control(0xffffffff, cpuctrl);
1118 cpu_idcache_wbinv_all();
1120 #endif /* CPU_ARM9E || CPU_ARM10 */
1122 #if defined(CPU_ARM1176) \
1123 || defined(CPU_MV_PJ4B) \
1124 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1125 static __inline void
1126 cpu_scc_setup_ccnt(void)
1128 /* This is how you give userland access to the CCNT and PMCn
1130 * BEWARE! This gives write access also, which may not be what
1133 #ifdef _PMC_USER_READ_WRITE_
1134 #if defined(CPU_ARM1176)
1135 /* Use the Secure User and Non-secure Access Validation Control Register
1136 * to allow userland access
1138 __asm volatile ("mcr p15, 0, %0, c15, c9, 0\n\t"
1142 /* Set PMUSERENR[0] to allow userland access */
1143 __asm volatile ("mcr p15, 0, %0, c9, c14, 0\n\t"
1148 #if defined(CPU_ARM1176)
1149 /* Set PMCR[2,0] to enable counters and reset CCNT */
1150 __asm volatile ("mcr p15, 0, %0, c15, c12, 0\n\t"
1154 /* Set up the PMCCNTR register as a cyclecounter:
1155 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
1156 * Set PMCR[2,0] to enable counters and reset CCNT
1157 * Set PMCNTENSET to 0x80000000 to enable CCNT */
1158 __asm volatile ("mcr p15, 0, %0, c9, c14, 2\n\t"
1159 "mcr p15, 0, %1, c9, c12, 0\n\t"
1160 "mcr p15, 0, %2, c9, c12, 1\n\t"
#if defined(CPU_ARM1176)
	/*
	 * ARM1176 setup: build the desired system-control-register value,
	 * a "write as existing" mask for bits we must not disturb, then
	 * flush caches and commit both the control and auxiliary-control
	 * registers.  (NOTE(review): the function header is outside this
	 * extract — this is the visible body only.)
	 */
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	/* Identify the exact core revision so errata can be applied below. */
	cpuid = cpufunc_id();
	/* Baseline: MMU, both caches, write buffer, 32-bit prog/data
	 * spaces, late aborts, system protection, unaligned access. */
	CPU_CONTROL_MMU_ENABLE |
	CPU_CONTROL_DC_ENABLE |
	CPU_CONTROL_WBUF_ENABLE |
	CPU_CONTROL_32BP_ENABLE |
	CPU_CONTROL_32BD_ENABLE |
	CPU_CONTROL_LABT_ENABLE |
	CPU_CONTROL_SYST_ENABLE |
	CPU_CONTROL_IC_ENABLE |
	CPU_CONTROL_UNAL_ENABLE;
	/*
	 * "write as existing" bits: these are preserved as currently set
	 * in the hardware register; the inverse of this is the mask of
	 * bits we are willing to change.
	 */
	(3 << 30) | /* SBZ */
	(1 << 29) | /* FA */
	(1 << 28) | /* TR */
	(3 << 26) | /* SBZ */
	(3 << 19) | /* SBZ */
	(1 << 17); /* SBZ */
	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
	/* Relocate exception vectors to 0xffff0000 when requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
	/*
	 * Enable an errata workaround
	 * (PHD disable for ARM1176JZ-S r0 — TODO confirm erratum number)
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	/* Clear out the cache */
	cpu_idcache_wbinv_all();
	/* Now really make sure they are clean. */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
	/* Allow detection code to find the VFP if it's fitted. */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
	/* Set the control register */
	cpu_control(~cpuctrl_wax, cpuctrl);
	/* Read-modify-write the auxiliary control register: keep the
	 * "wax" bits, OR in the desired bits, write back if changed. */
	__asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t"
			"and %1, %0, %2\n\t"
			"orr %1, %1, %3\n\t"
			"mcrne p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			"r"(auxctrl_wax), "r"(auxctrl));
	cpu_idcache_wbinv_all();
	cpu_scc_setup_ccnt();
#endif /* CPU_ARM1176 */
1261 cpuctrl = CPU_CONTROL_MMU_ENABLE;
1262 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1263 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1265 cpuctrl |= CPU_CONTROL_DC_ENABLE;
1266 cpuctrl |= (0xf << 3);
1267 cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1268 cpuctrl |= CPU_CONTROL_IC_ENABLE;
1269 if (vector_page == ARM_VECTORS_HIGH)
1270 cpuctrl |= CPU_CONTROL_VECRELOC;
1271 cpuctrl |= (0x5 << 16) | (1 < 22);
1272 cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1274 /* Clear out the cache */
1275 cpu_idcache_wbinv_all();
1277 /* Set the control register */
1279 cpu_control(0xFFFFFFFF, cpuctrl);
1282 cpu_idcache_wbinv_all();
1284 cpu_scc_setup_ccnt();
1286 #endif /* CPU_MV_PJ4B */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	int cpuctrl, cpuctrlmask;
	/* The set of SCTLR bits this routine is allowed to modify. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | /* MMU enable [0] */
	CPU_CONTROL_AFLT_ENABLE | /* Alignment fault [1] */
	CPU_CONTROL_DC_ENABLE | /* DCache enable [2] */
	CPU_CONTROL_BPRD_ENABLE | /* Branch prediction [11] */
	CPU_CONTROL_IC_ENABLE | /* ICache enable [12] */
	CPU_CONTROL_VECRELOC; /* Vector relocation [13] */
	/* Desired state: MMU, both caches and branch prediction on. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE |
	CPU_CONTROL_IC_ENABLE |
	CPU_CONTROL_DC_ENABLE |
	CPU_CONTROL_BPRD_ENABLE;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
	/* Switch to big endian */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
	/* Check if the vector page is at the high address (0xffff0000) */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
	/* Clear out the cache */
	cpu_idcache_wbinv_all();
	/* Set the control register — only bits in cpuctrlmask change. */
	cpu_control(cpuctrlmask, cpuctrl);
	cpu_idcache_wbinv_all();
	/* ACTLR bit 0 = FW, bit 6 = SMP — TODO confirm per-core meaning. */
	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting */
	cpu_scc_setup_ccnt();
#endif /* CPU_CORTEXA */
#if defined(CPU_FA526)
	int cpuctrl, cpuctrlmask;
	/* Desired state: MMU, caches, write buffer, system protection,
	 * late aborts and branch prediction enabled. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		| CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		| CPU_CONTROL_BPRD_ENABLE;
	/* Bits this routine is nominally permitted to touch.
	 * NOTE(review): the cpu_control() call below passes 0xffffffff,
	 * so this mask is computed but unused — verify intent. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		| CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		| CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		| CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		| CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		| CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		| CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
	/* Big-endian operation (compile-time option on other cores). */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
	/* Relocate exception vectors to 0xffff0000 when requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
	/* Clear out the cache */
	cpu_idcache_wbinv_all();
	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);
#endif /* CPU_FA526 */
1376 #if defined(CPU_XSCALE_80321) || \
1377 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1378 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
1383 int cpuctrl, cpuctrlmask;
1386 * The XScale Write Buffer is always enabled. Our option
1387 * is to enable/disable coalescing. Note that bits 6:3
1388 * must always be enabled.
1391 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1392 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1393 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1394 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1395 | CPU_CONTROL_BPRD_ENABLE;
1396 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1397 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1398 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1399 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1400 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1401 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1402 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1403 CPU_CONTROL_L2_ENABLE;
1405 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1406 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1410 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1413 if (vector_page == ARM_VECTORS_HIGH)
1414 cpuctrl |= CPU_CONTROL_VECRELOC;
1415 #ifdef CPU_XSCALE_CORE3
1416 cpuctrl |= CPU_CONTROL_L2_ENABLE;
1419 /* Clear out the cache */
1420 cpu_idcache_wbinv_all();
1423 * Set the control register. Note that bits 6:3 must always
1427 /* cpu_control(cpuctrlmask, cpuctrl);*/
1428 cpu_control(0xffffffff, cpuctrl);
1430 /* Make sure write coalescing is turned on */
1431 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1433 #ifdef XSCALE_NO_COALESCE_WRITES
1434 auxctl |= XSCALE_AUXCTL_K;
1436 auxctl &= ~XSCALE_AUXCTL_K;
1438 #ifdef CPU_XSCALE_CORE3
1439 auxctl |= XSCALE_AUXCTL_LLR;
1440 auxctl |= XSCALE_AUXCTL_MD_MASK;
1442 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1445 #endif /* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425