1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 * products derived from this software without specific prior written
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * RiscBSD kernel project
40 * C functions for supporting CPU / MMU / TLB specific operations.
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/systm.h>
50 #include <sys/mutex.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
60 #include <machine/acle-compat.h>
61 #include <machine/cpuconf.h>
62 #include <machine/cpufunc.h>
64 #if defined(CPU_XSCALE_81342)
65 #include <arm/xscale/i8134x/i81342reg.h>
68 #ifdef CPU_XSCALE_IXP425
69 #include <arm/xscale/ixp425/ixp425reg.h>
70 #include <arm/xscale/ixp425/ixp425var.h>
73 /* PRIMARY CACHE VARIABLES */
75 int arm_picache_line_size;
78 int arm_pdcache_size; /* and unified */
79 int arm_pdcache_line_size;
83 int arm_pcache_unified;
86 int arm_dcache_align_mask;
88 u_int arm_cache_level;
89 u_int arm_cache_type[14];
93 struct cpu_functions arm9_cpufuncs = {
96 cpufunc_nullop, /* cpwait */
100 cpufunc_control, /* control */
101 arm9_setttb, /* Setttb */
105 armv4_tlb_flushID, /* tlb_flushID */
106 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
107 armv4_tlb_flushD, /* tlb_flushD */
108 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
110 /* Cache operations */
112 arm9_icache_sync_range, /* icache_sync_range */
114 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
115 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
116 arm9_dcache_inv_range, /* dcache_inv_range */
117 arm9_dcache_wb_range, /* dcache_wb_range */
119 armv4_idcache_inv_all, /* idcache_inv_all */
120 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
121 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
122 cpufunc_nullop, /* l2cache_wbinv_all */
123 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
124 (void *)cpufunc_nullop, /* l2cache_inv_range */
125 (void *)cpufunc_nullop, /* l2cache_wb_range */
126 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
128 /* Other functions */
130 armv4_drain_writebuf, /* drain_writebuf */
132 (void *)cpufunc_nullop, /* sleep */
136 arm9_context_switch, /* context_switch */
138 arm9_setup /* cpu setup */
141 #endif /* CPU_ARM9 */
143 #if defined(CPU_ARM9E)
144 struct cpu_functions armv5_ec_cpufuncs = {
147 cpufunc_nullop, /* cpwait */
151 cpufunc_control, /* control */
152 armv5_ec_setttb, /* Setttb */
156 armv4_tlb_flushID, /* tlb_flushID */
157 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
158 armv4_tlb_flushD, /* tlb_flushD */
159 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
161 /* Cache operations */
163 armv5_ec_icache_sync_range, /* icache_sync_range */
165 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
166 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
167 armv5_ec_dcache_inv_range, /* dcache_inv_range */
168 armv5_ec_dcache_wb_range, /* dcache_wb_range */
170 armv4_idcache_inv_all, /* idcache_inv_all */
171 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
172 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
174 cpufunc_nullop, /* l2cache_wbinv_all */
175 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
176 (void *)cpufunc_nullop, /* l2cache_inv_range */
177 (void *)cpufunc_nullop, /* l2cache_wb_range */
178 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
180 /* Other functions */
182 armv4_drain_writebuf, /* drain_writebuf */
184 (void *)cpufunc_nullop, /* sleep */
188 arm9_context_switch, /* context_switch */
190 arm10_setup /* cpu setup */
/* cpufuncs dispatch table for Marvell Sheeva cores (selected for CPU_ID_MV88FR* in set_cpufuncs). */
194 struct cpu_functions sheeva_cpufuncs = {
197 cpufunc_nullop, /* cpwait */
201 cpufunc_control, /* control */
202 sheeva_setttb, /* Setttb */
206 armv4_tlb_flushID, /* tlb_flushID */
207 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
208 armv4_tlb_flushD, /* tlb_flushD */
209 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
211 /* Cache operations */
213 armv5_ec_icache_sync_range, /* icache_sync_range */
215 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
216 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
217 sheeva_dcache_inv_range, /* dcache_inv_range */
218 sheeva_dcache_wb_range, /* dcache_wb_range */
220 armv4_idcache_inv_all, /* idcache_inv_all */
221 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
222 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
224 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
225 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
226 sheeva_l2cache_inv_range, /* l2cache_inv_range */
227 sheeva_l2cache_wb_range, /* l2cache_wb_range */
228 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
230 /* Other functions */
232 armv4_drain_writebuf, /* drain_writebuf */
234 sheeva_cpu_sleep, /* sleep */
238 arm9_context_switch, /* context_switch */
240 arm10_setup /* cpu setup */
/* cpufuncs dispatch table for Marvell PJ4B (ARMv7) cores; TLB/cache ops use the unified armv7 routines. */
245 struct cpu_functions pj4bv7_cpufuncs = {
248 armv7_drain_writebuf, /* cpwait */
252 cpufunc_control, /* control */
253 armv7_setttb, /* Setttb */
257 armv7_tlb_flushID, /* tlb_flushID */
258 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
259 armv7_tlb_flushID, /* tlb_flushD */
260 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
262 /* Cache operations */
263 armv7_icache_sync_range, /* icache_sync_range */
265 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
266 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
267 armv7_dcache_inv_range, /* dcache_inv_range */
268 armv7_dcache_wb_range, /* dcache_wb_range */
270 armv7_idcache_inv_all, /* idcache_inv_all */
271 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
272 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
274 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
275 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
276 (void *)cpufunc_nullop, /* l2cache_inv_range */
277 (void *)cpufunc_nullop, /* l2cache_wb_range */
278 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
280 /* Other functions */
282 armv7_drain_writebuf, /* drain_writebuf */
284 (void *)cpufunc_nullop, /* sleep */
287 armv7_context_switch, /* context_switch */
289 pj4bv7_setup /* cpu setup */
293 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
295 struct cpu_functions xscale_cpufuncs = {
298 xscale_cpwait, /* cpwait */
302 xscale_control, /* control */
303 xscale_setttb, /* setttb */
307 armv4_tlb_flushID, /* tlb_flushID */
308 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
309 armv4_tlb_flushD, /* tlb_flushD */
310 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
312 /* Cache operations */
314 xscale_cache_syncI_rng, /* icache_sync_range */
316 xscale_cache_purgeD, /* dcache_wbinv_all */
317 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
318 xscale_cache_flushD_rng, /* dcache_inv_range */
319 xscale_cache_cleanD_rng, /* dcache_wb_range */
321 xscale_cache_flushID, /* idcache_inv_all */
322 xscale_cache_purgeID, /* idcache_wbinv_all */
323 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
324 cpufunc_nullop, /* l2cache_wbinv_all */
325 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
326 (void *)cpufunc_nullop, /* l2cache_inv_range */
327 (void *)cpufunc_nullop, /* l2cache_wb_range */
328 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
330 /* Other functions */
332 armv4_drain_writebuf, /* drain_writebuf */
334 xscale_cpu_sleep, /* sleep */
338 xscale_context_switch, /* context_switch */
340 xscale_setup /* cpu setup */
343 /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
345 #ifdef CPU_XSCALE_81342
346 struct cpu_functions xscalec3_cpufuncs = {
349 xscale_cpwait, /* cpwait */
353 xscale_control, /* control */
354 xscalec3_setttb, /* setttb */
358 armv4_tlb_flushID, /* tlb_flushID */
359 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
360 armv4_tlb_flushD, /* tlb_flushD */
361 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
363 /* Cache operations */
365 xscalec3_cache_syncI_rng, /* icache_sync_range */
367 xscalec3_cache_purgeD, /* dcache_wbinv_all */
368 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
369 xscale_cache_flushD_rng, /* dcache_inv_range */
370 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
372 xscale_cache_flushID, /* idcache_inv_all */
373 xscalec3_cache_purgeID, /* idcache_wbinv_all */
374 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
375 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
376 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
377 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
378 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
379 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
381 /* Other functions */
383 armv4_drain_writebuf, /* drain_writebuf */
385 xscale_cpu_sleep, /* sleep */
389 xscalec3_context_switch, /* context_switch */
391 xscale_setup /* cpu setup */
393 #endif /* CPU_XSCALE_81342 */
396 #if defined(CPU_FA526)
397 struct cpu_functions fa526_cpufuncs = {
400 cpufunc_nullop, /* cpwait */
404 cpufunc_control, /* control */
405 fa526_setttb, /* setttb */
409 armv4_tlb_flushID, /* tlb_flushID */
410 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
411 armv4_tlb_flushD, /* tlb_flushD */
412 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
414 /* Cache operations */
416 fa526_icache_sync_range, /* icache_sync_range */
418 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
419 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
420 fa526_dcache_inv_range, /* dcache_inv_range */
421 fa526_dcache_wb_range, /* dcache_wb_range */
423 armv4_idcache_inv_all, /* idcache_inv_all */
424 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
425 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
426 cpufunc_nullop, /* l2cache_wbinv_all */
427 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
428 (void *)cpufunc_nullop, /* l2cache_inv_range */
429 (void *)cpufunc_nullop, /* l2cache_wb_range */
430 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
432 /* Other functions */
434 armv4_drain_writebuf, /* drain_writebuf */
436 fa526_cpu_sleep, /* sleep */
441 fa526_context_switch, /* context_switch */
443 fa526_setup /* cpu setup */
445 #endif /* CPU_FA526 */
447 #if defined(CPU_ARM1176)
448 struct cpu_functions arm1176_cpufuncs = {
451 cpufunc_nullop, /* cpwait */
455 cpufunc_control, /* control */
456 arm11x6_setttb, /* Setttb */
460 arm11_tlb_flushID, /* tlb_flushID */
461 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
462 arm11_tlb_flushD, /* tlb_flushD */
463 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
465 /* Cache operations */
467 arm11x6_icache_sync_range, /* icache_sync_range */
469 arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
470 armv6_dcache_wbinv_range, /* dcache_wbinv_range */
471 armv6_dcache_inv_range, /* dcache_inv_range */
472 armv6_dcache_wb_range, /* dcache_wb_range */
474 armv6_idcache_inv_all, /* idcache_inv_all */
475 arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
476 arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
478 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
479 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
480 (void *)cpufunc_nullop, /* l2cache_inv_range */
481 (void *)cpufunc_nullop, /* l2cache_wb_range */
482 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
484 /* Other functions */
486 arm11_drain_writebuf, /* drain_writebuf */
488 arm11x6_sleep, /* sleep */
492 arm11_context_switch, /* context_switch */
494 arm11x6_setup /* cpu setup */
496 #endif /*CPU_ARM1176 */
498 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
499 struct cpu_functions cortexa_cpufuncs = {
502 cpufunc_nullop, /* cpwait */
506 cpufunc_control, /* control */
507 armv7_setttb, /* Setttb */
510 * TLB functions. ARMv7 does all TLB ops based on a unified TLB model
511 * whether the hardware implements separate I+D or not, so we use the
512 * same 'ID' functions for all 3 variations.
515 armv7_tlb_flushID, /* tlb_flushID */
516 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
517 armv7_tlb_flushID, /* tlb_flushD */
518 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
520 /* Cache operations */
522 armv7_icache_sync_range, /* icache_sync_range */
524 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
525 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
526 armv7_dcache_inv_range, /* dcache_inv_range */
527 armv7_dcache_wb_range, /* dcache_wb_range */
529 armv7_idcache_inv_all, /* idcache_inv_all */
530 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
531 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
534 * Note: For CPUs using the PL310 the L2 ops are filled in when the
535 * L2 cache controller is actually enabled.
537 cpufunc_nullop, /* l2cache_wbinv_all */
538 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
539 (void *)cpufunc_nullop, /* l2cache_inv_range */
540 (void *)cpufunc_nullop, /* l2cache_wb_range */
541 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
543 /* Other functions */
545 armv7_drain_writebuf, /* drain_writebuf */
547 armv7_cpu_sleep, /* sleep */
551 armv7_context_switch, /* context_switch */
553 cortexa_setup /* cpu setup */
555 #endif /* CPU_CORTEXA || CPU_KRAIT */
558 * Global constants also used by locore.s
561 struct cpu_functions cpufuncs;
564 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */
567 #if defined(CPU_ARM9) || \
568 defined (CPU_ARM9E) || \
569 defined(CPU_ARM1176) || \
570 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
571 defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
572 defined(CPU_XSCALE_81342) || \
573 defined(CPU_CORTEXA) || defined(CPU_KRAIT)
575 /* Global cache line sizes, use 32 as default */
576 int arm_dcache_min_line_size = 32;
577 int arm_icache_min_line_size = 32;
578 int arm_idcache_min_line_size = 32;
580 static void get_cachetype_cp15(void);
582 /* Additional cache information local to this file. Log2 of some of the
584 static int arm_dcache_l2_nsets;
585 static int arm_dcache_l2_assoc;
586 static int arm_dcache_l2_linesize;
591 u_int ctype, isize, dsize, cpuid;
592 u_int clevel, csize, i, sel;
/* Read the CP15 Cache Type register (c0, c0, opcode2 1). */
596 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
601 * ...and thus spake the ARM ARM:
603 * If an <opcode2> value corresponding to an unimplemented or
604 * reserved ID register is encountered, the System Control
605 * processor returns the value of the main ID register.
/* ARMv7 cache-type format: geometry is read from CLIDR/CCSIDR instead. */
610 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
611 /* Resolve minimal cache line sizes */
612 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
613 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
614 arm_idcache_min_line_size =
615 min(arm_icache_min_line_size, arm_dcache_min_line_size);
/* Read CLIDR (op1=1, c0, c0, 1) and walk the reported cache levels. */
617 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
619 arm_cache_level = clevel;
620 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
622 while ((type = (clevel & 0x7)) && i < 7) {
623 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
624 type == CACHE_SEP_CACHE) {
/* Select the cache level in CSSELR, then read its CCSIDR. */
626 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
628 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
630 arm_cache_type[sel] = csize;
631 arm_dcache_align = 1 <<
632 (CPUV7_CT_xSIZE_LEN(csize) + 4);
633 arm_dcache_align_mask = arm_dcache_align - 1;
635 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
637 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
639 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
641 arm_cache_type[sel] = csize;
/* Pre-v7 cache-type format: the S bit clear means a unified cache. */
647 if ((ctype & CPU_CT_S) == 0)
648 arm_pcache_unified = 1;
651 * If you want to know how this code works, go read the ARM ARM.
654 arm_pcache_type = CPU_CT_CTYPE(ctype);
656 if (arm_pcache_unified == 0) {
657 isize = CPU_CT_ISIZE(ctype);
658 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
659 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
660 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
661 if (isize & CPU_CT_xSIZE_M)
662 arm_picache_line_size = 0; /* not present */
664 arm_picache_ways = 1;
666 arm_picache_ways = multiplier <<
667 (CPU_CT_xSIZE_ASSOC(isize) - 1);
669 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
672 dsize = CPU_CT_DSIZE(ctype);
673 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
674 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
675 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
676 if (dsize & CPU_CT_xSIZE_M)
677 arm_pdcache_line_size = 0; /* not present */
679 arm_pdcache_ways = 1;
681 arm_pdcache_ways = multiplier <<
682 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
684 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
686 arm_dcache_align = arm_pdcache_line_size;
/* Log2 L2 geometry consumed by the arm9_dcache_sets_* setup in set_cpufuncs. */
688 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
689 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
690 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
691 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
694 arm_dcache_align_mask = arm_dcache_align - 1;
697 #endif /* ARM9 || XSCALE */
700 * Cannot panic here as we may not have a console yet ...
706 cputype = cpu_ident();
707 cputype &= CPU_ID_CPU_MASK;
710 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
711 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
712 (cputype & 0x0000f000) == 0x00009000) {
713 cpufuncs = arm9_cpufuncs;
714 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
715 get_cachetype_cp15();
716 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
717 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
718 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
719 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
720 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
721 pmap_pte_init_generic();
724 #endif /* CPU_ARM9 */
725 #if defined(CPU_ARM9E)
726 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
727 cputype == CPU_ID_MV88FR571_41) {
728 uint32_t sheeva_ctrl;
730 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
733 * Workaround for Marvell MV78100 CPU: Cache prefetch
734 * mechanism may affect the cache coherency validity,
735 * so it needs to be disabled.
737 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
738 * L2 Prefetching Mechanism) for details.
740 if (cputype == CPU_ID_MV88FR571_VD ||
741 cputype == CPU_ID_MV88FR571_41)
742 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
744 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
746 cpufuncs = sheeva_cpufuncs;
747 get_cachetype_cp15();
748 pmap_pte_init_generic();
750 } else if (cputype == CPU_ID_ARM926EJS) {
751 cpufuncs = armv5_ec_cpufuncs;
752 get_cachetype_cp15();
753 pmap_pte_init_generic();
756 #endif /* CPU_ARM9E */
757 #if defined(CPU_ARM1176)
758 if (cputype == CPU_ID_ARM1176JZS) {
759 cpufuncs = arm1176_cpufuncs;
760 get_cachetype_cp15();
763 #endif /* CPU_ARM1176 */
764 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
765 if (cputype == CPU_ID_CORTEXA5 ||
766 cputype == CPU_ID_CORTEXA7 ||
767 cputype == CPU_ID_CORTEXA8R1 ||
768 cputype == CPU_ID_CORTEXA8R2 ||
769 cputype == CPU_ID_CORTEXA8R3 ||
770 cputype == CPU_ID_CORTEXA9R1 ||
771 cputype == CPU_ID_CORTEXA9R2 ||
772 cputype == CPU_ID_CORTEXA9R3 ||
773 cputype == CPU_ID_CORTEXA9R4 ||
774 cputype == CPU_ID_CORTEXA12R0 ||
775 cputype == CPU_ID_CORTEXA15R0 ||
776 cputype == CPU_ID_CORTEXA15R1 ||
777 cputype == CPU_ID_CORTEXA15R2 ||
778 cputype == CPU_ID_CORTEXA15R3 ||
779 cputype == CPU_ID_KRAIT300R0 ||
780 cputype == CPU_ID_KRAIT300R1 ) {
781 cpufuncs = cortexa_cpufuncs;
782 get_cachetype_cp15();
785 #endif /* CPU_CORTEXA || CPU_KRAIT */
787 #if defined(CPU_MV_PJ4B)
788 if (cputype == CPU_ID_MV88SV581X_V7 ||
789 cputype == CPU_ID_MV88SV584X_V7 ||
790 cputype == CPU_ID_ARM_88SV581X_V7) {
791 cpufuncs = pj4bv7_cpufuncs;
792 get_cachetype_cp15();
795 #endif /* CPU_MV_PJ4B */
797 #if defined(CPU_FA526)
798 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
799 cpufuncs = fa526_cpufuncs;
800 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
801 get_cachetype_cp15();
802 pmap_pte_init_generic();
806 #endif /* CPU_FA526 */
808 #if defined(CPU_XSCALE_81342)
809 if (cputype == CPU_ID_81342) {
810 cpufuncs = xscalec3_cpufuncs;
811 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
812 get_cachetype_cp15();
813 pmap_pte_init_xscale();
816 #endif /* CPU_XSCALE_81342 */
817 #ifdef CPU_XSCALE_PXA2X0
818 /* ignore core revision to test PXA2xx CPUs */
819 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
820 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
821 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
823 cpufuncs = xscale_cpufuncs;
824 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
825 get_cachetype_cp15();
826 pmap_pte_init_xscale();
830 #endif /* CPU_XSCALE_PXA2X0 */
831 #ifdef CPU_XSCALE_IXP425
832 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
833 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
835 cpufuncs = xscale_cpufuncs;
836 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
837 get_cachetype_cp15();
838 pmap_pte_init_xscale();
842 #endif /* CPU_XSCALE_IXP425 */
844 * Bzzzz. And the answer was ...
846 panic("No support for this CPU type (%08x) in kernel", cputype);
847 return(ARCHITECTURE_NOT_PRESENT);
849 uma_set_align(arm_dcache_align_mask);
861 int cpuctrl, cpuctrlmask;
863 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
864 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
865 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
866 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
867 CPU_CONTROL_ROUNDROBIN;
868 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
869 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
870 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
871 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
872 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
873 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
874 | CPU_CONTROL_ROUNDROBIN;
876 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
877 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
881 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
883 if (vector_page == ARM_VECTORS_HIGH)
884 cpuctrl |= CPU_CONTROL_VECRELOC;
886 /* Clear out the cache */
887 cpu_idcache_wbinv_all();
889 /* Set the control register */
890 cpu_control(cpuctrlmask, cpuctrl);
893 #endif /* CPU_ARM9 */
895 #if defined(CPU_ARM9E)
899 int cpuctrl, cpuctrlmask;
901 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
902 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
903 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
904 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
905 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
906 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
907 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
908 | CPU_CONTROL_BPRD_ENABLE
909 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
911 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
912 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
916 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
919 /* Clear out the cache */
920 cpu_idcache_wbinv_all();
922 /* Now really make sure they are clean. */
923 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
925 if (vector_page == ARM_VECTORS_HIGH)
926 cpuctrl |= CPU_CONTROL_VECRELOC;
928 /* Set the control register */
929 cpu_control(0xffffffff, cpuctrl);
932 cpu_idcache_wbinv_all();
934 #endif /* CPU_ARM9E */
936 #if defined(CPU_ARM1176) \
937 || defined(CPU_MV_PJ4B) \
938 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
940 cpu_scc_setup_ccnt(void)
942 /* This is how you give userland access to the CCNT and PMCn
944 * BEWARE! This gives write access also, which may not be what
947 #ifdef _PMC_USER_READ_WRITE_
948 /* Set PMUSERENR[0] to allow userland access */
949 cp15_pmuserenr_set(1);
951 #if defined(CPU_ARM1176)
952 /* Set PMCR[2,0] to enable counters and reset CCNT */
955 /* Set up the PMCCNTR register as a cyclecounter:
956 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
957 * Set PMCR[2,0] to enable counters and reset CCNT
958 * Set PMCNTENSET to 0x80000000 to enable CCNT */
959 cp15_pminten_clr(0xFFFFFFFF);
961 cp15_pmcnten_set(0x80000000);
966 #if defined(CPU_ARM1176)
970 uint32_t auxctrl, auxctrl_wax;
980 * Enable an errata workaround
982 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
983 auxctrl = ARM1176_AUXCTL_PHD;
984 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
987 tmp = cp15_actlr_get();
994 cpu_scc_setup_ccnt();
996 #endif /* CPU_ARM1176 */
1004 cpu_scc_setup_ccnt();
1006 #endif /* CPU_MV_PJ4B */
1008 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1014 cpu_scc_setup_ccnt();
1016 #endif /* CPU_CORTEXA || CPU_KRAIT */
1018 #if defined(CPU_FA526)
1022 int cpuctrl, cpuctrlmask;
1024 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1025 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1026 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1027 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1028 | CPU_CONTROL_BPRD_ENABLE;
1029 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1030 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1031 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1032 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1033 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1034 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1035 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1037 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1038 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1042 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1045 if (vector_page == ARM_VECTORS_HIGH)
1046 cpuctrl |= CPU_CONTROL_VECRELOC;
1048 /* Clear out the cache */
1049 cpu_idcache_wbinv_all();
1051 /* Set the control register */
1052 cpu_control(0xffffffff, cpuctrl);
1054 #endif /* CPU_FA526 */
1056 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1057 defined(CPU_XSCALE_81342)
1062 int cpuctrl, cpuctrlmask;
1065 * The XScale Write Buffer is always enabled. Our option
1066 * is to enable/disable coalescing. Note that bits 6:3
1067 * must always be enabled.
1070 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1071 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1072 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1073 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1074 | CPU_CONTROL_BPRD_ENABLE;
1075 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1076 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1077 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1078 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1079 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1080 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1081 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1082 CPU_CONTROL_L2_ENABLE;
1084 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1085 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1089 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1092 if (vector_page == ARM_VECTORS_HIGH)
1093 cpuctrl |= CPU_CONTROL_VECRELOC;
1094 #ifdef CPU_XSCALE_CORE3
1095 cpuctrl |= CPU_CONTROL_L2_ENABLE;
1098 /* Clear out the cache */
1099 cpu_idcache_wbinv_all();
1102 * Set the control register. Note that bits 6:3 must always
1105 /* cpu_control(cpuctrlmask, cpuctrl);*/
1106 cpu_control(0xffffffff, cpuctrl);
1108 /* Make sure write coalescing is turned on */
1109 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1111 #ifdef XSCALE_NO_COALESCE_WRITES
1112 auxctl |= XSCALE_AUXCTL_K;
1114 auxctl &= ~XSCALE_AUXCTL_K;
1116 #ifdef CPU_XSCALE_CORE3
1117 auxctl |= XSCALE_AUXCTL_LLR;
1118 auxctl |= XSCALE_AUXCTL_MD_MASK;
1120 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1123 #endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_81342 */