1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 * products derived from this software without specific prior written
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * RiscBSD kernel project
40 * C functions for supporting CPU / MMU / TLB specific operations.
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/systm.h>
50 #include <sys/mutex.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
63 #if defined(CPU_XSCALE_81342)
64 #include <arm/xscale/i8134x/i81342reg.h>
67 #ifdef CPU_XSCALE_IXP425
68 #include <arm/xscale/ixp425/ixp425reg.h>
69 #include <arm/xscale/ixp425/ixp425var.h>
72 /* PRIMARY CACHE VARIABLES */
74 int arm_picache_line_size;
77 int arm_pdcache_size; /* and unified */
78 int arm_pdcache_line_size;
82 int arm_pcache_unified;
85 int arm_dcache_align_mask;
87 u_int arm_cache_level;
88 u_int arm_cache_type[14];
92 struct cpu_functions arm9_cpufuncs = {
95 cpufunc_nullop, /* cpwait */
99 cpufunc_control, /* control */
100 arm9_setttb, /* Setttb */
104 armv4_tlb_flushID, /* tlb_flushID */
105 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
106 armv4_tlb_flushD, /* tlb_flushD */
107 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
109 /* Cache operations */
111 arm9_icache_sync_range, /* icache_sync_range */
113 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
114 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
115 arm9_dcache_inv_range, /* dcache_inv_range */
116 arm9_dcache_wb_range, /* dcache_wb_range */
118 armv4_idcache_inv_all, /* idcache_inv_all */
119 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
120 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
121 cpufunc_nullop, /* l2cache_wbinv_all */
122 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
123 (void *)cpufunc_nullop, /* l2cache_inv_range */
124 (void *)cpufunc_nullop, /* l2cache_wb_range */
125 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
127 /* Other functions */
129 armv4_drain_writebuf, /* drain_writebuf */
131 (void *)cpufunc_nullop, /* sleep */
135 arm9_context_switch, /* context_switch */
137 arm9_setup /* cpu setup */
140 #endif /* CPU_ARM9 */
142 #if defined(CPU_ARM9E)
143 struct cpu_functions armv5_ec_cpufuncs = {
146 cpufunc_nullop, /* cpwait */
150 cpufunc_control, /* control */
151 armv5_ec_setttb, /* Setttb */
155 armv4_tlb_flushID, /* tlb_flushID */
156 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
157 armv4_tlb_flushD, /* tlb_flushD */
158 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
160 /* Cache operations */
162 armv5_ec_icache_sync_range, /* icache_sync_range */
164 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
165 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
166 armv5_ec_dcache_inv_range, /* dcache_inv_range */
167 armv5_ec_dcache_wb_range, /* dcache_wb_range */
169 armv4_idcache_inv_all, /* idcache_inv_all */
170 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
171 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
173 cpufunc_nullop, /* l2cache_wbinv_all */
174 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
175 (void *)cpufunc_nullop, /* l2cache_inv_range */
176 (void *)cpufunc_nullop, /* l2cache_wb_range */
177 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
179 /* Other functions */
181 armv4_drain_writebuf, /* drain_writebuf */
183 (void *)cpufunc_nullop, /* sleep */
187 arm9_context_switch, /* context_switch */
189 arm10_setup /* cpu setup */
193 struct cpu_functions sheeva_cpufuncs = {
196 cpufunc_nullop, /* cpwait */
200 cpufunc_control, /* control */
201 sheeva_setttb, /* Setttb */
205 armv4_tlb_flushID, /* tlb_flushID */
206 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
207 armv4_tlb_flushD, /* tlb_flushD */
208 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
210 /* Cache operations */
212 armv5_ec_icache_sync_range, /* icache_sync_range */
214 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
215 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
216 sheeva_dcache_inv_range, /* dcache_inv_range */
217 sheeva_dcache_wb_range, /* dcache_wb_range */
219 armv4_idcache_inv_all, /* idcache_inv_all */
220 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
221 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
223 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
224 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
225 sheeva_l2cache_inv_range, /* l2cache_inv_range */
226 sheeva_l2cache_wb_range, /* l2cache_wb_range */
227 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
229 /* Other functions */
231 armv4_drain_writebuf, /* drain_writebuf */
233 sheeva_cpu_sleep, /* sleep */
237 arm9_context_switch, /* context_switch */
239 arm10_setup /* cpu setup */
241 #endif /* CPU_ARM9E */
244 struct cpu_functions pj4bv7_cpufuncs = {
247 armv7_drain_writebuf, /* cpwait */
251 cpufunc_control, /* control */
252 armv7_setttb, /* Setttb */
256 armv7_tlb_flushID, /* tlb_flushID */
257 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
258 armv7_tlb_flushID, /* tlb_flushD */
259 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
261 /* Cache operations */
262 armv7_icache_sync_range, /* icache_sync_range */
264 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
265 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
266 armv7_dcache_inv_range, /* dcache_inv_range */
267 armv7_dcache_wb_range, /* dcache_wb_range */
269 armv7_idcache_inv_all, /* idcache_inv_all */
270 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
271 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
273 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
274 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
275 (void *)cpufunc_nullop, /* l2cache_inv_range */
276 (void *)cpufunc_nullop, /* l2cache_wb_range */
277 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
279 /* Other functions */
281 armv7_drain_writebuf, /* drain_writebuf */
283 (void *)cpufunc_nullop, /* sleep */
286 armv7_context_switch, /* context_switch */
288 pj4bv7_setup /* cpu setup */
290 #endif /* CPU_MV_PJ4B */
292 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
294 struct cpu_functions xscale_cpufuncs = {
297 xscale_cpwait, /* cpwait */
301 xscale_control, /* control */
302 xscale_setttb, /* setttb */
306 armv4_tlb_flushID, /* tlb_flushID */
307 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
308 armv4_tlb_flushD, /* tlb_flushD */
309 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
311 /* Cache operations */
313 xscale_cache_syncI_rng, /* icache_sync_range */
315 xscale_cache_purgeD, /* dcache_wbinv_all */
316 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
317 xscale_cache_flushD_rng, /* dcache_inv_range */
318 xscale_cache_cleanD_rng, /* dcache_wb_range */
320 xscale_cache_flushID, /* idcache_inv_all */
321 xscale_cache_purgeID, /* idcache_wbinv_all */
322 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
323 cpufunc_nullop, /* l2cache_wbinv_all */
324 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
325 (void *)cpufunc_nullop, /* l2cache_inv_range */
326 (void *)cpufunc_nullop, /* l2cache_wb_range */
327 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
329 /* Other functions */
331 armv4_drain_writebuf, /* drain_writebuf */
333 xscale_cpu_sleep, /* sleep */
337 xscale_context_switch, /* context_switch */
339 xscale_setup /* cpu setup */
342 /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
344 #ifdef CPU_XSCALE_81342
345 struct cpu_functions xscalec3_cpufuncs = {
348 xscale_cpwait, /* cpwait */
352 xscale_control, /* control */
353 xscalec3_setttb, /* setttb */
357 armv4_tlb_flushID, /* tlb_flushID */
358 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
359 armv4_tlb_flushD, /* tlb_flushD */
360 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
362 /* Cache operations */
364 xscalec3_cache_syncI_rng, /* icache_sync_range */
366 xscalec3_cache_purgeD, /* dcache_wbinv_all */
367 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
368 xscale_cache_flushD_rng, /* dcache_inv_range */
369 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
371 xscale_cache_flushID, /* idcache_inv_all */
372 xscalec3_cache_purgeID, /* idcache_wbinv_all */
373 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
374 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
375 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
376 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
377 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
378 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
380 /* Other functions */
382 armv4_drain_writebuf, /* drain_writebuf */
384 xscale_cpu_sleep, /* sleep */
388 xscalec3_context_switch, /* context_switch */
390 xscale_setup /* cpu setup */
392 #endif /* CPU_XSCALE_81342 */
395 #if defined(CPU_FA526)
396 struct cpu_functions fa526_cpufuncs = {
399 cpufunc_nullop, /* cpwait */
403 cpufunc_control, /* control */
404 fa526_setttb, /* setttb */
408 armv4_tlb_flushID, /* tlb_flushID */
409 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
410 armv4_tlb_flushD, /* tlb_flushD */
411 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
413 /* Cache operations */
415 fa526_icache_sync_range, /* icache_sync_range */
417 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
418 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
419 fa526_dcache_inv_range, /* dcache_inv_range */
420 fa526_dcache_wb_range, /* dcache_wb_range */
422 armv4_idcache_inv_all, /* idcache_inv_all */
423 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
424 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
425 cpufunc_nullop, /* l2cache_wbinv_all */
426 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
427 (void *)cpufunc_nullop, /* l2cache_inv_range */
428 (void *)cpufunc_nullop, /* l2cache_wb_range */
429 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
431 /* Other functions */
433 armv4_drain_writebuf, /* drain_writebuf */
435 fa526_cpu_sleep, /* sleep */
440 fa526_context_switch, /* context_switch */
442 fa526_setup /* cpu setup */
444 #endif /* CPU_FA526 */
446 #if defined(CPU_ARM1176)
447 struct cpu_functions arm1176_cpufuncs = {
450 cpufunc_nullop, /* cpwait */
454 cpufunc_control, /* control */
455 arm11x6_setttb, /* Setttb */
459 arm11_tlb_flushID, /* tlb_flushID */
460 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
461 arm11_tlb_flushD, /* tlb_flushD */
462 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
464 /* Cache operations */
466 arm11x6_icache_sync_range, /* icache_sync_range */
468 arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
469 armv6_dcache_wbinv_range, /* dcache_wbinv_range */
470 armv6_dcache_inv_range, /* dcache_inv_range */
471 armv6_dcache_wb_range, /* dcache_wb_range */
473 armv6_idcache_inv_all, /* idcache_inv_all */
474 arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
475 arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
477 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
478 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
479 (void *)cpufunc_nullop, /* l2cache_inv_range */
480 (void *)cpufunc_nullop, /* l2cache_wb_range */
481 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
483 /* Other functions */
485 arm11_drain_writebuf, /* drain_writebuf */
487 arm11x6_sleep, /* sleep */
491 arm11_context_switch, /* context_switch */
493 arm11x6_setup /* cpu setup */
495 #endif /* CPU_ARM1176 */
497 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
498 struct cpu_functions cortexa_cpufuncs = {
501 cpufunc_nullop, /* cpwait */
505 cpufunc_control, /* control */
506 armv7_setttb, /* Setttb */
509 * TLB functions. ARMv7 does all TLB ops based on a unified TLB model
510 * whether the hardware implements separate I+D or not, so we use the
511 * same 'ID' functions for all 3 variations.
514 armv7_tlb_flushID, /* tlb_flushID */
515 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
516 armv7_tlb_flushID, /* tlb_flushD */
517 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
519 /* Cache operations */
521 armv7_icache_sync_range, /* icache_sync_range */
523 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
524 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
525 armv7_dcache_inv_range, /* dcache_inv_range */
526 armv7_dcache_wb_range, /* dcache_wb_range */
528 armv7_idcache_inv_all, /* idcache_inv_all */
529 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
530 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
533 * Note: For CPUs using the PL310 the L2 ops are filled in when the
534 * L2 cache controller is actually enabled.
536 cpufunc_nullop, /* l2cache_wbinv_all */
537 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
538 (void *)cpufunc_nullop, /* l2cache_inv_range */
539 (void *)cpufunc_nullop, /* l2cache_wb_range */
540 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
542 /* Other functions */
544 armv7_drain_writebuf, /* drain_writebuf */
546 armv7_cpu_sleep, /* sleep */
550 armv7_context_switch, /* context_switch */
552 cortexa_setup /* cpu setup */
554 #endif /* CPU_CORTEXA || CPU_KRAIT */
557 * Global constants also used by locore.s
560 struct cpu_functions cpufuncs;
562 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
564 #if defined(CPU_ARM9) || \
565 defined (CPU_ARM9E) || \
566 defined(CPU_ARM1176) || \
567 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
568 defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
569 defined(CPU_XSCALE_81342) || \
570 defined(CPU_CORTEXA) || defined(CPU_KRAIT)
572 /* Global cache line sizes, use 32 as default */
573 int arm_dcache_min_line_size = 32;
574 int arm_icache_min_line_size = 32;
575 int arm_idcache_min_line_size = 32;
577 static void get_cachetype_cp15(void);
579 /* Additional cache information local to this file. Log2 of some of the
581 static int arm_dcache_l2_nsets;
582 static int arm_dcache_l2_assoc;
583 static int arm_dcache_l2_linesize;
588 u_int ctype, isize, dsize, cpuid;
589 u_int clevel, csize, i, sel;
593 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
598 * ...and thus spake the ARM ARM:
600 * If an <opcode2> value corresponding to an unimplemented or
601 * reserved ID register is encountered, the System Control
602 * processor returns the value of the main ID register.
607 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
608 /* Resolve minimal cache line sizes */
609 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
610 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
611 arm_idcache_min_line_size =
612 min(arm_icache_min_line_size, arm_dcache_min_line_size);
614 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
616 arm_cache_level = clevel;
617 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
619 while ((type = (clevel & 0x7)) && i < 7) {
620 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
621 type == CACHE_SEP_CACHE) {
623 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
625 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
627 arm_cache_type[sel] = csize;
628 arm_dcache_align = 1 <<
629 (CPUV7_CT_xSIZE_LEN(csize) + 4);
630 arm_dcache_align_mask = arm_dcache_align - 1;
632 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
634 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
636 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
638 arm_cache_type[sel] = csize;
644 if ((ctype & CPU_CT_S) == 0)
645 arm_pcache_unified = 1;
648 * If you want to know how this code works, go read the ARM ARM.
651 arm_pcache_type = CPU_CT_CTYPE(ctype);
653 if (arm_pcache_unified == 0) {
654 isize = CPU_CT_ISIZE(ctype);
655 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
656 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
657 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
658 if (isize & CPU_CT_xSIZE_M)
659 arm_picache_line_size = 0; /* not present */
661 arm_picache_ways = 1;
663 arm_picache_ways = multiplier <<
664 (CPU_CT_xSIZE_ASSOC(isize) - 1);
666 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
669 dsize = CPU_CT_DSIZE(ctype);
670 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
671 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
672 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
673 if (dsize & CPU_CT_xSIZE_M)
674 arm_pdcache_line_size = 0; /* not present */
676 arm_pdcache_ways = 1;
678 arm_pdcache_ways = multiplier <<
679 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
681 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
683 arm_dcache_align = arm_pdcache_line_size;
685 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
686 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
687 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
688 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
691 arm_dcache_align_mask = arm_dcache_align - 1;
694 #endif /* ARM9 || XSCALE */
697 * Cannot panic here as we may not have a console yet ...
703 cputype = cpu_ident();
704 cputype &= CPU_ID_CPU_MASK;
707 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
708 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
709 (cputype & 0x0000f000) == 0x00009000) {
710 cpufuncs = arm9_cpufuncs;
711 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
712 get_cachetype_cp15();
713 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
714 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
715 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
716 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
717 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
718 pmap_pte_init_generic();
721 #endif /* CPU_ARM9 */
722 #if defined(CPU_ARM9E)
723 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
724 cputype == CPU_ID_MV88FR571_41) {
725 uint32_t sheeva_ctrl;
727 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
730 * Workaround for Marvell MV78100 CPU: Cache prefetch
731 * mechanism may affect the cache coherency validity,
732 * so it needs to be disabled.
734 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
735 * L2 Prefetching Mechanism) for details.
737 if (cputype == CPU_ID_MV88FR571_VD ||
738 cputype == CPU_ID_MV88FR571_41)
739 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
741 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
743 cpufuncs = sheeva_cpufuncs;
744 get_cachetype_cp15();
745 pmap_pte_init_generic();
747 } else if (cputype == CPU_ID_ARM926EJS) {
748 cpufuncs = armv5_ec_cpufuncs;
749 get_cachetype_cp15();
750 pmap_pte_init_generic();
753 #endif /* CPU_ARM9E */
754 #if defined(CPU_ARM1176)
755 if (cputype == CPU_ID_ARM1176JZS) {
756 cpufuncs = arm1176_cpufuncs;
757 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
758 get_cachetype_cp15();
761 #endif /* CPU_ARM1176 */
762 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
763 if (cputype == CPU_ID_CORTEXA5 ||
764 cputype == CPU_ID_CORTEXA7 ||
765 cputype == CPU_ID_CORTEXA8R1 ||
766 cputype == CPU_ID_CORTEXA8R2 ||
767 cputype == CPU_ID_CORTEXA8R3 ||
768 cputype == CPU_ID_CORTEXA9R1 ||
769 cputype == CPU_ID_CORTEXA9R2 ||
770 cputype == CPU_ID_CORTEXA9R3 ||
771 cputype == CPU_ID_CORTEXA9R4 ||
772 cputype == CPU_ID_CORTEXA12R0 ||
773 cputype == CPU_ID_CORTEXA15R0 ||
774 cputype == CPU_ID_CORTEXA15R1 ||
775 cputype == CPU_ID_CORTEXA15R2 ||
776 cputype == CPU_ID_CORTEXA15R3 ||
777 cputype == CPU_ID_KRAIT300R0 ||
778 cputype == CPU_ID_KRAIT300R1 ) {
779 cpufuncs = cortexa_cpufuncs;
780 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
781 get_cachetype_cp15();
784 #endif /* CPU_CORTEXA */
786 #if defined(CPU_MV_PJ4B)
787 if (cputype == CPU_ID_MV88SV581X_V7 ||
788 cputype == CPU_ID_MV88SV584X_V7 ||
789 cputype == CPU_ID_ARM_88SV581X_V7) {
790 cpufuncs = pj4bv7_cpufuncs;
791 get_cachetype_cp15();
794 #endif /* CPU_MV_PJ4B */
796 #if defined(CPU_FA526)
797 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
798 cpufuncs = fa526_cpufuncs;
799 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
800 get_cachetype_cp15();
801 pmap_pte_init_generic();
805 #endif /* CPU_FA526 */
807 #if defined(CPU_XSCALE_81342)
808 if (cputype == CPU_ID_81342) {
809 cpufuncs = xscalec3_cpufuncs;
810 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
811 get_cachetype_cp15();
812 pmap_pte_init_xscale();
815 #endif /* CPU_XSCALE_81342 */
816 #ifdef CPU_XSCALE_PXA2X0
817 /* ignore core revision to test PXA2xx CPUs */
818 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
819 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
820 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
822 cpufuncs = xscale_cpufuncs;
823 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
824 get_cachetype_cp15();
825 pmap_pte_init_xscale();
829 #endif /* CPU_XSCALE_PXA2X0 */
830 #ifdef CPU_XSCALE_IXP425
831 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
832 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
834 cpufuncs = xscale_cpufuncs;
835 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
836 get_cachetype_cp15();
837 pmap_pte_init_xscale();
841 #endif /* CPU_XSCALE_IXP425 */
843 * Bzzzz. And the answer was ...
845 panic("No support for this CPU type (%08x) in kernel", cputype);
846 return(ARCHITECTURE_NOT_PRESENT);
848 uma_set_align(arm_dcache_align_mask);
860 int cpuctrl, cpuctrlmask;
862 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
863 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
864 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
865 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
866 CPU_CONTROL_ROUNDROBIN;
867 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
868 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
869 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
870 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
871 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
872 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
873 | CPU_CONTROL_ROUNDROBIN;
875 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
876 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
880 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
882 if (vector_page == ARM_VECTORS_HIGH)
883 cpuctrl |= CPU_CONTROL_VECRELOC;
885 /* Clear out the cache */
886 cpu_idcache_wbinv_all();
888 /* Set the control register */
889 cpu_control(cpuctrlmask, cpuctrl);
892 #endif /* CPU_ARM9 */
894 #if defined(CPU_ARM9E)
898 int cpuctrl, cpuctrlmask;
900 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
901 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
902 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
903 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
904 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
905 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
906 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
907 | CPU_CONTROL_BPRD_ENABLE
908 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
910 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
911 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
915 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
918 /* Clear out the cache */
919 cpu_idcache_wbinv_all();
921 /* Now really make sure they are clean. */
922 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
924 if (vector_page == ARM_VECTORS_HIGH)
925 cpuctrl |= CPU_CONTROL_VECRELOC;
927 /* Set the control register */
928 cpu_control(0xffffffff, cpuctrl);
931 cpu_idcache_wbinv_all();
933 #endif /* CPU_ARM9E */
935 #if defined(CPU_ARM1176) \
936 || defined(CPU_MV_PJ4B) \
937 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
939 cpu_scc_setup_ccnt(void)
941 /* This is how you give userland access to the CCNT and PMCn
943 * BEWARE! This gives write access also, which may not be what
946 #ifdef _PMC_USER_READ_WRITE_
947 /* Set PMUSERENR[0] to allow userland access */
948 cp15_pmuserenr_set(1);
950 #if defined(CPU_ARM1176)
951 /* Set PMCR[2,0] to enable counters and reset CCNT */
954 /* Set up the PMCCNTR register as a cyclecounter:
955 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
956 * Set PMCR[2,0] to enable counters and reset CCNT
957 * Set PMCNTENSET to 0x80000000 to enable CCNT */
958 cp15_pminten_clr(0xFFFFFFFF);
960 cp15_pmcnten_set(0x80000000);
965 #if defined(CPU_ARM1176)
969 uint32_t auxctrl, auxctrl_wax;
979 * Enable an errata workaround
981 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
982 auxctrl = ARM1176_AUXCTL_PHD;
983 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
986 tmp = cp15_actlr_get();
993 cpu_scc_setup_ccnt();
995 #endif /* CPU_ARM1176 */
1003 cpu_scc_setup_ccnt();
1005 #endif /* CPU_MV_PJ4B */
1007 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1013 cpu_scc_setup_ccnt();
1015 #endif /* CPU_CORTEXA || CPU_KRAIT */
1017 #if defined(CPU_FA526)
1021 int cpuctrl, cpuctrlmask;
1023 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1024 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1025 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1026 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1027 | CPU_CONTROL_BPRD_ENABLE;
1028 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1029 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1030 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1031 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1032 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1033 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1034 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1036 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1037 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1041 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1044 if (vector_page == ARM_VECTORS_HIGH)
1045 cpuctrl |= CPU_CONTROL_VECRELOC;
1047 /* Clear out the cache */
1048 cpu_idcache_wbinv_all();
1050 /* Set the control register */
1051 cpu_control(0xffffffff, cpuctrl);
1053 #endif /* CPU_FA526 */
1055 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1056 defined(CPU_XSCALE_81342)
1061 int cpuctrl, cpuctrlmask;
1064 * The XScale Write Buffer is always enabled. Our option
1065 * is to enable/disable coalescing. Note that bits 6:3
1066 * must always be enabled.
1069 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1070 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1071 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1072 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1073 | CPU_CONTROL_BPRD_ENABLE;
1074 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1075 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1076 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1077 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1078 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1079 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1080 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1081 CPU_CONTROL_L2_ENABLE;
1083 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1084 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1088 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1091 if (vector_page == ARM_VECTORS_HIGH)
1092 cpuctrl |= CPU_CONTROL_VECRELOC;
1093 #ifdef CPU_XSCALE_CORE3
1094 cpuctrl |= CPU_CONTROL_L2_ENABLE;
1097 /* Clear out the cache */
1098 cpu_idcache_wbinv_all();
1101 * Set the control register. Note that bits 6:3 must always
1104 /* cpu_control(cpuctrlmask, cpuctrl);*/
1105 cpu_control(0xffffffff, cpuctrl);
1107 /* Make sure write coalescing is turned on */
1108 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1110 #ifdef XSCALE_NO_COALESCE_WRITES
1111 auxctl |= XSCALE_AUXCTL_K;
1113 auxctl &= ~XSCALE_AUXCTL_K;
1115 #ifdef CPU_XSCALE_CORE3
1116 auxctl |= XSCALE_AUXCTL_LLR;
1117 auxctl |= XSCALE_AUXCTL_MD_MASK;
1119 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1122 #endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */