1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 * products derived from this software without specific prior written
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * RiscBSD kernel project
40 * C functions for supporting CPU / MMU / TLB specific operations.
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/systm.h>
50 #include <sys/mutex.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
63 #if defined(CPU_XSCALE_81342)
64 #include <arm/xscale/i8134x/i81342reg.h>
67 #ifdef CPU_XSCALE_IXP425
68 #include <arm/xscale/ixp425/ixp425reg.h>
69 #include <arm/xscale/ixp425/ixp425var.h>
72 /* PRIMARY CACHE VARIABLES */
74 int arm_picache_line_size;
77 int arm_pdcache_size; /* and unified */
78 int arm_pdcache_line_size;
82 int arm_pcache_unified;
85 int arm_dcache_align_mask;
87 u_int arm_cache_level;
88 u_int arm_cache_type[14];
92 struct cpu_functions arm9_cpufuncs = {
95 cpufunc_nullop, /* cpwait */
99 cpufunc_control, /* control */
100 arm9_setttb, /* Setttb */
104 armv4_tlb_flushID, /* tlb_flushID */
105 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
106 armv4_tlb_flushD, /* tlb_flushD */
107 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
109 /* Cache operations */
111 arm9_icache_sync_range, /* icache_sync_range */
113 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
114 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
115 arm9_dcache_inv_range, /* dcache_inv_range */
116 arm9_dcache_wb_range, /* dcache_wb_range */
118 armv4_idcache_inv_all, /* idcache_inv_all */
119 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
120 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
121 cpufunc_nullop, /* l2cache_wbinv_all */
122 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
123 (void *)cpufunc_nullop, /* l2cache_inv_range */
124 (void *)cpufunc_nullop, /* l2cache_wb_range */
125 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
127 /* Other functions */
129 armv4_drain_writebuf, /* drain_writebuf */
131 (void *)cpufunc_nullop, /* sleep */
135 arm9_context_switch, /* context_switch */
137 arm9_setup /* cpu setup */
140 #endif /* CPU_ARM9 */
142 #if defined(CPU_ARM9E)
143 struct cpu_functions armv5_ec_cpufuncs = {
146 cpufunc_nullop, /* cpwait */
150 cpufunc_control, /* control */
151 armv5_ec_setttb, /* Setttb */
155 armv4_tlb_flushID, /* tlb_flushID */
156 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
157 armv4_tlb_flushD, /* tlb_flushD */
158 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
160 /* Cache operations */
162 armv5_ec_icache_sync_range, /* icache_sync_range */
164 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
165 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
166 armv5_ec_dcache_inv_range, /* dcache_inv_range */
167 armv5_ec_dcache_wb_range, /* dcache_wb_range */
169 armv4_idcache_inv_all, /* idcache_inv_all */
170 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
171 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
173 cpufunc_nullop, /* l2cache_wbinv_all */
174 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
175 (void *)cpufunc_nullop, /* l2cache_inv_range */
176 (void *)cpufunc_nullop, /* l2cache_wb_range */
177 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
179 /* Other functions */
181 armv4_drain_writebuf, /* drain_writebuf */
183 (void *)cpufunc_nullop, /* sleep */
187 arm9_context_switch, /* context_switch */
189 arm10_setup /* cpu setup */
193 struct cpu_functions sheeva_cpufuncs = {
196 cpufunc_nullop, /* cpwait */
200 cpufunc_control, /* control */
201 sheeva_setttb, /* Setttb */
205 armv4_tlb_flushID, /* tlb_flushID */
206 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
207 armv4_tlb_flushD, /* tlb_flushD */
208 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
210 /* Cache operations */
212 armv5_ec_icache_sync_range, /* icache_sync_range */
214 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
215 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
216 sheeva_dcache_inv_range, /* dcache_inv_range */
217 sheeva_dcache_wb_range, /* dcache_wb_range */
219 armv4_idcache_inv_all, /* idcache_inv_all */
220 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
221 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
223 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
224 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
225 sheeva_l2cache_inv_range, /* l2cache_inv_range */
226 sheeva_l2cache_wb_range, /* l2cache_wb_range */
227 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
229 /* Other functions */
231 armv4_drain_writebuf, /* drain_writebuf */
233 sheeva_cpu_sleep, /* sleep */
237 arm9_context_switch, /* context_switch */
239 arm10_setup /* cpu setup */
241 #endif /* CPU_ARM9E */
244 struct cpu_functions pj4bv7_cpufuncs = {
247 armv7_drain_writebuf, /* cpwait */
251 cpufunc_control, /* control */
252 armv7_setttb, /* Setttb */
256 armv7_tlb_flushID, /* tlb_flushID */
257 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
258 armv7_tlb_flushID, /* tlb_flushD */
259 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
261 /* Cache operations */
262 armv7_icache_sync_range, /* icache_sync_range */
264 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
265 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
266 armv7_dcache_inv_range, /* dcache_inv_range */
267 armv7_dcache_wb_range, /* dcache_wb_range */
269 armv7_idcache_inv_all, /* idcache_inv_all */
270 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
271 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
273 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
274 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
275 (void *)cpufunc_nullop, /* l2cache_inv_range */
276 (void *)cpufunc_nullop, /* l2cache_wb_range */
277 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
279 /* Other functions */
281 armv7_drain_writebuf, /* drain_writebuf */
283 (void *)cpufunc_nullop, /* sleep */
286 armv7_context_switch, /* context_switch */
288 pj4bv7_setup /* cpu setup */
290 #endif /* CPU_MV_PJ4B */
292 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
294 struct cpu_functions xscale_cpufuncs = {
297 xscale_cpwait, /* cpwait */
301 xscale_control, /* control */
302 xscale_setttb, /* setttb */
306 armv4_tlb_flushID, /* tlb_flushID */
307 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
308 armv4_tlb_flushD, /* tlb_flushD */
309 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
311 /* Cache operations */
313 xscale_cache_syncI_rng, /* icache_sync_range */
315 xscale_cache_purgeD, /* dcache_wbinv_all */
316 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
317 xscale_cache_flushD_rng, /* dcache_inv_range */
318 xscale_cache_cleanD_rng, /* dcache_wb_range */
320 xscale_cache_flushID, /* idcache_inv_all */
321 xscale_cache_purgeID, /* idcache_wbinv_all */
322 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
323 cpufunc_nullop, /* l2cache_wbinv_all */
324 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
325 (void *)cpufunc_nullop, /* l2cache_inv_range */
326 (void *)cpufunc_nullop, /* l2cache_wb_range */
327 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
329 /* Other functions */
331 armv4_drain_writebuf, /* drain_writebuf */
333 xscale_cpu_sleep, /* sleep */
337 xscale_context_switch, /* context_switch */
339 xscale_setup /* cpu setup */
342 /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
344 #ifdef CPU_XSCALE_81342
345 struct cpu_functions xscalec3_cpufuncs = {
348 xscale_cpwait, /* cpwait */
352 xscale_control, /* control */
353 xscalec3_setttb, /* setttb */
357 armv4_tlb_flushID, /* tlb_flushID */
358 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
359 armv4_tlb_flushD, /* tlb_flushD */
360 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
362 /* Cache operations */
364 xscalec3_cache_syncI_rng, /* icache_sync_range */
366 xscalec3_cache_purgeD, /* dcache_wbinv_all */
367 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
368 xscale_cache_flushD_rng, /* dcache_inv_range */
369 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
371 xscale_cache_flushID, /* idcache_inv_all */
372 xscalec3_cache_purgeID, /* idcache_wbinv_all */
373 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
374 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
375 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
376 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
377 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
378 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
380 /* Other functions */
382 armv4_drain_writebuf, /* drain_writebuf */
384 xscale_cpu_sleep, /* sleep */
388 xscalec3_context_switch, /* context_switch */
390 xscale_setup /* cpu setup */
392 #endif /* CPU_XSCALE_81342 */
395 #if defined(CPU_FA526)
396 struct cpu_functions fa526_cpufuncs = {
399 cpufunc_nullop, /* cpwait */
403 cpufunc_control, /* control */
404 fa526_setttb, /* setttb */
408 armv4_tlb_flushID, /* tlb_flushID */
409 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
410 armv4_tlb_flushD, /* tlb_flushD */
411 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
413 /* Cache operations */
415 fa526_icache_sync_range, /* icache_sync_range */
417 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
418 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
419 fa526_dcache_inv_range, /* dcache_inv_range */
420 fa526_dcache_wb_range, /* dcache_wb_range */
422 armv4_idcache_inv_all, /* idcache_inv_all */
423 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
424 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
425 cpufunc_nullop, /* l2cache_wbinv_all */
426 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
427 (void *)cpufunc_nullop, /* l2cache_inv_range */
428 (void *)cpufunc_nullop, /* l2cache_wb_range */
429 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
431 /* Other functions */
433 armv4_drain_writebuf, /* drain_writebuf */
435 fa526_cpu_sleep, /* sleep */
440 fa526_context_switch, /* context_switch */
442 fa526_setup /* cpu setup */
444 #endif /* CPU_FA526 */
446 #if defined(CPU_ARM1176)
447 struct cpu_functions arm1176_cpufuncs = {
450 cpufunc_nullop, /* cpwait */
454 cpufunc_control, /* control */
455 arm11x6_setttb, /* Setttb */
459 arm11_tlb_flushID, /* tlb_flushID */
460 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
461 arm11_tlb_flushD, /* tlb_flushD */
462 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
464 /* Cache operations */
466 arm11x6_icache_sync_range, /* icache_sync_range */
468 arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
469 armv6_dcache_wbinv_range, /* dcache_wbinv_range */
470 armv6_dcache_inv_range, /* dcache_inv_range */
471 armv6_dcache_wb_range, /* dcache_wb_range */
473 armv6_idcache_inv_all, /* idcache_inv_all */
474 arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
475 arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
477 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
478 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
479 (void *)cpufunc_nullop, /* l2cache_inv_range */
480 (void *)cpufunc_nullop, /* l2cache_wb_range */
481 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
483 /* Other functions */
485 arm11_drain_writebuf, /* drain_writebuf */
487 arm11x6_sleep, /* sleep */
491 arm11_context_switch, /* context_switch */
493 arm11x6_setup /* cpu setup */
495 #endif /*CPU_ARM1176 */
497 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
498 struct cpu_functions cortexa_cpufuncs = {
501 cpufunc_nullop, /* cpwait */
505 cpufunc_control, /* control */
506 armv7_setttb, /* Setttb */
509 * TLB functions. ARMv7 does all TLB ops based on a unified TLB model
510 * whether the hardware implements separate I+D or not, so we use the
511 * same 'ID' functions for all 3 variations.
514 armv7_tlb_flushID, /* tlb_flushID */
515 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
516 armv7_tlb_flushID, /* tlb_flushD */
517 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
519 /* Cache operations */
521 armv7_icache_sync_range, /* icache_sync_range */
523 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
524 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
525 armv7_dcache_inv_range, /* dcache_inv_range */
526 armv7_dcache_wb_range, /* dcache_wb_range */
528 armv7_idcache_inv_all, /* idcache_inv_all */
529 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
530 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
533 * Note: For CPUs using the PL310 the L2 ops are filled in when the
534 * L2 cache controller is actually enabled.
536 cpufunc_nullop, /* l2cache_wbinv_all */
537 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
538 (void *)cpufunc_nullop, /* l2cache_inv_range */
539 (void *)cpufunc_nullop, /* l2cache_wb_range */
540 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
542 /* Other functions */
544 armv7_drain_writebuf, /* drain_writebuf */
546 armv7_cpu_sleep, /* sleep */
550 armv7_context_switch, /* context_switch */
552 cortexa_setup /* cpu setup */
554 #endif /* CPU_CORTEXA */
557 * Global constants also used by locore.s
560 struct cpu_functions cpufuncs;
563 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */
566 #if defined(CPU_ARM9) || \
567 defined (CPU_ARM9E) || \
568 defined(CPU_ARM1176) || \
569 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
570 defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
571 defined(CPU_XSCALE_81342) || \
572 defined(CPU_CORTEXA) || defined(CPU_KRAIT)
574 /* Global cache line sizes, use 32 as default */
575 int arm_dcache_min_line_size = 32;
576 int arm_icache_min_line_size = 32;
577 int arm_idcache_min_line_size = 32;
579 static void get_cachetype_cp15(void);
581 /* Additional cache information local to this file. Log2 of some of the
583 static int arm_dcache_l2_nsets;
584 static int arm_dcache_l2_assoc;
585 static int arm_dcache_l2_linesize;
590 u_int ctype, isize, dsize, cpuid;
591 u_int clevel, csize, i, sel;
595 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
600 * ...and thus spake the ARM ARM:
602 * If an <opcode2> value corresponding to an unimplemented or
603 * reserved ID register is encountered, the System Control
604 * processor returns the value of the main ID register.
609 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
610 /* Resolve minimal cache line sizes */
611 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
612 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
613 arm_idcache_min_line_size =
614 min(arm_icache_min_line_size, arm_dcache_min_line_size);
616 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
618 arm_cache_level = clevel;
619 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
621 while ((type = (clevel & 0x7)) && i < 7) {
622 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
623 type == CACHE_SEP_CACHE) {
625 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
627 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
629 arm_cache_type[sel] = csize;
630 arm_dcache_align = 1 <<
631 (CPUV7_CT_xSIZE_LEN(csize) + 4);
632 arm_dcache_align_mask = arm_dcache_align - 1;
634 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
636 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
638 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
640 arm_cache_type[sel] = csize;
646 if ((ctype & CPU_CT_S) == 0)
647 arm_pcache_unified = 1;
650 * If you want to know how this code works, go read the ARM ARM.
653 arm_pcache_type = CPU_CT_CTYPE(ctype);
655 if (arm_pcache_unified == 0) {
656 isize = CPU_CT_ISIZE(ctype);
657 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
658 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
659 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
660 if (isize & CPU_CT_xSIZE_M)
661 arm_picache_line_size = 0; /* not present */
663 arm_picache_ways = 1;
665 arm_picache_ways = multiplier <<
666 (CPU_CT_xSIZE_ASSOC(isize) - 1);
668 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
671 dsize = CPU_CT_DSIZE(ctype);
672 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
673 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
674 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
675 if (dsize & CPU_CT_xSIZE_M)
676 arm_pdcache_line_size = 0; /* not present */
678 arm_pdcache_ways = 1;
680 arm_pdcache_ways = multiplier <<
681 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
683 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
685 arm_dcache_align = arm_pdcache_line_size;
687 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
688 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
689 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
690 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
693 arm_dcache_align_mask = arm_dcache_align - 1;
696 #endif /* ARM9 || XSCALE */
699 * Cannot panic here as we may not have a console yet ...
705 cputype = cpu_ident();
706 cputype &= CPU_ID_CPU_MASK;
709 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
710 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
711 (cputype & 0x0000f000) == 0x00009000) {
712 cpufuncs = arm9_cpufuncs;
713 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
714 get_cachetype_cp15();
715 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
716 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
717 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
718 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
719 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
720 pmap_pte_init_generic();
723 #endif /* CPU_ARM9 */
724 #if defined(CPU_ARM9E)
725 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
726 cputype == CPU_ID_MV88FR571_41) {
727 uint32_t sheeva_ctrl;
729 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
732 * Workaround for Marvell MV78100 CPU: Cache prefetch
733 * mechanism may affect the cache coherency validity,
734 * so it needs to be disabled.
736 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
737 * L2 Prefetching Mechanism) for details.
739 if (cputype == CPU_ID_MV88FR571_VD ||
740 cputype == CPU_ID_MV88FR571_41)
741 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
743 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
745 cpufuncs = sheeva_cpufuncs;
746 get_cachetype_cp15();
747 pmap_pte_init_generic();
749 } else if (cputype == CPU_ID_ARM926EJS) {
750 cpufuncs = armv5_ec_cpufuncs;
751 get_cachetype_cp15();
752 pmap_pte_init_generic();
755 #endif /* CPU_ARM9E */
756 #if defined(CPU_ARM1176)
757 if (cputype == CPU_ID_ARM1176JZS) {
758 cpufuncs = arm1176_cpufuncs;
759 get_cachetype_cp15();
762 #endif /* CPU_ARM1176 */
763 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
764 switch(cputype & CPU_ID_SCHEME_MASK) {
765 case CPU_ID_CORTEXA5:
766 case CPU_ID_CORTEXA7:
767 case CPU_ID_CORTEXA8:
768 case CPU_ID_CORTEXA9:
769 case CPU_ID_CORTEXA12:
770 case CPU_ID_CORTEXA15:
771 case CPU_ID_KRAIT300:
772 cpufuncs = cortexa_cpufuncs;
773 get_cachetype_cp15();
778 #endif /* CPU_CORTEXA */
780 #if defined(CPU_MV_PJ4B)
781 if (cputype == CPU_ID_MV88SV581X_V7 ||
782 cputype == CPU_ID_MV88SV584X_V7 ||
783 cputype == CPU_ID_ARM_88SV581X_V7) {
784 cpufuncs = pj4bv7_cpufuncs;
785 get_cachetype_cp15();
788 #endif /* CPU_MV_PJ4B */
790 #if defined(CPU_FA526)
791 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
792 cpufuncs = fa526_cpufuncs;
793 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
794 get_cachetype_cp15();
795 pmap_pte_init_generic();
799 #endif /* CPU_FA526 */
801 #if defined(CPU_XSCALE_81342)
802 if (cputype == CPU_ID_81342) {
803 cpufuncs = xscalec3_cpufuncs;
804 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
805 get_cachetype_cp15();
806 pmap_pte_init_xscale();
809 #endif /* CPU_XSCALE_81342 */
810 #ifdef CPU_XSCALE_PXA2X0
811 /* ignore core revision to test PXA2xx CPUs */
812 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
813 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
814 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
816 cpufuncs = xscale_cpufuncs;
817 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
818 get_cachetype_cp15();
819 pmap_pte_init_xscale();
823 #endif /* CPU_XSCALE_PXA2X0 */
824 #ifdef CPU_XSCALE_IXP425
825 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
826 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
828 cpufuncs = xscale_cpufuncs;
829 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
830 get_cachetype_cp15();
831 pmap_pte_init_xscale();
835 #endif /* CPU_XSCALE_IXP425 */
837 * Bzzzz. And the answer was ...
839 panic("No support for this CPU type (%08x) in kernel", cputype);
840 return(ARCHITECTURE_NOT_PRESENT);
842 uma_set_align(arm_dcache_align_mask);
854 int cpuctrl, cpuctrlmask;
856 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
857 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
858 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
859 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
860 CPU_CONTROL_ROUNDROBIN;
861 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
862 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
863 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
864 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
865 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
866 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
867 | CPU_CONTROL_ROUNDROBIN;
869 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
870 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
874 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
876 if (vector_page == ARM_VECTORS_HIGH)
877 cpuctrl |= CPU_CONTROL_VECRELOC;
879 /* Clear out the cache */
880 cpu_idcache_wbinv_all();
882 /* Set the control register (SCTLR) */
883 cpu_control(cpuctrlmask, cpuctrl);
886 #endif /* CPU_ARM9 */
888 #if defined(CPU_ARM9E)
892 int cpuctrl, cpuctrlmask;
894 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
895 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
896 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
897 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
898 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
899 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
900 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
901 | CPU_CONTROL_BPRD_ENABLE
902 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
904 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
905 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
909 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
912 /* Clear out the cache */
913 cpu_idcache_wbinv_all();
915 /* Now really make sure they are clean. */
916 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
918 if (vector_page == ARM_VECTORS_HIGH)
919 cpuctrl |= CPU_CONTROL_VECRELOC;
921 /* Set the control register */
922 cpu_control(0xffffffff, cpuctrl);
925 cpu_idcache_wbinv_all();
927 #endif /* CPU_ARM9E || CPU_ARM10 */
929 #if defined(CPU_ARM1176) \
930 || defined(CPU_MV_PJ4B) \
931 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
933 cpu_scc_setup_ccnt(void)
935 /* This is how you give userland access to the CCNT and PMCn
937 * BEWARE! This gives write access also, which may not be what
940 #ifdef _PMC_USER_READ_WRITE_
941 /* Set PMUSERENR[0] to allow userland access */
942 cp15_pmuserenr_set(1);
944 #if defined(CPU_ARM1176)
945 /* Set PMCR[2,0] to enable counters and reset CCNT */
948 /* Set up the PMCCNTR register as a cyclecounter:
949 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
950 * Set PMCR[2,0] to enable counters and reset CCNT
951 * Set PMCNTENSET to 0x80000000 to enable CCNT */
952 cp15_pminten_clr(0xFFFFFFFF);
954 cp15_pmcnten_set(0x80000000);
959 #if defined(CPU_ARM1176)
963 uint32_t auxctrl, auxctrl_wax;
973 * Enable an errata workaround
975 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
976 auxctrl = ARM1176_AUXCTL_PHD;
977 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
980 tmp = cp15_actlr_get();
987 cpu_scc_setup_ccnt();
989 #endif /* CPU_ARM1176 */
997 cpu_scc_setup_ccnt();
999 #endif /* CPU_MV_PJ4B */
1001 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1007 cpu_scc_setup_ccnt();
1009 #endif /* CPU_CORTEXA */
1011 #if defined(CPU_FA526)
1015 int cpuctrl, cpuctrlmask;
1017 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1018 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1019 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1020 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1021 | CPU_CONTROL_BPRD_ENABLE;
1022 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1023 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1024 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1025 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1026 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1027 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1028 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1030 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1031 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1035 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1038 if (vector_page == ARM_VECTORS_HIGH)
1039 cpuctrl |= CPU_CONTROL_VECRELOC;
1041 /* Clear out the cache */
1042 cpu_idcache_wbinv_all();
1044 /* Set the control register */
1045 cpu_control(0xffffffff, cpuctrl);
1047 #endif /* CPU_FA526 */
1049 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1050 defined(CPU_XSCALE_81342)
1055 int cpuctrl, cpuctrlmask;
1058 * The XScale Write Buffer is always enabled. Our option
1059 * is to enable/disable coalescing. Note that bits 6:3
1060 * must always be enabled.
1063 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1064 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1065 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1066 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1067 | CPU_CONTROL_BPRD_ENABLE;
1068 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1069 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1070 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1071 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1072 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1073 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1074 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1075 CPU_CONTROL_L2_ENABLE;
1077 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1078 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1082 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1085 if (vector_page == ARM_VECTORS_HIGH)
1086 cpuctrl |= CPU_CONTROL_VECRELOC;
1087 #ifdef CPU_XSCALE_CORE3
1088 cpuctrl |= CPU_CONTROL_L2_ENABLE;
1091 /* Clear out the cache */
1092 cpu_idcache_wbinv_all();
1095 * Set the control register. Note that bits 6:3 must always
1098 /* cpu_control(cpuctrlmask, cpuctrl);*/
1099 cpu_control(0xffffffff, cpuctrl);
1101 /* Make sure write coalescing is turned on */
1102 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1104 #ifdef XSCALE_NO_COALESCE_WRITES
1105 auxctl |= XSCALE_AUXCTL_K;
1107 auxctl &= ~XSCALE_AUXCTL_K;
1109 #ifdef CPU_XSCALE_CORE3
1110 auxctl |= XSCALE_AUXCTL_LLR;
1111 auxctl |= XSCALE_AUXCTL_MD_MASK;
1113 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1116 #endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */