1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 * products derived from this software without specific prior written
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * RiscBSD kernel project
40 * C functions for supporting CPU / MMU / TLB specific operations.
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/systm.h>
50 #include <sys/mutex.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
60 #include <machine/cpufunc.h>
62 #if defined(CPU_XSCALE_81342)
63 #include <arm/xscale/i8134x/i81342reg.h>
66 #ifdef CPU_XSCALE_IXP425
67 #include <arm/xscale/ixp425/ixp425reg.h>
68 #include <arm/xscale/ixp425/ixp425var.h>
71 /* PRIMARY CACHE VARIABLES */
/*
 * These globals describe the CPU's L1 (and unified) caches.  They are
 * populated at boot by get_cachetype_cp15() below, which decodes the
 * CP15 cache type / cache size ID registers.
 */
73 int arm_picache_line_size;
76 int arm_pdcache_size; /* and unified */
77 int arm_pdcache_line_size;
81 int arm_pcache_unified;
84 int arm_dcache_align_mask;
86 u_int arm_cache_level;
87 u_int arm_cache_type[14];
/*
 * CPU function table for ARM9 cores.  ARM9 has no L2 cache, so all of
 * the l2cache slots point at cpufunc_nullop.  TLB maintenance uses the
 * generic armv4 routines; cache maintenance uses the arm9-specific ones.
 */
91 struct cpu_functions arm9_cpufuncs = {
94 cpufunc_nullop, /* cpwait */
98 cpufunc_control, /* control */
99 arm9_setttb, /* Setttb */
103 armv4_tlb_flushID, /* tlb_flushID */
104 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
105 armv4_tlb_flushD, /* tlb_flushD */
106 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
108 /* Cache operations */
110 arm9_icache_sync_range, /* icache_sync_range */
112 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
113 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
114 arm9_dcache_inv_range, /* dcache_inv_range */
115 arm9_dcache_wb_range, /* dcache_wb_range */
117 armv4_idcache_inv_all, /* idcache_inv_all */
118 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
119 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
120 cpufunc_nullop, /* l2cache_wbinv_all */
121 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
122 (void *)cpufunc_nullop, /* l2cache_inv_range */
123 (void *)cpufunc_nullop, /* l2cache_wb_range */
124 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
126 /* Other functions */
128 armv4_drain_writebuf, /* drain_writebuf */
130 (void *)cpufunc_nullop, /* sleep */
134 arm9_context_switch, /* context_switch */
136 arm9_setup /* cpu setup */
139 #endif /* CPU_ARM9 */
141 #if defined(CPU_ARM9E)
/*
 * CPU function table for ARMv5 "E"-class cores (e.g. ARM926EJ-S, selected
 * in set_cpufuncs() below).  No L2 cache: all l2cache slots are no-ops.
 * TLB ops are shared with armv4; the cpu setup entry reuses arm10_setup.
 */
142 struct cpu_functions armv5_ec_cpufuncs = {
145 cpufunc_nullop, /* cpwait */
149 cpufunc_control, /* control */
150 armv5_ec_setttb, /* Setttb */
154 armv4_tlb_flushID, /* tlb_flushID */
155 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
156 armv4_tlb_flushD, /* tlb_flushD */
157 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
159 /* Cache operations */
161 armv5_ec_icache_sync_range, /* icache_sync_range */
163 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
164 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
165 armv5_ec_dcache_inv_range, /* dcache_inv_range */
166 armv5_ec_dcache_wb_range, /* dcache_wb_range */
168 armv4_idcache_inv_all, /* idcache_inv_all */
169 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
170 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
172 cpufunc_nullop, /* l2cache_wbinv_all */
173 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
174 (void *)cpufunc_nullop, /* l2cache_inv_range */
175 (void *)cpufunc_nullop, /* l2cache_wb_range */
176 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
178 /* Other functions */
180 armv4_drain_writebuf, /* drain_writebuf */
182 (void *)cpufunc_nullop, /* sleep */
186 arm9_context_switch, /* context_switch */
188 arm10_setup /* cpu setup */
/*
 * CPU function table for Marvell Sheeva (Feroceon 88FR131/88FR571) cores,
 * selected in set_cpufuncs() below.  Unlike the generic armv5_ec table,
 * Sheeva supplies its own ranged D-cache ops and real L2 cache routines,
 * plus a dedicated sleep handler.
 */
192 struct cpu_functions sheeva_cpufuncs = {
195 cpufunc_nullop, /* cpwait */
199 cpufunc_control, /* control */
200 sheeva_setttb, /* Setttb */
204 armv4_tlb_flushID, /* tlb_flushID */
205 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
206 armv4_tlb_flushD, /* tlb_flushD */
207 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
209 /* Cache operations */
211 armv5_ec_icache_sync_range, /* icache_sync_range */
213 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
214 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
215 sheeva_dcache_inv_range, /* dcache_inv_range */
216 sheeva_dcache_wb_range, /* dcache_wb_range */
218 armv4_idcache_inv_all, /* idcache_inv_all */
219 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
220 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
222 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
223 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
224 sheeva_l2cache_inv_range, /* l2cache_inv_range */
225 sheeva_l2cache_wb_range, /* l2cache_wb_range */
226 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
228 /* Other functions */
230 armv4_drain_writebuf, /* drain_writebuf */
232 sheeva_cpu_sleep, /* sleep */
236 arm9_context_switch, /* context_switch */
238 arm10_setup /* cpu setup */
240 #endif /* CPU_ARM9E */
/*
 * CPU function table for Marvell PJ4B (ARMv7) cores, using C99 designated
 * initializers; unnamed slots default to zero.  L2 maintenance is handled
 * elsewhere for these parts, so the l2cache slots are no-ops here.
 */
243 struct cpu_functions pj4bv7_cpufuncs = {
245 /* Cache operations */
246 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
247 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
248 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
249 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
250 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
252 /* Other functions */
253 .cf_sleep = (void *)cpufunc_nullop,
256 .cf_setup = pj4bv7_setup
258 #endif /* CPU_MV_PJ4B */
260 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
/*
 * CPU function table shared by the Intel XScale PXA2x0 and IXP425
 * families.  XScale requires an explicit cpwait (xscale_cpwait) after
 * CP15 writes; cache ops use the XScale "purge" (wb+inv), "flush" (inv)
 * and "clean" (wb) primitives.  No on-chip L2: those slots are no-ops.
 */
262 struct cpu_functions xscale_cpufuncs = {
265 xscale_cpwait, /* cpwait */
269 xscale_control, /* control */
270 xscale_setttb, /* setttb */
274 armv4_tlb_flushID, /* tlb_flushID */
275 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
276 armv4_tlb_flushD, /* tlb_flushD */
277 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
279 /* Cache operations */
281 xscale_cache_syncI_rng, /* icache_sync_range */
283 xscale_cache_purgeD, /* dcache_wbinv_all */
284 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
285 xscale_cache_flushD_rng, /* dcache_inv_range */
286 xscale_cache_cleanD_rng, /* dcache_wb_range */
288 xscale_cache_flushID, /* idcache_inv_all */
289 xscale_cache_purgeID, /* idcache_wbinv_all */
290 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
291 cpufunc_nullop, /* l2cache_wbinv_all */
292 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
293 (void *)cpufunc_nullop, /* l2cache_inv_range */
294 (void *)cpufunc_nullop, /* l2cache_wb_range */
295 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
297 /* Other functions */
299 armv4_drain_writebuf, /* drain_writebuf */
301 xscale_cpu_sleep, /* sleep */
305 xscale_context_switch, /* context_switch */
307 xscale_setup /* cpu setup */
310 /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
312 #ifdef CPU_XSCALE_81342
/*
 * CPU function table for the XScale "core 3" (i81342).  Differs from the
 * base xscale table above in its setttb and L1 cache routines (xscalec3_*
 * variants) and, notably, in having real L2 cache maintenance functions.
 */
313 struct cpu_functions xscalec3_cpufuncs = {
316 xscale_cpwait, /* cpwait */
320 xscale_control, /* control */
321 xscalec3_setttb, /* setttb */
325 armv4_tlb_flushID, /* tlb_flushID */
326 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
327 armv4_tlb_flushD, /* tlb_flushD */
328 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
330 /* Cache operations */
332 xscalec3_cache_syncI_rng, /* icache_sync_range */
334 xscalec3_cache_purgeD, /* dcache_wbinv_all */
335 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
336 xscale_cache_flushD_rng, /* dcache_inv_range */
337 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
339 xscale_cache_flushID, /* idcache_inv_all */
340 xscalec3_cache_purgeID, /* idcache_wbinv_all */
341 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
342 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
343 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
344 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
345 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
346 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
348 /* Other functions */
350 armv4_drain_writebuf, /* drain_writebuf */
352 xscale_cpu_sleep, /* sleep */
356 xscalec3_context_switch, /* context_switch */
358 xscale_setup /* cpu setup */
360 #endif /* CPU_XSCALE_81342 */
363 #if defined(CPU_FA526)
/*
 * CPU function table for the Faraday FA526/FA626TE cores.  armv4 TLB ops
 * plus fa526-specific cache/context routines; no L2 (slots are no-ops).
 */
364 struct cpu_functions fa526_cpufuncs = {
367 cpufunc_nullop, /* cpwait */
371 cpufunc_control, /* control */
372 fa526_setttb, /* setttb */
376 armv4_tlb_flushID, /* tlb_flushID */
377 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
378 armv4_tlb_flushD, /* tlb_flushD */
379 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
381 /* Cache operations */
383 fa526_icache_sync_range, /* icache_sync_range */
385 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
386 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
387 fa526_dcache_inv_range, /* dcache_inv_range */
388 fa526_dcache_wb_range, /* dcache_wb_range */
390 armv4_idcache_inv_all, /* idcache_inv_all */
391 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
392 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
393 cpufunc_nullop, /* l2cache_wbinv_all */
394 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
395 (void *)cpufunc_nullop, /* l2cache_inv_range */
396 (void *)cpufunc_nullop, /* l2cache_wb_range */
397 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
399 /* Other functions */
401 armv4_drain_writebuf, /* drain_writebuf */
403 fa526_cpu_sleep, /* sleep */
408 fa526_context_switch, /* context_switch */
410 fa526_setup /* cpu setup */
412 #endif /* CPU_FA526 */
414 #if defined(CPU_ARM1176)
/*
 * CPU function table for ARM1176 (ARMv6), designated-initializer style;
 * unnamed slots default to zero.  L2 slots are no-ops; sleep and setup
 * use the shared arm11x6 routines.
 */
415 struct cpu_functions arm1176_cpufuncs = {
417 /* Cache operations */
418 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
419 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
420 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
421 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
422 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
424 /* Other functions */
425 .cf_sleep = arm11x6_sleep,
428 .cf_setup = arm11x6_setup
430 #endif /*CPU_ARM1176 */
432 #if defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
/*
 * CPU function table for Cortex-A / Krait (ARMv7) cores.  The L2 slots
 * start out as no-ops and are replaced at runtime for PL310-equipped
 * systems (see the note below); sleep uses the ARMv7 WFI-based routine.
 */
433 struct cpu_functions cortexa_cpufuncs = {
435 /* Cache operations */
438 * Note: For CPUs using the PL310 the L2 ops are filled in when the
439 * L2 cache controller is actually enabled.
441 .cf_l2cache_wbinv_all = cpufunc_nullop,
442 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
443 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
444 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
445 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
447 /* Other functions */
448 .cf_sleep = armv7_cpu_sleep,
451 .cf_setup = cortexa_setup
453 #endif /* CPU_CORTEXA8 || CPU_CORTEXA_MP || CPU_KRAIT */
456 * Global constants also used by locore.s
/* The active function table; set_cpufuncs() copies one of the per-CPU
 * tables above into this at boot. */
459 struct cpu_functions cpufuncs;
462 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */
465 #if defined(CPU_ARM9) || \
466 defined (CPU_ARM9E) || \
467 defined(CPU_ARM1176) || \
468 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
469 defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
470 defined(CPU_XSCALE_81342) || \
471 defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
473 /* Global cache line sizes, use 32 as default */
474 int arm_dcache_min_line_size = 32;
475 int arm_icache_min_line_size = 32;
476 int arm_idcache_min_line_size = 32;
478 static void get_cachetype_cp15(void);
480 /* Additional cache information local to this file. Log2 of some of the
482 static int arm_dcache_l2_nsets;
483 static int arm_dcache_l2_assoc;
484 static int arm_dcache_l2_linesize;
/*
 * get_cachetype_cp15:
 *
 * Probe the CP15 cache type / cache size ID registers and populate the
 * arm_*cache_* globals declared above.  Two paths:
 *  - ARMv7 format (CPU_CT_ARMV7): walk CLIDR levels, selecting each cache
 *    with CSSELR (mcr p15,2,c0,c0,0) and reading CCSIDR (mrc p15,1,c0,c0,0)
 *    into arm_cache_type[]; derives arm_dcache_align from the line-size
 *    field of the D/unified cache.
 *  - pre-v7 format: decode the I/D size, associativity and line-length
 *    fields directly from the cache type register.
 */
487 get_cachetype_cp15(void)
489 u_int ctype, isize, dsize, cpuid;
490 u_int clevel, csize, i, sel;
494 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
499 * ...and thus spake the ARM ARM:
501 * If an <opcode2> value corresponding to an unimplemented or
502 * reserved ID register is encountered, the System Control
503 * processor returns the value of the main ID register.
508 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
509 /* Resolve minimal cache line sizes */
510 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
511 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
512 arm_idcache_min_line_size =
513 min(arm_icache_min_line_size, arm_dcache_min_line_size);
515 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
517 arm_cache_level = clevel;
518 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
/* Walk each of up to 7 cache levels reported by CLIDR. */
520 while ((type = (clevel & 0x7)) && i < 7) {
521 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
522 type == CACHE_SEP_CACHE) {
524 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
526 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
528 arm_cache_type[sel] = csize;
529 arm_dcache_align = 1 <<
530 (CPUV7_CT_xSIZE_LEN(csize) + 4);
531 arm_dcache_align_mask = arm_dcache_align - 1;
533 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
535 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
537 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
539 arm_cache_type[sel] = csize;
/* Pre-ARMv7 cache type register format. */
545 if ((ctype & CPU_CT_S) == 0)
546 arm_pcache_unified = 1;
549 * If you want to know how this code works, go read the ARM ARM.
552 arm_pcache_type = CPU_CT_CTYPE(ctype);
554 if (arm_pcache_unified == 0) {
555 isize = CPU_CT_ISIZE(ctype);
/* "M" bit selects a 3/2 size multiplier per the ARM ARM encoding. */
556 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
557 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
558 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
559 if (isize & CPU_CT_xSIZE_M)
560 arm_picache_line_size = 0; /* not present */
562 arm_picache_ways = 1;
564 arm_picache_ways = multiplier <<
565 (CPU_CT_xSIZE_ASSOC(isize) - 1);
567 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
570 dsize = CPU_CT_DSIZE(ctype);
571 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
572 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
573 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
574 if (dsize & CPU_CT_xSIZE_M)
575 arm_pdcache_line_size = 0; /* not present */
577 arm_pdcache_ways = 1;
579 arm_pdcache_ways = multiplier <<
580 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
582 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
584 arm_dcache_align = arm_pdcache_line_size;
/* Log2-form geometry consumed by arm9 set/index flush loops. */
586 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
587 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
588 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
589 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
592 arm_dcache_align_mask = arm_dcache_align - 1;
595 #endif /* ARM9 || XSCALE */
/*
 * set_cpufuncs:
 *
 * Identify the running CPU from its CP15 ID register (cpu_ident()) and
 * install the matching cpu_functions table into the global `cpufuncs`.
 * Each match also probes the caches (get_cachetype_cp15()) and selects a
 * pmap PTE initializer.  Falls through to panic() for unknown CPU IDs.
 * NOTE(review): the function prologue and the per-branch returns fall
 * outside this excerpt.
 */
598 * Cannot panic here as we may not have a console yet ...
604 cputype = cpu_ident();
605 cputype &= CPU_ID_CPU_MASK;
/* ARM Ltd / TI part with a 0x9xx primary part number => ARM9 family. */
608 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
609 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
610 (cputype & 0x0000f000) == 0x00009000) {
611 cpufuncs = arm9_cpufuncs;
612 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
613 get_cachetype_cp15();
/* Precompute set/index iteration constants for arm9 cache loops
 * from the log2 geometry filled in by get_cachetype_cp15(). */
614 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
615 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
616 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
617 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
618 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
619 pmap_pte_init_generic();
622 #endif /* CPU_ARM9 */
623 #if defined(CPU_ARM9E)
624 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
625 cputype == CPU_ID_MV88FR571_41) {
626 uint32_t sheeva_ctrl;
628 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
631 * Workaround for Marvell MV78100 CPU: Cache prefetch
632 * mechanism may affect the cache coherency validity,
633 * so it needs to be disabled.
635 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
636 * L2 Prefetching Mechanism) for details.
638 if (cputype == CPU_ID_MV88FR571_VD ||
639 cputype == CPU_ID_MV88FR571_41)
640 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
642 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
644 cpufuncs = sheeva_cpufuncs;
645 get_cachetype_cp15();
646 pmap_pte_init_generic();
648 } else if (cputype == CPU_ID_ARM926EJS) {
649 cpufuncs = armv5_ec_cpufuncs;
650 get_cachetype_cp15();
651 pmap_pte_init_generic();
654 #endif /* CPU_ARM9E */
655 #if defined(CPU_ARM1176)
656 if (cputype == CPU_ID_ARM1176JZS) {
657 cpufuncs = arm1176_cpufuncs;
658 get_cachetype_cp15();
661 #endif /* CPU_ARM1176 */
662 #if defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
663 switch(cputype & CPU_ID_SCHEME_MASK) {
664 case CPU_ID_CORTEXA5:
665 case CPU_ID_CORTEXA7:
666 case CPU_ID_CORTEXA8:
667 case CPU_ID_CORTEXA9:
668 case CPU_ID_CORTEXA12:
669 case CPU_ID_CORTEXA15:
670 case CPU_ID_CORTEXA53:
671 case CPU_ID_CORTEXA57:
672 case CPU_ID_CORTEXA72:
673 case CPU_ID_KRAIT300:
674 cpufuncs = cortexa_cpufuncs;
675 get_cachetype_cp15();
680 #endif /* CPU_CORTEXA8 || CPU_CORTEXA_MP || CPU_KRAIT */
682 #if defined(CPU_MV_PJ4B)
683 if (cputype == CPU_ID_MV88SV581X_V7 ||
684 cputype == CPU_ID_MV88SV584X_V7 ||
685 cputype == CPU_ID_ARM_88SV581X_V7) {
686 cpufuncs = pj4bv7_cpufuncs;
687 get_cachetype_cp15();
690 #endif /* CPU_MV_PJ4B */
692 #if defined(CPU_FA526)
693 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
694 cpufuncs = fa526_cpufuncs;
695 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
696 get_cachetype_cp15();
697 pmap_pte_init_generic();
701 #endif /* CPU_FA526 */
703 #if defined(CPU_XSCALE_81342)
704 if (cputype == CPU_ID_81342) {
705 cpufuncs = xscalec3_cpufuncs;
706 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
707 get_cachetype_cp15();
708 pmap_pte_init_xscale();
711 #endif /* CPU_XSCALE_81342 */
712 #ifdef CPU_XSCALE_PXA2X0
713 /* ignore core revision to test PXA2xx CPUs */
714 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
715 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
716 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
718 cpufuncs = xscale_cpufuncs;
719 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
720 get_cachetype_cp15();
721 pmap_pte_init_xscale();
725 #endif /* CPU_XSCALE_PXA2X0 */
726 #ifdef CPU_XSCALE_IXP425
727 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
728 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
730 cpufuncs = xscale_cpufuncs;
731 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
732 get_cachetype_cp15();
733 pmap_pte_init_xscale();
737 #endif /* CPU_XSCALE_IXP425 */
739 * Bzzzz. And the answer was ...
741 panic("No support for this CPU type (%08x) in kernel", cputype);
742 return(ARCHITECTURE_NOT_PRESENT);
744 uma_set_align(arm_dcache_align_mask);
/*
 * arm9_setup: program the ARM9 CP15 control register (SCTLR).
 * Builds the desired control bits plus a mask of the bits we are willing
 * to change, optionally enables alignment faults, big-endian mode, and
 * high vectors, then writes the register after cleaning the caches.
 * NOTE(review): the function signature line is elided from this excerpt.
 */
756 int cpuctrl, cpuctrlmask;
758 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
759 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
760 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
761 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
762 CPU_CONTROL_ROUNDROBIN;
763 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
764 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
765 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
766 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
767 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
768 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
769 | CPU_CONTROL_ROUNDROBIN;
771 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
772 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
776 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
778 if (vector_page == ARM_VECTORS_HIGH)
779 cpuctrl |= CPU_CONTROL_VECRELOC;
781 /* Clear out the cache */
782 cpu_idcache_wbinv_all();
784 /* Set the control register (SCTLR) */
785 cpu_control(cpuctrlmask, cpuctrl);
788 #endif /* CPU_ARM9 */
790 #if defined(CPU_ARM9E)
/*
 * arm10_setup: program the CP15 control register for ARM9E-class cores
 * (also used by the armv5_ec and sheeva tables above).  Unlike
 * arm9_setup it writes with an all-ones mask (cpu_control(0xffffffff,..))
 * and forces an extra full cache clean via a direct c7,c7 operation.
 * NOTE(review): the function signature line is elided from this excerpt.
 */
794 int cpuctrl, cpuctrlmask;
796 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
797 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
798 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
799 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
800 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
801 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
802 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
803 | CPU_CONTROL_BPRD_ENABLE
804 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
806 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
807 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
811 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
814 /* Clear out the cache */
815 cpu_idcache_wbinv_all();
817 /* Now really make sure they are clean. */
818 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
820 if (vector_page == ARM_VECTORS_HIGH)
821 cpuctrl |= CPU_CONTROL_VECRELOC;
823 /* Set the control register */
824 cpu_control(0xffffffff, cpuctrl);
827 cpu_idcache_wbinv_all();
829 #endif /* CPU_ARM9E || CPU_ARM10 */
831 #if defined(CPU_ARM1176) \
832 || defined(CPU_MV_PJ4B) \
833 || defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
/*
 * cpu_scc_setup_ccnt: enable the cycle counter (CCNT) in the performance
 * monitoring unit, and optionally (under _PMC_USER_READ_WRITE_) grant
 * userland read/write access to it via PMUSERENR.
 */
835 cpu_scc_setup_ccnt(void)
837 /* This is how you give userland access to the CCNT and PMCn
839 * BEWARE! This gives write access also, which may not be what
842 #ifdef _PMC_USER_READ_WRITE_
843 /* Set PMUSERENR[0] to allow userland access */
844 cp15_pmuserenr_set(1);
846 #if defined(CPU_ARM1176)
847 /* Set PMCR[2,0] to enable counters and reset CCNT */
850 /* Set up the PMCCNTR register as a cyclecounter:
851 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
852 * Set PMCR[2,0] to enable counters and reset CCNT
853 * Set PMCNTENSET to 0x80000000 to enable CCNT */
854 cp15_pminten_clr(0xFFFFFFFF);
856 cp15_pmcnten_set(0x80000000);
861 #if defined(CPU_ARM1176)
/*
 * arm11x6_setup (fragment): applies the ARM1176JZSr0 PHD auxiliary-control
 * errata workaround, then enables the cycle counter.  The pj4bv7_setup and
 * cortexa_setup fragments below likewise end by calling
 * cpu_scc_setup_ccnt().
 * NOTE(review): several lines of these functions are elided here.
 */
865 uint32_t auxctrl, auxctrl_wax;
875 * Enable an errata workaround
877 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
878 auxctrl = ARM1176_AUXCTL_PHD;
879 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
882 tmp = cp15_actlr_get();
889 cpu_scc_setup_ccnt();
891 #endif /* CPU_ARM1176 */
899 cpu_scc_setup_ccnt();
901 #endif /* CPU_MV_PJ4B */
903 #if defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
909 cpu_scc_setup_ccnt();
911 #endif /* CPU_CORTEXA8 || CPU_CORTEXA_MP || CPU_KRAIT */
913 #if defined(CPU_FA526)
/*
 * fa526_setup: program the CP15 control register for FA526/FA626TE.
 * Same pattern as the other *_setup routines: build desired bits, apply
 * compile-time options (alignment faults, big-endian, high vectors),
 * clean the caches, then write SCTLR with an all-ones mask.
 * NOTE(review): the function signature line is elided from this excerpt.
 */
917 int cpuctrl, cpuctrlmask;
919 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
920 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
921 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
922 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
923 | CPU_CONTROL_BPRD_ENABLE;
924 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
925 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
926 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
927 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
928 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
929 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
930 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
932 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
933 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
937 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
940 if (vector_page == ARM_VECTORS_HIGH)
941 cpuctrl |= CPU_CONTROL_VECRELOC;
943 /* Clear out the cache */
944 cpu_idcache_wbinv_all();
946 /* Set the control register */
947 cpu_control(0xffffffff, cpuctrl);
949 #endif /* CPU_FA526 */
951 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
952 defined(CPU_XSCALE_81342)
/*
 * xscale_setup: program SCTLR for the XScale families, then fix up the
 * auxiliary control register to control write-buffer coalescing (the
 * XSCALE_AUXCTL_K bit) and, on core 3, enable the L2 and low-locality
 * replacement settings.
 * NOTE(review): the function signature line is elided from this excerpt.
 */
957 int cpuctrl, cpuctrlmask;
960 * The XScale Write Buffer is always enabled. Our option
961 * is to enable/disable coalescing. Note that bits 6:3
962 * must always be enabled.
965 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
966 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
967 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
968 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
969 | CPU_CONTROL_BPRD_ENABLE;
970 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
971 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
972 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
973 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
974 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
975 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
976 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
977 CPU_CONTROL_L2_ENABLE;
979 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
980 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
984 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
987 if (vector_page == ARM_VECTORS_HIGH)
988 cpuctrl |= CPU_CONTROL_VECRELOC;
989 #ifdef CPU_XSCALE_CORE3
990 cpuctrl |= CPU_CONTROL_L2_ENABLE;
993 /* Clear out the cache */
994 cpu_idcache_wbinv_all();
997 * Set the control register. Note that bits 6:3 must always
1000 /* cpu_control(cpuctrlmask, cpuctrl);*/
1001 cpu_control(0xffffffff, cpuctrl);
1003 /* Make sure write coalescing is turned on */
1004 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1006 #ifdef XSCALE_NO_COALESCE_WRITES
1007 auxctl |= XSCALE_AUXCTL_K;
1009 auxctl &= ~XSCALE_AUXCTL_K;
1011 #ifdef CPU_XSCALE_CORE3
1012 auxctl |= XSCALE_AUXCTL_LLR;
1013 auxctl |= XSCALE_AUXCTL_MD_MASK;
1015 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1018 #endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */