1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * SPDX-License-Identifier: BSD-4-Clause
6 * arm9 support code Copyright (C) 2001 ARM Ltd
7 * Copyright (c) 1997 Mark Brinicombe.
8 * Copyright (c) 1997 Causality Limited
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Causality Limited.
22 * 4. The name of Causality Limited may not be used to endorse or promote
23 * products derived from this software without specific prior written
26 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * RiscBSD kernel project
42 * C functions for supporting CPU / MMU / TLB specific operations.
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
49 #include <sys/param.h>
50 #include <sys/systm.h>
52 #include <sys/mutex.h>
54 #include <machine/bus.h>
55 #include <machine/cpu.h>
56 #include <machine/disassem.h>
62 #include <machine/cpufunc.h>
64 #if defined(CPU_XSCALE_81342)
65 #include <arm/xscale/i8134x/i81342reg.h>
68 #ifdef CPU_XSCALE_IXP425
69 #include <arm/xscale/ixp425/ixp425reg.h>
70 #include <arm/xscale/ixp425/ixp425var.h>
/* PRIMARY CACHE VARIABLES */
int arm_picache_line_size;	/* primary I-cache line size, in bytes */
int arm_pdcache_size; /* and unified */
int arm_pdcache_line_size;	/* primary D-cache (or unified) line size, in bytes */
int arm_pcache_unified;		/* non-zero when I and D caches are unified */
int arm_dcache_align_mask;	/* D-cache alignment mask (line size - 1) */
u_int arm_cache_level;		/* raw CLIDR value (ARMv7 probing path) */
u_int arm_cache_type[14];	/* per-level CCSIDR values (ARMv7 probing path) */
/*
 * ARM9 cores: CPU function dispatch table.  Entries are listed in
 * struct cpu_functions slot order; cpufunc_nullop fills slots where
 * this core needs no operation (e.g. there is no L2 cache).
 */
struct cpu_functions arm9_cpufuncs = {
	cpufunc_nullop, /* cpwait */

	cpufunc_control, /* control */
	arm9_setttb, /* Setttb */

	/* TLB maintenance: common ARMv4 ops plus ARM9 single-entry flush */
	armv4_tlb_flushID, /* tlb_flushID */
	arm9_tlb_flushID_SE, /* tlb_flushID_SE */
	armv4_tlb_flushD, /* tlb_flushD */
	armv4_tlb_flushD_SE, /* tlb_flushD_SE */

	/* Cache operations */

	arm9_icache_sync_range, /* icache_sync_range */

	arm9_dcache_wbinv_all, /* dcache_wbinv_all */
	arm9_dcache_wbinv_range, /* dcache_wbinv_range */
	arm9_dcache_inv_range, /* dcache_inv_range */
	arm9_dcache_wb_range, /* dcache_wb_range */

	armv4_idcache_inv_all, /* idcache_inv_all */
	arm9_idcache_wbinv_all, /* idcache_wbinv_all */
	arm9_idcache_wbinv_range, /* idcache_wbinv_range */
	cpufunc_nullop, /* l2cache_wbinv_all */
	(void *)cpufunc_nullop, /* l2cache_wbinv_range */
	(void *)cpufunc_nullop, /* l2cache_inv_range */
	(void *)cpufunc_nullop, /* l2cache_wb_range */
	(void *)cpufunc_nullop, /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf, /* drain_writebuf */

	(void *)cpufunc_nullop, /* sleep */

	arm9_context_switch, /* context_switch */

	arm9_setup /* cpu setup */
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
/*
 * ARM9E / ARMv5 "EC" cores (selected for ARM926EJ-S in set_cpufuncs):
 * reuses ARMv4 TLB primitives and the ARM9 context switch, with
 * armv5_ec_* cache maintenance.  No L2 cache ops on this core.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	cpufunc_nullop, /* cpwait */

	cpufunc_control, /* control */
	armv5_ec_setttb, /* Setttb */

	armv4_tlb_flushID, /* tlb_flushID */
	arm9_tlb_flushID_SE, /* tlb_flushID_SE */
	armv4_tlb_flushD, /* tlb_flushD */
	armv4_tlb_flushD_SE, /* tlb_flushD_SE */

	/* Cache operations */

	armv5_ec_icache_sync_range, /* icache_sync_range */

	armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
	armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
	armv5_ec_dcache_inv_range, /* dcache_inv_range */
	armv5_ec_dcache_wb_range, /* dcache_wb_range */

	armv4_idcache_inv_all, /* idcache_inv_all */
	armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
	armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */

	cpufunc_nullop, /* l2cache_wbinv_all */
	(void *)cpufunc_nullop, /* l2cache_wbinv_range */
	(void *)cpufunc_nullop, /* l2cache_inv_range */
	(void *)cpufunc_nullop, /* l2cache_wb_range */
	(void *)cpufunc_nullop, /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf, /* drain_writebuf */

	(void *)cpufunc_nullop, /* sleep */

	arm9_context_switch, /* context_switch */

	arm10_setup /* cpu setup */
/*
 * Marvell Sheeva cores (88FR131 / 88FR571, see set_cpufuncs): mostly
 * the armv5_ec table, but with sheeva-specific ranged D-cache ops and
 * real L2 cache maintenance routines.
 */
struct cpu_functions sheeva_cpufuncs = {
	cpufunc_nullop, /* cpwait */

	cpufunc_control, /* control */
	sheeva_setttb, /* Setttb */

	armv4_tlb_flushID, /* tlb_flushID */
	arm9_tlb_flushID_SE, /* tlb_flushID_SE */
	armv4_tlb_flushD, /* tlb_flushD */
	armv4_tlb_flushD_SE, /* tlb_flushD_SE */

	/* Cache operations */

	armv5_ec_icache_sync_range, /* icache_sync_range */

	armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
	sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
	sheeva_dcache_inv_range, /* dcache_inv_range */
	sheeva_dcache_wb_range, /* dcache_wb_range */

	armv4_idcache_inv_all, /* idcache_inv_all */
	armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
	sheeva_idcache_wbinv_range, /* idcache_wbinv_range */

	sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
	sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
	sheeva_l2cache_inv_range, /* l2cache_inv_range */
	sheeva_l2cache_wb_range, /* l2cache_wb_range */
	(void *)cpufunc_nullop, /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf, /* drain_writebuf */

	sheeva_cpu_sleep, /* sleep */

	arm9_context_switch, /* context_switch */

	arm10_setup /* cpu setup */
#endif /* CPU_ARM9E */
/*
 * Marvell PJ4B (ARMv7) cores: designated-initializer table — any slot
 * not named here is zero-initialized per C semantics.  The L2 slots
 * are explicit no-ops.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = (void *)cpufunc_nullop,

	.cf_setup = pj4bv7_setup
#endif /* CPU_MV_PJ4B */
#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)

/*
 * Intel XScale (PXA2x0 / IXP425) cores.  XScale requires an explicit
 * cpwait after CP15 writes; cache maintenance uses the xscale_cache_*
 * clean/flush/purge primitives.  No L2 ops on these parts.
 */
struct cpu_functions xscale_cpufuncs = {
	xscale_cpwait, /* cpwait */

	xscale_control, /* control */
	xscale_setttb, /* setttb */

	armv4_tlb_flushID, /* tlb_flushID */
	xscale_tlb_flushID_SE, /* tlb_flushID_SE */
	armv4_tlb_flushD, /* tlb_flushD */
	armv4_tlb_flushD_SE, /* tlb_flushD_SE */

	/* Cache operations */

	xscale_cache_syncI_rng, /* icache_sync_range */

	xscale_cache_purgeD, /* dcache_wbinv_all */
	xscale_cache_purgeD_rng, /* dcache_wbinv_range */
	xscale_cache_flushD_rng, /* dcache_inv_range */
	xscale_cache_cleanD_rng, /* dcache_wb_range */

	xscale_cache_flushID, /* idcache_inv_all */
	xscale_cache_purgeID, /* idcache_wbinv_all */
	xscale_cache_purgeID_rng, /* idcache_wbinv_range */
	cpufunc_nullop, /* l2cache_wbinv_all */
	(void *)cpufunc_nullop, /* l2cache_wbinv_range */
	(void *)cpufunc_nullop, /* l2cache_inv_range */
	(void *)cpufunc_nullop, /* l2cache_wb_range */
	(void *)cpufunc_nullop, /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf, /* drain_writebuf */

	xscale_cpu_sleep, /* sleep */

	xscale_context_switch, /* context_switch */

	xscale_setup /* cpu setup */
/* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
#ifdef CPU_XSCALE_81342
/*
 * XScale core 3 (Intel 81342): like the plain XScale table, but uses
 * the xscalec3_* cache routines and provides real L2 maintenance via
 * xscalec3_l2cache_*.
 */
struct cpu_functions xscalec3_cpufuncs = {
	xscale_cpwait, /* cpwait */

	xscale_control, /* control */
	xscalec3_setttb, /* setttb */

	armv4_tlb_flushID, /* tlb_flushID */
	xscale_tlb_flushID_SE, /* tlb_flushID_SE */
	armv4_tlb_flushD, /* tlb_flushD */
	armv4_tlb_flushD_SE, /* tlb_flushD_SE */

	/* Cache operations */

	xscalec3_cache_syncI_rng, /* icache_sync_range */

	xscalec3_cache_purgeD, /* dcache_wbinv_all */
	xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
	xscale_cache_flushD_rng, /* dcache_inv_range */
	xscalec3_cache_cleanD_rng, /* dcache_wb_range */

	xscale_cache_flushID, /* idcache_inv_all */
	xscalec3_cache_purgeID, /* idcache_wbinv_all */
	xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
	xscalec3_l2cache_purge, /* l2cache_wbinv_all */
	xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
	xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
	xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
	(void *)cpufunc_nullop, /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf, /* drain_writebuf */

	xscale_cpu_sleep, /* sleep */

	xscalec3_context_switch, /* context_switch */

	xscale_setup /* cpu setup */
#endif /* CPU_XSCALE_81342 */
#if defined(CPU_FA526)
/*
 * Faraday FA526 / FA626TE cores: ARMv4 TLB primitives plus the
 * fa526-specific MMU/cache/context routines.  No L2 cache ops.
 */
struct cpu_functions fa526_cpufuncs = {
	cpufunc_nullop, /* cpwait */

	cpufunc_control, /* control */
	fa526_setttb, /* setttb */

	armv4_tlb_flushID, /* tlb_flushID */
	fa526_tlb_flushID_SE, /* tlb_flushID_SE */
	armv4_tlb_flushD, /* tlb_flushD */
	armv4_tlb_flushD_SE, /* tlb_flushD_SE */

	/* Cache operations */

	fa526_icache_sync_range, /* icache_sync_range */

	fa526_dcache_wbinv_all, /* dcache_wbinv_all */
	fa526_dcache_wbinv_range, /* dcache_wbinv_range */
	fa526_dcache_inv_range, /* dcache_inv_range */
	fa526_dcache_wb_range, /* dcache_wb_range */

	armv4_idcache_inv_all, /* idcache_inv_all */
	fa526_idcache_wbinv_all, /* idcache_wbinv_all */
	fa526_idcache_wbinv_range, /* idcache_wbinv_range */
	cpufunc_nullop, /* l2cache_wbinv_all */
	(void *)cpufunc_nullop, /* l2cache_wbinv_range */
	(void *)cpufunc_nullop, /* l2cache_inv_range */
	(void *)cpufunc_nullop, /* l2cache_wb_range */
	(void *)cpufunc_nullop, /* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf, /* drain_writebuf */

	fa526_cpu_sleep, /* sleep */

	fa526_context_switch, /* context_switch */

	fa526_setup /* cpu setup */
#endif /* CPU_FA526 */
#if defined(CPU_ARM1176)
/*
 * ARM1176 cores: designated-initializer table — unnamed slots are
 * zero-initialized per C semantics.  L2 slots are explicit no-ops;
 * sleep and setup use the arm11x6_* routines.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = arm11x6_sleep,

	.cf_setup = arm11x6_setup
#endif /*CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Cortex-A family and Qualcomm Krait cores (ARMv7): designated
 * initializers; unnamed slots default to zero.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* Cache operations */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = armv7_cpu_sleep,

	.cf_setup = cortexa_setup
#endif /* CPU_CORTEXA || CPU_KRAIT */
/*
 * Global constants also used by locore.s
 */
struct cpu_functions cpufuncs;	/* active dispatch table, chosen by set_cpufuncs() */

u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */
#if defined(CPU_ARM9) || \
  defined (CPU_ARM9E) || \
  defined(CPU_ARM1176) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
  defined(CPU_XSCALE_81342) || \
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

/* Global cache line sizes, use 32 as default */
int arm_dcache_min_line_size = 32;
int arm_icache_min_line_size = 32;
int arm_idcache_min_line_size = 32;

static void get_cachetype_cp15(void);

/* Additional cache information local to this file. Log2 of some of the
   above figures; filled in by get_cachetype_cp15(). */
static int arm_dcache_l2_nsets;
static int arm_dcache_l2_assoc;
static int arm_dcache_l2_linesize;
/*
 * Probe the CP15 cache identification registers and fill in the global
 * cache geometry variables (sizes, line sizes, associativity, masks).
 * Handles both the ARMv7 CLIDR/CCSIDR scheme and the older pre-v7
 * cache type register layout.
 */
get_cachetype_cp15(void)
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;

	/* Read the CP15 cache type register (c0, c0, 1). */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Resolve minimal cache line sizes */
		arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
		arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
		arm_idcache_min_line_size =
		    min(arm_icache_min_line_size, arm_dcache_min_line_size);

		/* Read CLIDR (opc1=1, c0, c0, 1) and walk the cache levels. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		/* Up to 7 levels described, 3 type bits per level. */
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				/* Select the level (CSSELR write), read CCSIDR. */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				arm_cache_type[sel] = csize;
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				arm_cache_type[sel] = csize;

	/* Older (pre-ARMv7) cache type register layout from here on. */
	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */
	arm_pcache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache_unified == 0) {
		/* Separate I-cache geometry. */
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			arm_picache_ways = 1;
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);

	/* D-cache (or unified cache) geometry. */
	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		arm_pdcache_ways = 1;
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

	/* Log2 figures consumed by the arm9 dcache loop setup. */
	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

	arm_dcache_align_mask = arm_dcache_align - 1;
#endif /* ARM9 || XSCALE */
/*
 * Select the cpufuncs dispatch table matching the running CPU, probe
 * the cache geometry, and initialize the pmap PTE layer.
 * Cannot panic here as we may not have a console yet ...
 */
	cputype = cpu_ident();
	cputype &= CPU_ID_CPU_MASK;

	/* ARM9 family: ARM Ltd. or TI implementor with a 9xx part number. */
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	    (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
		get_cachetype_cp15();
		/* Precompute set/index iterators for the arm9 dcache loops. */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
		pmap_pte_init_generic();
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
	/* Marvell Sheeva cores (88FR131 / 88FR571 variants). */
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();

	} else if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* All Cortex-A and Krait variants share one dispatch table. */
	switch(cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
		get_cachetype_cp15();
		pmap_pte_init_generic();
#endif /* CPU_FA526 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);

	/* Tell UMA our minimum allocation alignment (dcache line). */
	uma_set_align(arm_dcache_align_mask);
	/* Build the SCTLR value/mask for ARM9: MMU, caches, write buffer. */
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
	    CPU_CONTROL_ROUNDROBIN;
	/* Mask of the bits this routine is allowed to modify. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
	    | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;

	/* Relocate the exception vectors if they live at the high address. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register (SCTLR) */
	cpu_control(cpuctrlmask, cpuctrl);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
	/* SCTLR value/mask for ARM9E-class cores (installed as arm10_setup). */
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean. */
	/* CP15 c7, c7, 0: invalidate both I and D caches. */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
#endif /* CPU_ARM9E || CPU_ARM10 */
#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Configure the performance monitor cycle counter (CCNT), optionally
 * exposing it to userland.
 */
cpu_scc_setup_ccnt(void)
	/*
	 * This is how you give userland access to the CCNT and PMCn
	 * BEWARE! This gives write access also, which may not be what
	 * you want.
	 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	/*
	 * Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT
	 */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcnten_set(0x80000000);
#if defined(CPU_ARM1176)
	/* Auxiliary control register value and write mask. */
	uint32_t auxctrl, auxctrl_wax;

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;

	/* Read-modify-write of the auxiliary control register (ACTLR). */
	tmp = cp15_actlr_get();

	cpu_scc_setup_ccnt();
#endif /* CPU_ARM1176 */
	/* NOTE(review): appears to be the tail of pj4bv7_setup — confirm. */
	cpu_scc_setup_ccnt();
#endif /* CPU_MV_PJ4B */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* NOTE(review): appears to be the tail of cortexa_setup — confirm. */
	cpu_scc_setup_ccnt();
#endif /* CPU_CORTEXA || CPU_KRAIT */
#if defined(CPU_FA526)
	/* SCTLR value/mask for FA526/FA626TE cores. */
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);
#endif /* CPU_FA526 */
#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_81342)
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
	    CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	/* Core 3 (81342) also gets its L2 cache enabled here. */
	cpuctrl |= CPU_CONTROL_L2_ENABLE;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set.
	 */
	/* cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
	auxctl &= ~XSCALE_AUXCTL_K;
#ifdef CPU_XSCALE_CORE3
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;

	/* Write the auxiliary control register back. */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
#endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */