1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 * products derived from this software without specific prior written
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * RiscBSD kernel project
40 * C functions for supporting CPU / MMU / TLB specific operations.
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/systm.h>
50 #include <sys/mutex.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
63 #if defined(CPU_XSCALE_81342)
64 #include <arm/xscale/i8134x/i81342reg.h>
67 #ifdef CPU_XSCALE_IXP425
68 #include <arm/xscale/ixp425/ixp425reg.h>
69 #include <arm/xscale/ixp425/ixp425var.h>
72 /* PRIMARY CACHE VARIABLES */
/* L1 cache geometry; filled in at boot by get_cachetype_cp15(). */
74 int arm_picache_line_size;
77 int arm_pdcache_size; /* and unified */
78 int arm_pdcache_line_size;
/* Non-zero when the CPU has a unified (combined I/D) primary cache. */
82 int arm_pcache_unified;
/* arm_dcache_align - 1: mask used to round addresses to a D-cache line. */
85 int arm_dcache_align_mask;
/* ARMv7: raw CLIDR value, and the CCSIDR read for each cache selector
 * (presumably two selector slots per level -- confirm against full source). */
87 u_int arm_cache_level;
88 u_int arm_cache_type[14];
/*
 * Dispatch table for ARM9 cores: ARM9-specific cache maintenance and
 * context switch, ARMv4 TLB/write-buffer helpers, and nullops for the
 * (absent) L2 cache.
 */
92 struct cpu_functions arm9_cpufuncs = {
95 cpufunc_nullop, /* cpwait */
99 cpufunc_control, /* control */
100 arm9_setttb, /* Setttb */
104 armv4_tlb_flushID, /* tlb_flushID */
105 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
106 armv4_tlb_flushD, /* tlb_flushD */
107 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
109 /* Cache operations */
111 arm9_icache_sync_range, /* icache_sync_range */
113 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
114 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
115 arm9_dcache_inv_range, /* dcache_inv_range */
116 arm9_dcache_wb_range, /* dcache_wb_range */
118 armv4_idcache_inv_all, /* idcache_inv_all */
119 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
120 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
121 cpufunc_nullop, /* l2cache_wbinv_all */
122 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
123 (void *)cpufunc_nullop, /* l2cache_inv_range */
124 (void *)cpufunc_nullop, /* l2cache_wb_range */
125 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
127 /* Other functions */
129 armv4_drain_writebuf, /* drain_writebuf */
131 (void *)cpufunc_nullop, /* sleep */
135 arm9_context_switch, /* context_switch */
137 arm9_setup /* cpu setup */
140 #endif /* CPU_ARM9 */
142 #if defined(CPU_ARM9E)
/*
 * Dispatch table for ARMv5 "EC" cores (e.g. ARM926EJ-S, see
 * set_cpufuncs): armv5_ec cache routines over the armv4 TLB helpers;
 * no L2 cache, so the l2cache slots are nullops.
 */
143 struct cpu_functions armv5_ec_cpufuncs = {
146 cpufunc_nullop, /* cpwait */
150 cpufunc_control, /* control */
151 armv5_ec_setttb, /* Setttb */
155 armv4_tlb_flushID, /* tlb_flushID */
156 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
157 armv4_tlb_flushD, /* tlb_flushD */
158 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
160 /* Cache operations */
162 armv5_ec_icache_sync_range, /* icache_sync_range */
164 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
165 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
166 armv5_ec_dcache_inv_range, /* dcache_inv_range */
167 armv5_ec_dcache_wb_range, /* dcache_wb_range */
169 armv4_idcache_inv_all, /* idcache_inv_all */
170 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
171 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
173 cpufunc_nullop, /* l2cache_wbinv_all */
174 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
175 (void *)cpufunc_nullop, /* l2cache_inv_range */
176 (void *)cpufunc_nullop, /* l2cache_wb_range */
177 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
179 /* Other functions */
181 armv4_drain_writebuf, /* drain_writebuf */
183 (void *)cpufunc_nullop, /* sleep */
187 arm9_context_switch, /* context_switch */
189 arm10_setup /* cpu setup */
/*
 * Dispatch table for Marvell Sheeva cores (MV88FR131/MV88FR571, see
 * set_cpufuncs): armv5_ec whole-cache ops with Sheeva-specific ranged
 * D-cache and real L2 maintenance routines.
 */
193 struct cpu_functions sheeva_cpufuncs = {
196 cpufunc_nullop, /* cpwait */
200 cpufunc_control, /* control */
201 sheeva_setttb, /* Setttb */
205 armv4_tlb_flushID, /* tlb_flushID */
206 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
207 armv4_tlb_flushD, /* tlb_flushD */
208 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
210 /* Cache operations */
212 armv5_ec_icache_sync_range, /* icache_sync_range */
214 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
215 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
216 sheeva_dcache_inv_range, /* dcache_inv_range */
217 sheeva_dcache_wb_range, /* dcache_wb_range */
219 armv4_idcache_inv_all, /* idcache_inv_all */
220 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
221 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
223 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
224 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
225 sheeva_l2cache_inv_range, /* l2cache_inv_range */
226 sheeva_l2cache_wb_range, /* l2cache_wb_range */
227 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
229 /* Other functions */
231 armv4_drain_writebuf, /* drain_writebuf */
233 sheeva_cpu_sleep, /* sleep */
237 arm9_context_switch, /* context_switch */
239 arm10_setup /* cpu setup */
241 #endif /* CPU_ARM9E */
/*
 * Dispatch table for Marvell PJ4B (ARMv7) cores, using designated
 * initializers; only the L2 nullops, sleep and setup hooks are set
 * here -- unnamed members are zero-initialized.
 */
244 struct cpu_functions pj4bv7_cpufuncs = {
246 /* Cache operations */
247 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
248 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
249 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
250 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
251 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
253 /* Other functions */
254 .cf_sleep = (void *)cpufunc_nullop,
257 .cf_setup = pj4bv7_setup
259 #endif /* CPU_MV_PJ4B */
261 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
/*
 * Dispatch table for Intel XScale (PXA2x0/IXP425) cores.  Note the
 * real cpwait: XScale requires a CP15 sync after coprocessor writes.
 */
263 struct cpu_functions xscale_cpufuncs = {
266 xscale_cpwait, /* cpwait */
270 xscale_control, /* control */
271 xscale_setttb, /* setttb */
275 armv4_tlb_flushID, /* tlb_flushID */
276 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
277 armv4_tlb_flushD, /* tlb_flushD */
278 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
280 /* Cache operations */
282 xscale_cache_syncI_rng, /* icache_sync_range */
284 xscale_cache_purgeD, /* dcache_wbinv_all */
285 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
286 xscale_cache_flushD_rng, /* dcache_inv_range */
287 xscale_cache_cleanD_rng, /* dcache_wb_range */
289 xscale_cache_flushID, /* idcache_inv_all */
290 xscale_cache_purgeID, /* idcache_wbinv_all */
291 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
292 cpufunc_nullop, /* l2cache_wbinv_all */
293 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
294 (void *)cpufunc_nullop, /* l2cache_inv_range */
295 (void *)cpufunc_nullop, /* l2cache_wb_range */
296 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
298 /* Other functions */
300 armv4_drain_writebuf, /* drain_writebuf */
302 xscale_cpu_sleep, /* sleep */
306 xscale_context_switch, /* context_switch */
308 xscale_setup /* cpu setup */
311 /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
313 #ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the XScale core 3 (i81342): xscalec3 cache
 * routines and, unlike the other tables here, real L2 maintenance.
 */
314 struct cpu_functions xscalec3_cpufuncs = {
317 xscale_cpwait, /* cpwait */
321 xscale_control, /* control */
322 xscalec3_setttb, /* setttb */
326 armv4_tlb_flushID, /* tlb_flushID */
327 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
328 armv4_tlb_flushD, /* tlb_flushD */
329 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
331 /* Cache operations */
333 xscalec3_cache_syncI_rng, /* icache_sync_range */
335 xscalec3_cache_purgeD, /* dcache_wbinv_all */
336 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
337 xscale_cache_flushD_rng, /* dcache_inv_range */
338 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
340 xscale_cache_flushID, /* idcache_inv_all */
341 xscalec3_cache_purgeID, /* idcache_wbinv_all */
342 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
343 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
344 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
345 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
346 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
347 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
349 /* Other functions */
351 armv4_drain_writebuf, /* drain_writebuf */
353 xscale_cpu_sleep, /* sleep */
357 xscalec3_context_switch, /* context_switch */
359 xscale_setup /* cpu setup */
361 #endif /* CPU_XSCALE_81342 */
364 #if defined(CPU_FA526)
/*
 * Dispatch table for Faraday FA526/FA626TE cores: FA526-specific
 * cache/TLB-entry/context routines over armv4 helpers; no L2 cache.
 */
365 struct cpu_functions fa526_cpufuncs = {
368 cpufunc_nullop, /* cpwait */
372 cpufunc_control, /* control */
373 fa526_setttb, /* setttb */
377 armv4_tlb_flushID, /* tlb_flushID */
378 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
379 armv4_tlb_flushD, /* tlb_flushD */
380 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
382 /* Cache operations */
384 fa526_icache_sync_range, /* icache_sync_range */
386 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
387 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
388 fa526_dcache_inv_range, /* dcache_inv_range */
389 fa526_dcache_wb_range, /* dcache_wb_range */
391 armv4_idcache_inv_all, /* idcache_inv_all */
392 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
393 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
394 cpufunc_nullop, /* l2cache_wbinv_all */
395 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
396 (void *)cpufunc_nullop, /* l2cache_inv_range */
397 (void *)cpufunc_nullop, /* l2cache_wb_range */
398 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
400 /* Other functions */
402 armv4_drain_writebuf, /* drain_writebuf */
404 fa526_cpu_sleep, /* sleep */
409 fa526_context_switch, /* context_switch */
411 fa526_setup /* cpu setup */
413 #endif /* CPU_FA526 */
415 #if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 cores (designated initializers; unnamed
 * members zero-initialized): L2 slots are nullops, sleep/setup use the
 * arm11x6 routines.
 */
416 struct cpu_functions arm1176_cpufuncs = {
418 /* Cache operations */
419 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
420 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
421 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
422 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
423 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
425 /* Other functions */
426 .cf_sleep = arm11x6_sleep,
429 .cf_setup = arm11x6_setup
431 #endif /*CPU_ARM1176 */
433 #if defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A and Krait cores.  L2 slots start as
 * nullops; see the note below about PL310 systems replacing them at
 * L2-controller attach time.
 */
434 struct cpu_functions cortexa_cpufuncs = {
436 /* Cache operations */
439 * Note: For CPUs using the PL310 the L2 ops are filled in when the
440 * L2 cache controller is actually enabled.
442 .cf_l2cache_wbinv_all = cpufunc_nullop,
443 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
444 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
445 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
446 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
448 /* Other functions */
449 .cf_sleep = armv7_cpu_sleep,
452 .cf_setup = cortexa_setup
454 #endif /* CPU_CORTEXA8 || CPU_CORTEXA_MP || CPU_KRAIT */
457 * Global constants also used by locore.s
/* The active dispatch table; selected and installed by set_cpufuncs(). */
460 struct cpu_functions cpufuncs;
463 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */
466 #if defined(CPU_ARM9) || \
467 defined (CPU_ARM9E) || \
468 defined(CPU_ARM1176) || \
469 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
470 defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
471 defined(CPU_XSCALE_81342) || \
472 defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
474 /* Global cache line sizes, use 32 as default */
475 int arm_dcache_min_line_size = 32;
476 int arm_icache_min_line_size = 32;
477 int arm_idcache_min_line_size = 32;
479 static void get_cachetype_cp15(void);
481 /* Additional cache information local to this file. Log2 of some of the
483 static int arm_dcache_l2_nsets;
484 static int arm_dcache_l2_assoc;
/* Log2 figures consumed by set_cpufuncs() to program the arm9_dcache_*
 * set/index loop parameters. */
485 static int arm_dcache_l2_linesize;
/*
 * Read the CP15 cache identification registers and fill in the global
 * arm_* cache geometry variables (line sizes, ways, sizes and the
 * alignment mask used by ranged cache maintenance).  Handles both the
 * ARMv7 CLIDR/CCSIDR scheme and the older Cache Type Register format.
 */
488 get_cachetype_cp15(void)
490 u_int ctype, isize, dsize, cpuid;
491 u_int clevel, csize, i, sel;
/* Read the Cache Type Register (CP15 c0, opc2 1). */
495 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
500 * ...and thus spake the ARM ARM:
502 * If an <opcode2> value corresponding to an unimplemented or
503 * reserved ID register is encountered, the System Control
504 * processor returns the value of the main ID register.
509 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
510 /* Resolve minimal cache line sizes */
511 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
512 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
513 arm_idcache_min_line_size =
514 min(arm_icache_min_line_size, arm_dcache_min_line_size);
/* ARMv7: walk the levels described by CLIDR, selecting each cache
 * via CSSELR and reading its CCSIDR. */
516 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
518 arm_cache_level = clevel;
519 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
521 while ((type = (clevel & 0x7)) && i < 7) {
522 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
523 type == CACHE_SEP_CACHE) {
525 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
527 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
529 arm_cache_type[sel] = csize;
530 arm_dcache_align = 1 <<
531 (CPUV7_CT_xSIZE_LEN(csize) + 4);
532 arm_dcache_align_mask = arm_dcache_align - 1;
534 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
536 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
538 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
540 arm_cache_type[sel] = csize;
/* Pre-ARMv7 Cache Type Register format below. */
546 if ((ctype & CPU_CT_S) == 0)
547 arm_pcache_unified = 1;
550 * If you want to know how this code works, go read the ARM ARM.
553 arm_pcache_type = CPU_CT_CTYPE(ctype);
555 if (arm_pcache_unified == 0) {
556 isize = CPU_CT_ISIZE(ctype);
557 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
558 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
559 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
560 if (isize & CPU_CT_xSIZE_M)
561 arm_picache_line_size = 0; /* not present */
563 arm_picache_ways = 1;
565 arm_picache_ways = multiplier <<
566 (CPU_CT_xSIZE_ASSOC(isize) - 1);
568 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
/* Decode the D-side (or unified) geometry the same way. */
571 dsize = CPU_CT_DSIZE(ctype);
572 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
573 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
574 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
575 if (dsize & CPU_CT_xSIZE_M)
576 arm_pdcache_line_size = 0; /* not present */
578 arm_pdcache_ways = 1;
580 arm_pdcache_ways = multiplier <<
581 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
583 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
585 arm_dcache_align = arm_pdcache_line_size;
/* Log2 set/assoc/line values used by set_cpufuncs() for the arm9
 * set/index cache loop parameters. */
587 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
588 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
589 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
590 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
593 arm_dcache_align_mask = arm_dcache_align - 1;
/*
 * Identify the running CPU via cpu_ident() and install the matching
 * cpu_functions table into the global `cpufuncs`, then read the cache
 * geometry and initialize pmap PTE parameters.  Unrecognized CPUs fall
 * through to the panic() at the bottom (the ARCHITECTURE_NOT_PRESENT
 * return is then unreachable, kept to satisfy the signature).
 */
599 * Cannot panic here as we may not have a console yet ...
605 cputype = cpu_ident();
606 cputype &= CPU_ID_CPU_MASK;
/* ARM Ltd / TI parts with a 0x9xxx part number: generic ARM9. */
609 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
610 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
611 (cputype & 0x0000f000) == 0x00009000) {
612 cpufuncs = arm9_cpufuncs;
613 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
614 get_cachetype_cp15();
/* Derive the set/index loop parameters for the arm9 D-cache
 * routines from the log2 geometry just read. */
615 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
616 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
617 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
618 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
619 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
620 pmap_pte_init_generic();
623 #endif /* CPU_ARM9 */
624 #if defined(CPU_ARM9E)
625 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
626 cputype == CPU_ID_MV88FR571_41) {
627 uint32_t sheeva_ctrl;
629 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
632 * Workaround for Marvell MV78100 CPU: Cache prefetch
633 * mechanism may affect the cache coherency validity,
634 * so it needs to be disabled.
636 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
637 * L2 Prefetching Mechanism) for details.
639 if (cputype == CPU_ID_MV88FR571_VD ||
640 cputype == CPU_ID_MV88FR571_41)
641 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
643 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
645 cpufuncs = sheeva_cpufuncs;
646 get_cachetype_cp15();
647 pmap_pte_init_generic();
649 } else if (cputype == CPU_ID_ARM926EJS) {
650 cpufuncs = armv5_ec_cpufuncs;
651 get_cachetype_cp15();
652 pmap_pte_init_generic();
655 #endif /* CPU_ARM9E */
656 #if defined(CPU_ARM1176)
657 if (cputype == CPU_ID_ARM1176JZS) {
658 cpufuncs = arm1176_cpufuncs;
659 get_cachetype_cp15();
662 #endif /* CPU_ARM1176 */
663 #if defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
/* All Cortex-A family members and Krait share one table. */
664 switch(cputype & CPU_ID_SCHEME_MASK) {
665 case CPU_ID_CORTEXA5:
666 case CPU_ID_CORTEXA7:
667 case CPU_ID_CORTEXA8:
668 case CPU_ID_CORTEXA9:
669 case CPU_ID_CORTEXA12:
670 case CPU_ID_CORTEXA15:
671 case CPU_ID_CORTEXA53:
672 case CPU_ID_CORTEXA57:
673 case CPU_ID_CORTEXA72:
674 case CPU_ID_KRAIT300:
675 cpufuncs = cortexa_cpufuncs;
676 get_cachetype_cp15();
681 #endif /* CPU_CORTEXA8 || CPU_CORTEXA_MP || CPU_KRAIT */
683 #if defined(CPU_MV_PJ4B)
684 if (cputype == CPU_ID_MV88SV581X_V7 ||
685 cputype == CPU_ID_MV88SV584X_V7 ||
686 cputype == CPU_ID_ARM_88SV581X_V7) {
687 cpufuncs = pj4bv7_cpufuncs;
688 get_cachetype_cp15();
691 #endif /* CPU_MV_PJ4B */
693 #if defined(CPU_FA526)
694 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
695 cpufuncs = fa526_cpufuncs;
696 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
697 get_cachetype_cp15();
698 pmap_pte_init_generic();
702 #endif /* CPU_FA526 */
704 #if defined(CPU_XSCALE_81342)
705 if (cputype == CPU_ID_81342) {
706 cpufuncs = xscalec3_cpufuncs;
707 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
708 get_cachetype_cp15();
709 pmap_pte_init_xscale();
712 #endif /* CPU_XSCALE_81342 */
713 #ifdef CPU_XSCALE_PXA2X0
714 /* ignore core revision to test PXA2xx CPUs */
715 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
716 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
717 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
719 cpufuncs = xscale_cpufuncs;
720 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
721 get_cachetype_cp15();
722 pmap_pte_init_xscale();
726 #endif /* CPU_XSCALE_PXA2X0 */
727 #ifdef CPU_XSCALE_IXP425
728 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
729 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
731 cpufuncs = xscale_cpufuncs;
732 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
733 get_cachetype_cp15();
734 pmap_pte_init_xscale();
738 #endif /* CPU_XSCALE_IXP425 */
740 * Bzzzz. And the answer was ...
742 panic("No support for this CPU type (%08x) in kernel", cputype);
743 return(ARCHITECTURE_NOT_PRESENT);
/* Align UMA allocations to the D-cache line mask computed above. */
745 uma_set_align(arm_dcache_align_mask);
/*
 * CPU setup for ARM9 (arm9_setup, per the #endif below): build the
 * SCTLR enable value and mask, apply the alignment-fault,
 * byte-order and vector-relocation options, flush the caches and
 * program the control register via cpu_control().
 */
757 int cpuctrl, cpuctrlmask;
759 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
760 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
761 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
762 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
763 CPU_CONTROL_ROUNDROBIN;
764 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
765 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
766 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
767 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
768 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
769 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
770 | CPU_CONTROL_ROUNDROBIN;
/* Alignment faults on unless the kernel option disables them. */
772 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
773 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
777 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
779 if (vector_page == ARM_VECTORS_HIGH)
780 cpuctrl |= CPU_CONTROL_VECRELOC;
782 /* Clear out the cache */
783 cpu_idcache_wbinv_all();
785 /* Set the control register (SCTLR) */
786 cpu_control(cpuctrlmask, cpuctrl);
789 #endif /* CPU_ARM9 */
791 #if defined(CPU_ARM9E)
/*
 * CPU setup shared by ARM9E-class cores (installed as the cf_setup
 * hook, arm10_setup, in the armv5_ec and sheeva tables above):
 * compose the SCTLR value, flush the caches and write the control
 * register unconditionally (mask 0xffffffff).
 */
795 int cpuctrl, cpuctrlmask;
797 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
798 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
799 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
800 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
801 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
802 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
803 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
804 | CPU_CONTROL_BPRD_ENABLE
805 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
806 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
808 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
812 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
815 /* Clear out the cache */
816 cpu_idcache_wbinv_all();
818 /* Now really make sure they are clean. */
819 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
821 if (vector_page == ARM_VECTORS_HIGH)
822 cpuctrl |= CPU_CONTROL_VECRELOC;
824 /* Set the control register */
825 cpu_control(0xffffffff, cpuctrl);
/* Flush again now that the new control bits are live. */
828 cpu_idcache_wbinv_all();
830 #endif /* CPU_ARM9E || CPU_ARM10 */
832 #if defined(CPU_ARM1176) \
833 || defined(CPU_MV_PJ4B) \
834 || defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
/*
 * Enable the performance-monitor cycle counter (CCNT) so it can serve
 * as a cycle source, optionally granting userland access via
 * PMUSERENR when _PMC_USER_READ_WRITE_ is configured.
 */
836 cpu_scc_setup_ccnt(void)
838 /* This is how you give userland access to the CCNT and PMCn
840 * BEWARE! This gives write access also, which may not be what
843 #ifdef _PMC_USER_READ_WRITE_
844 /* Set PMUSERENR[0] to allow userland access */
845 cp15_pmuserenr_set(1);
847 #if defined(CPU_ARM1176)
848 /* Set PMCR[2,0] to enable counters and reset CCNT */
851 /* Set up the PMCCNTR register as a cyclecounter:
852 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
853 * Set PMCR[2,0] to enable counters and reset CCNT
854 * Set PMCNTENSET to 0x80000000 to enable CCNT */
855 cp15_pminten_clr(0xFFFFFFFF);
857 cp15_pmcnten_set(0x80000000);
862 #if defined(CPU_ARM1176)
/* arm11x6_setup: selects an ACTLR errata workaround value/mask for
 * ARM1176JZS parts (ARM1176_AUXCTL_PHD), then enables the CCNT. */
866 uint32_t auxctrl, auxctrl_wax;
876 * Enable an errata workaround
878 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
879 auxctrl = ARM1176_AUXCTL_PHD;
880 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
883 tmp = cp15_actlr_get();
890 cpu_scc_setup_ccnt();
892 #endif /* CPU_ARM1176 */
/* pj4bv7_setup and cortexa_setup (bodies elided in this extract)
 * likewise finish by enabling the cycle counter. */
900 cpu_scc_setup_ccnt();
902 #endif /* CPU_MV_PJ4B */
904 #if defined(CPU_CORTEXA8) || defined(CPU_CORTEXA_MP) || defined(CPU_KRAIT)
910 cpu_scc_setup_ccnt();
912 #endif /* CPU_CORTEXA8 || CPU_CORTEXA_MP || CPU_KRAIT */
914 #if defined(CPU_FA526)
/*
 * CPU setup for FA526/FA626TE: compose the SCTLR enable value and
 * mask, apply alignment-fault/byte-order/vector-relocation options,
 * flush the caches and write the control register.
 */
918 int cpuctrl, cpuctrlmask;
920 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
921 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
922 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
923 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
924 | CPU_CONTROL_BPRD_ENABLE;
925 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
926 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
927 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
928 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
929 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
930 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
931 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
933 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
934 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
938 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
941 if (vector_page == ARM_VECTORS_HIGH)
942 cpuctrl |= CPU_CONTROL_VECRELOC;
944 /* Clear out the cache */
945 cpu_idcache_wbinv_all();
947 /* Set the control register */
948 cpu_control(0xffffffff, cpuctrl);
950 #endif /* CPU_FA526 */
952 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
953 defined(CPU_XSCALE_81342)
/*
 * CPU setup for XScale cores: program SCTLR (write-buffer bits 6:3
 * must stay enabled on XScale), then set the auxiliary control
 * register -- the K bit governs write coalescing, and core-3 parts
 * additionally get the LLR and MD bits.
 */
958 int cpuctrl, cpuctrlmask;
961 * The XScale Write Buffer is always enabled. Our option
962 * is to enable/disable coalescing. Note that bits 6:3
963 * must always be enabled.
966 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
967 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
968 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
969 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
970 | CPU_CONTROL_BPRD_ENABLE;
971 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
972 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
973 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
974 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
975 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
976 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
977 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
978 CPU_CONTROL_L2_ENABLE;
980 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
981 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
985 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
988 if (vector_page == ARM_VECTORS_HIGH)
989 cpuctrl |= CPU_CONTROL_VECRELOC;
990 #ifdef CPU_XSCALE_CORE3
991 cpuctrl |= CPU_CONTROL_L2_ENABLE;
994 /* Clear out the cache */
995 cpu_idcache_wbinv_all();
998 * Set the control register. Note that bits 6:3 must always
1001 /* cpu_control(cpuctrlmask, cpuctrl);*/
1002 cpu_control(0xffffffff, cpuctrl);
1004 /* Make sure write coalescing is turned on */
1005 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1007 #ifdef XSCALE_NO_COALESCE_WRITES
1008 auxctl |= XSCALE_AUXCTL_K;
1010 auxctl &= ~XSCALE_AUXCTL_K;
1012 #ifdef CPU_XSCALE_CORE3
1013 auxctl |= XSCALE_AUXCTL_LLR;
1014 auxctl |= XSCALE_AUXCTL_MD_MASK;
1016 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1019 #endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */