1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * SPDX-License-Identifier: BSD-4-Clause
6 * arm9 support code Copyright (C) 2001 ARM Ltd
7 * Copyright (c) 1997 Mark Brinicombe.
8 * Copyright (c) 1997 Causality Limited
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Causality Limited.
22 * 4. The name of Causality Limited may not be used to endorse or promote
23 * products derived from this software without specific prior written
26 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * RiscBSD kernel project
42 * C functions for supporting CPU / MMU / TLB specific operations.
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
49 #include <sys/param.h>
50 #include <sys/systm.h>
52 #include <sys/mutex.h>
54 #include <machine/bus.h>
55 #include <machine/cpu.h>
56 #include <machine/disassem.h>
62 #include <machine/cpufunc.h>
64 #if defined(CPU_XSCALE_81342)
65 #include <arm/xscale/i8134x/i81342reg.h>
68 /* PRIMARY CACHE VARIABLES */
/* L1 I-cache line size in bytes; derived from the CP15 cache type register
 * in get_cachetype_cp15() (pre-ARMv7 format). */
70 int arm_picache_line_size;
/* L1 D-cache totals; on unified-cache parts these describe the unified cache. */
73 int arm_pdcache_size; /* and unified */
74 int arm_pdcache_line_size;
/* Nonzero when the I- and D-caches are unified (CP15 CTR S bit clear). */
78 int arm_pcache_unified;
/* arm_dcache_align - 1; used for aligning DMA/cache-maintenance ranges. */
81 int arm_dcache_align_mask;
/* ARMv7 only: raw CLIDR value read in get_cachetype_cp15(). */
83 u_int arm_cache_level;
/* ARMv7 only: per-level CCSIDR values, indexed by the CSSELR selector. */
84 u_int arm_cache_type[14];
/*
 * CPU primitive dispatch table for ARM9 (ARMv4) cores.  These parts have
 * no L2 cache, so every l2cache_* slot is a no-op.  Slot meanings are in
 * the trailing comments; the table is installed by the CPU-identification
 * code below.
 */
88 struct cpu_functions arm9_cpufuncs = {
91 cpufunc_nullop, /* cpwait */
95 cpufunc_control, /* control */
96 arm9_setttb, /* Setttb */
100 armv4_tlb_flushID, /* tlb_flushID */
101 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
102 armv4_tlb_flushD, /* tlb_flushD */
103 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
105 /* Cache operations */
107 arm9_icache_sync_range, /* icache_sync_range */
109 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
110 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
111 arm9_dcache_inv_range, /* dcache_inv_range */
112 arm9_dcache_wb_range, /* dcache_wb_range */
114 armv4_idcache_inv_all, /* idcache_inv_all */
115 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
116 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
117 cpufunc_nullop, /* l2cache_wbinv_all */
118 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
119 (void *)cpufunc_nullop, /* l2cache_inv_range */
120 (void *)cpufunc_nullop, /* l2cache_wb_range */
121 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
123 /* Other functions */
125 armv4_drain_writebuf, /* drain_writebuf */
127 (void *)cpufunc_nullop, /* sleep */
131 arm9_context_switch, /* context_switch */
133 arm9_setup /* cpu setup */
136 #endif /* CPU_ARM9 */
138 #if defined(CPU_ARM9E)
/*
 * Dispatch table for ARMv5 "EC" cores (ARM926EJ-S and friends).  Mirrors
 * arm9_cpufuncs but substitutes the armv5_ec_* cache primitives; no L2
 * cache, so all l2cache_* slots are no-ops.
 */
139 struct cpu_functions armv5_ec_cpufuncs = {
142 cpufunc_nullop, /* cpwait */
146 cpufunc_control, /* control */
147 armv5_ec_setttb, /* Setttb */
151 armv4_tlb_flushID, /* tlb_flushID */
152 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
153 armv4_tlb_flushD, /* tlb_flushD */
154 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
156 /* Cache operations */
158 armv5_ec_icache_sync_range, /* icache_sync_range */
160 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
161 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
162 armv5_ec_dcache_inv_range, /* dcache_inv_range */
163 armv5_ec_dcache_wb_range, /* dcache_wb_range */
165 armv4_idcache_inv_all, /* idcache_inv_all */
166 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
167 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
169 cpufunc_nullop, /* l2cache_wbinv_all */
170 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
171 (void *)cpufunc_nullop, /* l2cache_inv_range */
172 (void *)cpufunc_nullop, /* l2cache_wb_range */
173 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
175 /* Other functions */
177 armv4_drain_writebuf, /* drain_writebuf */
179 (void *)cpufunc_nullop, /* sleep */
183 arm9_context_switch, /* context_switch */
185 arm10_setup /* cpu setup */
/*
 * Dispatch table for Marvell Sheeva (Feroceon, ARMv5TE) cores.  Unlike the
 * plain ARMv5 table above, these parts have a real L2 cache, so the
 * sheeva_l2cache_* primitives are wired in, and the ranged L1 D-cache ops
 * use the sheeva_* variants.
 */
189 struct cpu_functions sheeva_cpufuncs = {
192 cpufunc_nullop, /* cpwait */
196 cpufunc_control, /* control */
197 sheeva_setttb, /* Setttb */
201 armv4_tlb_flushID, /* tlb_flushID */
202 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
203 armv4_tlb_flushD, /* tlb_flushD */
204 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
206 /* Cache operations */
208 armv5_ec_icache_sync_range, /* icache_sync_range */
210 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
211 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
212 sheeva_dcache_inv_range, /* dcache_inv_range */
213 sheeva_dcache_wb_range, /* dcache_wb_range */
215 armv4_idcache_inv_all, /* idcache_inv_all */
216 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
217 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
219 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
220 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
221 sheeva_l2cache_inv_range, /* l2cache_inv_range */
222 sheeva_l2cache_wb_range, /* l2cache_wb_range */
223 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
225 /* Other functions */
227 armv4_drain_writebuf, /* drain_writebuf */
229 sheeva_cpu_sleep, /* sleep */
233 arm9_context_switch, /* context_switch */
235 arm10_setup /* cpu setup */
237 #endif /* CPU_ARM9E */
/*
 * Dispatch table for Marvell PJ4B (ARMv7) cores, using designated
 * initializers: slots not listed here are zero and are presumably handled
 * by generic ARMv7 paths — TODO confirm against struct cpu_functions.
 * L2 maintenance is a no-op here (external L2 handled elsewhere, if any).
 */
240 struct cpu_functions pj4bv7_cpufuncs = {
242 /* Cache operations */
243 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
244 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
245 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
246 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
247 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
249 /* Other functions */
250 .cf_sleep = (void *)cpufunc_nullop,
253 .cf_setup = pj4bv7_setup
255 #endif /* CPU_MV_PJ4B */
257 #if defined(CPU_XSCALE_PXA2X0)
/*
 * Dispatch table for Intel XScale PXA2x0 cores.  XScale needs an explicit
 * cpwait (CP15 drain) after coprocessor writes, hence xscale_cpwait in the
 * cpwait slot; no L2 cache on these parts.
 */
259 struct cpu_functions xscale_cpufuncs = {
262 xscale_cpwait, /* cpwait */
266 xscale_control, /* control */
267 xscale_setttb, /* setttb */
271 armv4_tlb_flushID, /* tlb_flushID */
272 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
273 armv4_tlb_flushD, /* tlb_flushD */
274 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
276 /* Cache operations */
278 xscale_cache_syncI_rng, /* icache_sync_range */
280 xscale_cache_purgeD, /* dcache_wbinv_all */
281 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
282 xscale_cache_flushD_rng, /* dcache_inv_range */
283 xscale_cache_cleanD_rng, /* dcache_wb_range */
285 xscale_cache_flushID, /* idcache_inv_all */
286 xscale_cache_purgeID, /* idcache_wbinv_all */
287 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
288 cpufunc_nullop, /* l2cache_wbinv_all */
289 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
290 (void *)cpufunc_nullop, /* l2cache_inv_range */
291 (void *)cpufunc_nullop, /* l2cache_wb_range */
292 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
294 /* Other functions */
296 armv4_drain_writebuf, /* drain_writebuf */
298 xscale_cpu_sleep, /* sleep */
302 xscale_context_switch, /* context_switch */
304 xscale_setup /* cpu setup */
307 /* CPU_XSCALE_PXA2X0 */
309 #ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the XScale "core 3" (i81342) parts.  Differs from
 * xscale_cpufuncs in using the xscalec3_* L1 primitives and real
 * xscalec3_l2cache_* maintenance, since these parts have an L2 cache.
 */
310 struct cpu_functions xscalec3_cpufuncs = {
313 xscale_cpwait, /* cpwait */
317 xscale_control, /* control */
318 xscalec3_setttb, /* setttb */
322 armv4_tlb_flushID, /* tlb_flushID */
323 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
324 armv4_tlb_flushD, /* tlb_flushD */
325 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
327 /* Cache operations */
329 xscalec3_cache_syncI_rng, /* icache_sync_range */
331 xscalec3_cache_purgeD, /* dcache_wbinv_all */
332 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
333 xscale_cache_flushD_rng, /* dcache_inv_range */
334 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
336 xscale_cache_flushID, /* idcache_inv_all */
337 xscalec3_cache_purgeID, /* idcache_wbinv_all */
338 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
339 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
340 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
341 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
342 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
343 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
345 /* Other functions */
347 armv4_drain_writebuf, /* drain_writebuf */
349 xscale_cpu_sleep, /* sleep */
353 xscalec3_context_switch, /* context_switch */
355 xscale_setup /* cpu setup */
357 #endif /* CPU_XSCALE_81342 */
360 #if defined(CPU_FA526)
/*
 * Dispatch table for Faraday FA526/FA626TE cores (ARMv4-class).  Uses the
 * fa526_* cache/TLB/context primitives; no L2 cache, so all l2cache_*
 * slots are no-ops.
 */
361 struct cpu_functions fa526_cpufuncs = {
364 cpufunc_nullop, /* cpwait */
368 cpufunc_control, /* control */
369 fa526_setttb, /* setttb */
373 armv4_tlb_flushID, /* tlb_flushID */
374 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
375 armv4_tlb_flushD, /* tlb_flushD */
376 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
378 /* Cache operations */
380 fa526_icache_sync_range, /* icache_sync_range */
382 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
383 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
384 fa526_dcache_inv_range, /* dcache_inv_range */
385 fa526_dcache_wb_range, /* dcache_wb_range */
387 armv4_idcache_inv_all, /* idcache_inv_all */
388 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
389 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
390 cpufunc_nullop, /* l2cache_wbinv_all */
391 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
392 (void *)cpufunc_nullop, /* l2cache_inv_range */
393 (void *)cpufunc_nullop, /* l2cache_wb_range */
394 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
396 /* Other functions */
398 armv4_drain_writebuf, /* drain_writebuf */
400 fa526_cpu_sleep, /* sleep */
405 fa526_context_switch, /* context_switch */
407 fa526_setup /* cpu setup */
409 #endif /* CPU_FA526 */
411 #if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 (ARMv6) cores, designated-initializer style:
 * unlisted slots are zero and presumably served by common ARMv6 code —
 * TODO confirm against struct cpu_functions.  L2 ops are no-ops; sleep and
 * setup use the arm11x6_* implementations.
 */
412 struct cpu_functions arm1176_cpufuncs = {
414 /* Cache operations */
415 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
416 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
417 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
418 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
419 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
421 /* Other functions */
422 .cf_sleep = arm11x6_sleep,
425 .cf_setup = arm11x6_setup
427 #endif /*CPU_ARM1176 */
429 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A and Qualcomm Krait (ARMv7) cores.  The
 * default L2 slots are no-ops; per the note below they are replaced at
 * run time when a PL310 outer cache is enabled.
 */
430 struct cpu_functions cortexa_cpufuncs = {
432 /* Cache operations */
435 * Note: For CPUs using the PL310 the L2 ops are filled in when the
436 * L2 cache controller is actually enabled.
438 .cf_l2cache_wbinv_all = cpufunc_nullop,
439 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
440 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
441 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
442 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
444 /* Other functions */
445 .cf_sleep = armv7_cpu_sleep,
448 .cf_setup = cortexa_setup
450 #endif /* CPU_CORTEXA || CPU_KRAIT */
453 * Global constants also used by locore.s
/* The active dispatch table; one of the per-CPU tables above is copied in
 * by the CPU-identification code below. */
456 struct cpu_functions cpufuncs;
459 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */
462 #if defined(CPU_ARM9) || \
463 defined (CPU_ARM9E) || \
464 defined(CPU_ARM1176) || \
465 defined(CPU_XSCALE_PXA2X0) || \
466 defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
467 defined(CPU_XSCALE_81342) || \
468 defined(CPU_CORTEXA) || defined(CPU_KRAIT)
470 /* Global cache line sizes, use 32 as default */
471 int arm_dcache_min_line_size = 32;
472 int arm_icache_min_line_size = 32;
473 int arm_idcache_min_line_size = 32;
475 static void get_cachetype_cp15(void);
477 /* Additional cache information local to this file. Log2 of some of the
/* log2 geometry of the D-cache, filled in by get_cachetype_cp15() and
 * consumed by the ARM9 set/index loop parameters computed below. */
479 static int arm_dcache_l2_nsets;
480 static int arm_dcache_l2_assoc;
481 static int arm_dcache_l2_linesize;
/*
 * Probe cache geometry from CP15 and fill in the arm_*cache_* globals.
 * Handles both the ARMv7 CLIDR/CCSIDR scheme and the older cache type
 * register format; see the ARM ARM for the field encodings.
 */
484 get_cachetype_cp15(void)
486 u_int ctype, isize, dsize, cpuid;
487 u_int clevel, csize, i, sel;
/* Read the CP15 Cache Type Register (c0, opcode2 1) into ctype. */
491 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
496 * ...and thus spake the ARM ARM:
498 * If an <opcode2> value corresponding to an unimplemented or
499 * reserved ID register is encountered, the System Control
500 * processor returns the value of the main ID register.
/* ARMv7 format: walk CLIDR levels and read a CCSIDR per level. */
505 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
506 /* Resolve minimal cache line sizes */
507 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
508 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
509 arm_idcache_min_line_size =
510 min(arm_icache_min_line_size, arm_dcache_min_line_size);
/* Read CLIDR (c0, c0, 1 with opcode1 1). */
512 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
514 arm_cache_level = clevel;
515 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
/* 3 bits of CLIDR per level; stop at the first absent level or level 7. */
517 while ((type = (clevel & 0x7)) && i < 7) {
518 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
519 type == CACHE_SEP_CACHE) {
/* Select the level in CSSELR, then read its CCSIDR. */
521 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
523 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
525 arm_cache_type[sel] = csize;
526 arm_dcache_align = 1 <<
527 (CPUV7_CT_xSIZE_LEN(csize) + 4);
528 arm_dcache_align_mask = arm_dcache_align - 1;
530 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
532 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
534 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
536 arm_cache_type[sel] = csize;
/* Pre-v7 format: S bit clear means a unified I/D cache. */
542 if ((ctype & CPU_CT_S) == 0)
543 arm_pcache_unified = 1;
546 * If you want to know how this code works, go read the ARM ARM.
549 arm_pcache_type = CPU_CT_CTYPE(ctype);
/* Separate caches: decode the I-cache size/assoc/line fields. */
551 if (arm_pcache_unified == 0) {
552 isize = CPU_CT_ISIZE(ctype);
553 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
554 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
555 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
556 if (isize & CPU_CT_xSIZE_M)
557 arm_picache_line_size = 0; /* not present */
559 arm_picache_ways = 1;
561 arm_picache_ways = multiplier <<
562 (CPU_CT_xSIZE_ASSOC(isize) - 1);
564 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
/* D-cache (or unified) geometry, same decoding as above. */
567 dsize = CPU_CT_DSIZE(ctype);
568 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
569 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
570 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
571 if (dsize & CPU_CT_xSIZE_M)
572 arm_pdcache_line_size = 0; /* not present */
574 arm_pdcache_ways = 1;
576 arm_pdcache_ways = multiplier <<
577 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
579 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
581 arm_dcache_align = arm_pdcache_line_size;
/* Log2 forms consumed by the ARM9 set/way loop parameters below. */
583 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
584 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
585 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
586 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
589 arm_dcache_align_mask = arm_dcache_align - 1;
592 #endif /* ARM9 || XSCALE */
595 * Cannot panic here as we may not have a console yet ...
/*
 * CPU identification: read the main ID register, match it against each
 * compiled-in CPU family, install the corresponding cpu_functions table,
 * probe cache geometry and initialize the pmap PTE layout.  Falls through
 * to a panic when no compiled-in family matches.
 */
601 cputype = cpu_ident();
602 cputype &= CPU_ID_CPU_MASK;
/* ARM Ltd / TI parts with a 0x9xxx part number: ARM9 family. */
605 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
606 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
607 (cputype & 0x0000f000) == 0x00009000) {
608 cpufuncs = arm9_cpufuncs;
609 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
610 get_cachetype_cp15();
/* Precompute the set/index stepping used by the ARM9 set/way
 * D-cache loops, from the log2 geometry probed above. */
611 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
612 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
613 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
614 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
615 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
616 pmap_pte_init_generic();
619 #endif /* CPU_ARM9 */
620 #if defined(CPU_ARM9E)
/* Marvell Feroceon/Sheeva variants. */
621 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
622 cputype == CPU_ID_MV88FR571_41) {
623 uint32_t sheeva_ctrl;
625 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
628 * Workaround for Marvell MV78100 CPU: Cache prefetch
629 * mechanism may affect the cache coherency validity,
630 * so it needs to be disabled.
632 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
633 * L2 Prefetching Mechanism) for details.
635 if (cputype == CPU_ID_MV88FR571_VD ||
636 cputype == CPU_ID_MV88FR571_41)
637 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
639 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
641 cpufuncs = sheeva_cpufuncs;
642 get_cachetype_cp15();
643 pmap_pte_init_generic();
645 } else if (cputype == CPU_ID_ARM926EJS) {
646 cpufuncs = armv5_ec_cpufuncs;
647 get_cachetype_cp15();
648 pmap_pte_init_generic();
651 #endif /* CPU_ARM9E */
652 #if defined(CPU_ARM1176)
653 if (cputype == CPU_ID_ARM1176JZS) {
654 cpufuncs = arm1176_cpufuncs;
655 get_cachetype_cp15();
658 #endif /* CPU_ARM1176 */
659 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/* All supported Cortex-A and Krait parts share one table. */
660 switch(cputype & CPU_ID_SCHEME_MASK) {
661 case CPU_ID_CORTEXA5:
662 case CPU_ID_CORTEXA7:
663 case CPU_ID_CORTEXA8:
664 case CPU_ID_CORTEXA9:
665 case CPU_ID_CORTEXA12:
666 case CPU_ID_CORTEXA15:
667 case CPU_ID_CORTEXA53:
668 case CPU_ID_CORTEXA57:
669 case CPU_ID_CORTEXA72:
670 case CPU_ID_KRAIT300:
671 cpufuncs = cortexa_cpufuncs;
672 get_cachetype_cp15();
677 #endif /* CPU_CORTEXA || CPU_KRAIT */
679 #if defined(CPU_MV_PJ4B)
680 if (cputype == CPU_ID_MV88SV581X_V7 ||
681 cputype == CPU_ID_MV88SV584X_V7 ||
682 cputype == CPU_ID_ARM_88SV581X_V7) {
683 cpufuncs = pj4bv7_cpufuncs;
684 get_cachetype_cp15();
687 #endif /* CPU_MV_PJ4B */
689 #if defined(CPU_FA526)
690 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
691 cpufuncs = fa526_cpufuncs;
692 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
693 get_cachetype_cp15();
694 pmap_pte_init_generic();
698 #endif /* CPU_FA526 */
700 #if defined(CPU_XSCALE_81342)
701 if (cputype == CPU_ID_81342) {
702 cpufuncs = xscalec3_cpufuncs;
703 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
704 get_cachetype_cp15();
705 pmap_pte_init_xscale();
708 #endif /* CPU_XSCALE_81342 */
709 #ifdef CPU_XSCALE_PXA2X0
710 /* ignore core revision to test PXA2xx CPUs */
711 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
712 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
713 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
715 cpufuncs = xscale_cpufuncs;
716 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
717 get_cachetype_cp15();
718 pmap_pte_init_xscale();
722 #endif /* CPU_XSCALE_PXA2X0 */
724 * Bzzzz. And the answer was ...
726 panic("No support for this CPU type (%08x) in kernel", cputype);
727 return(ARCHITECTURE_NOT_PRESENT);
/* Matched: align UMA allocations to the D-cache line mask. */
729 uma_set_align(arm_dcache_align_mask);
/*
 * ARM9 CPU setup body: build the desired SCTLR value (MMU, I/D caches,
 * write buffer, late aborts, round-robin replacement) plus a mask of the
 * bits this code manages, flush the caches, then program the control
 * register.  (Enclosing function signature not visible in this view —
 * presumably arm9_setup; confirm against the full file.)
 */
741 int cpuctrl, cpuctrlmask;
743 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
744 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
745 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
746 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
747 CPU_CONTROL_ROUNDROBIN;
748 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
749 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
750 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
751 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
752 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
753 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
754 | CPU_CONTROL_ROUNDROBIN;
/* Alignment faults on unless the kernel config disables them. */
756 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
757 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
761 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate the vector page to 0xffff0000 when configured high. */
763 if (vector_page == ARM_VECTORS_HIGH)
764 cpuctrl |= CPU_CONTROL_VECRELOC;
766 /* Clear out the cache */
767 cpu_idcache_wbinv_all();
769 /* Set the control register (SCTLR) */
770 cpu_control(cpuctrlmask, cpuctrl);
773 #endif /* CPU_ARM9 */
775 #if defined(CPU_ARM9E)
/*
 * ARMv5 (ARM9E/ARM10-class) CPU setup body: program SCTLR with MMU,
 * caches, write buffer and branch prediction enabled, flushing caches
 * before and after.  (Enclosing function signature not visible in this
 * view — presumably arm10_setup, referenced by the ARMv5 tables above.)
 */
779 int cpuctrl, cpuctrlmask;
781 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
782 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
783 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
784 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
785 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
786 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
787 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
788 | CPU_CONTROL_BPRD_ENABLE
789 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
791 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
792 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
796 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
799 /* Clear out the cache */
800 cpu_idcache_wbinv_all();
802 /* Now really make sure they are clean. */
/* CP15 c7,c7,0: invalidate both I and D caches directly. */
803 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
805 if (vector_page == ARM_VECTORS_HIGH)
806 cpuctrl |= CPU_CONTROL_VECRELOC;
808 /* Set the control register */
809 cpu_control(0xffffffff, cpuctrl);
/* Flush again so nothing stale survives the control-register change. */
812 cpu_idcache_wbinv_all();
814 #endif /* CPU_ARM9E || CPU_ARM10 */
816 #if defined(CPU_ARM1176) \
817 || defined(CPU_MV_PJ4B) \
818 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Configure the performance-monitor cycle counter (CCNT) and, when
 * _PMC_USER_READ_WRITE_ is defined, grant userland access to it via
 * PMUSERENR.  Shared by the ARM1176, PJ4B and Cortex-A setup paths.
 */
820 cpu_scc_setup_ccnt(void)
822 /* This is how you give userland access to the CCNT and PMCn
824 * BEWARE! This gives write access also, which may not be what
827 #ifdef _PMC_USER_READ_WRITE_
828 /* Set PMUSERENR[0] to allow userland access */
829 cp15_pmuserenr_set(1);
831 #if defined(CPU_ARM1176)
832 /* Set PMCR[2,0] to enable counters and reset CCNT */
835 /* Set up the PMCCNTR register as a cyclecounter:
836 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
837 * Set PMCR[2,0] to enable counters and reset CCNT
838 * Set PMCNTENSET to 0x80000000 to enable CCNT */
839 cp15_pminten_clr(0xFFFFFFFF);
841 cp15_pmcnten_set(0x80000000);
846 #if defined(CPU_ARM1176)
/*
 * ARM1176 setup body fragment: apply the ARM1176JZS r0 errata workaround
 * via the auxiliary control register (ACTLR), then enable the cycle
 * counter.  (Enclosing function signature not visible in this view —
 * presumably arm11x6_setup, referenced by arm1176_cpufuncs above.)
 */
850 uint32_t auxctrl, auxctrl_wax;
860 * Enable an errata workaround
862 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
863 auxctrl = ARM1176_AUXCTL_PHD;
864 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
/* Read-modify-write of ACTLR using the mask/value pair above. */
867 tmp = cp15_actlr_get();
874 cpu_scc_setup_ccnt();
876 #endif /* CPU_ARM1176 */
/* Tail of the PJ4B setup routine: enable the cycle counter. */
884 cpu_scc_setup_ccnt();
886 #endif /* CPU_MV_PJ4B */
888 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/* Tail of the Cortex-A/Krait setup routine: enable the cycle counter. */
894 cpu_scc_setup_ccnt();
896 #endif /* CPU_CORTEXA || CPU_KRAIT */
898 #if defined(CPU_FA526)
/*
 * FA526 CPU setup body: same SCTLR programming pattern as the other
 * ARMv4-class setup routines — build value and managed-bit mask, flush
 * caches, write the control register.  (Enclosing function signature not
 * visible in this view — presumably fa526_setup.)
 */
902 int cpuctrl, cpuctrlmask;
904 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
905 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
906 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
907 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
908 | CPU_CONTROL_BPRD_ENABLE;
909 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
910 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
911 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
912 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
913 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
914 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
915 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
917 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
918 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
922 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
925 if (vector_page == ARM_VECTORS_HIGH)
926 cpuctrl |= CPU_CONTROL_VECRELOC;
928 /* Clear out the cache */
929 cpu_idcache_wbinv_all();
931 /* Set the control register */
932 cpu_control(0xffffffff, cpuctrl);
934 #endif /* CPU_FA526 */
936 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342)
/*
 * XScale CPU setup body: program SCTLR, then tune the XScale auxiliary
 * control register (write-coalescing "K" bit, and core-3 LLR/mini-data
 * cache mode).  (Enclosing function signature not visible in this view —
 * presumably xscale_setup, shared by PXA2x0 and i81342 tables.)
 */
941 int cpuctrl, cpuctrlmask;
944 * The XScale Write Buffer is always enabled. Our option
945 * is to enable/disable coalescing. Note that bits 6:3
946 * must always be enabled.
949 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
950 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
951 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
952 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
953 | CPU_CONTROL_BPRD_ENABLE;
954 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
955 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
956 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
957 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
958 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
959 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
960 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
961 CPU_CONTROL_L2_ENABLE;
963 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
964 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
968 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
971 if (vector_page == ARM_VECTORS_HIGH)
972 cpuctrl |= CPU_CONTROL_VECRELOC;
/* Core 3 (i81342) has an L2 cache; turn it on in SCTLR. */
973 #ifdef CPU_XSCALE_CORE3
974 cpuctrl |= CPU_CONTROL_L2_ENABLE;
977 /* Clear out the cache */
978 cpu_idcache_wbinv_all();
981 * Set the control register. Note that bits 6:3 must always
984 /* cpu_control(cpuctrlmask, cpuctrl);*/
985 cpu_control(0xffffffff, cpuctrl);
987 /* Make sure write coalescing is turned on */
/* Read the auxiliary control register (CP15 c1, opcode2 1). */
988 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
990 #ifdef XSCALE_NO_COALESCE_WRITES
991 auxctl |= XSCALE_AUXCTL_K;
993 auxctl &= ~XSCALE_AUXCTL_K;
995 #ifdef CPU_XSCALE_CORE3
996 auxctl |= XSCALE_AUXCTL_LLR;
997 auxctl |= XSCALE_AUXCTL_MD_MASK;
/* Write the updated auxiliary control register back. */
999 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1002 #endif /* CPU_XSCALE_PXA2X0 */