1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 * products derived from this software without specific prior written
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * RiscBSD kernel project
40 * C functions for supporting CPU / MMU / TLB specific operations.
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/systm.h>
50 #include <sys/mutex.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
63 #if defined(CPU_XSCALE_81342)
64 #include <arm/xscale/i8134x/i81342reg.h>
67 #ifdef CPU_XSCALE_IXP425
68 #include <arm/xscale/ixp425/ixp425reg.h>
69 #include <arm/xscale/ixp425/ixp425var.h>
72 /* PRIMARY CACHE VARIABLES */
74 int arm_picache_line_size;
77 int arm_pdcache_size; /* and unified */
78 int arm_pdcache_line_size;
82 int arm_pcache_unified;
85 int arm_dcache_align_mask;
87 u_int arm_cache_level;
88 u_int arm_cache_type[14];
/*
 * cpu_functions dispatch table for ARM9-family cores (selected by
 * set_cpufuncs() for ARM Ltd / TI parts with ID nibble 0x9000).
 * Positional initializer; the trailing comment on each line names the
 * struct member being set.  No L2 cache on these cores, so all l2cache
 * slots are no-ops.
 * NOTE(review): the leading number on each line is the pre-existing
 * original line number; interleaved lines are absent from this excerpt.
 */
92 struct cpu_functions arm9_cpufuncs = {
95 cpufunc_nullop, /* cpwait */
99 cpufunc_control, /* control */
100 arm9_setttb, /* Setttb */
104 armv4_tlb_flushID, /* tlb_flushID */
105 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
106 armv4_tlb_flushD, /* tlb_flushD */
107 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
109 /* Cache operations */
111 arm9_icache_sync_range, /* icache_sync_range */
113 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
114 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
115 arm9_dcache_inv_range, /* dcache_inv_range */
116 arm9_dcache_wb_range, /* dcache_wb_range */
118 armv4_idcache_inv_all, /* idcache_inv_all */
119 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
120 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
121 cpufunc_nullop, /* l2cache_wbinv_all */
122 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
123 (void *)cpufunc_nullop, /* l2cache_inv_range */
124 (void *)cpufunc_nullop, /* l2cache_wb_range */
125 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
127 /* Other functions */
129 armv4_drain_writebuf, /* drain_writebuf */
131 (void *)cpufunc_nullop, /* sleep */
135 arm9_context_switch, /* context_switch */
137 arm9_setup /* cpu setup */
140 #endif /* CPU_ARM9 */
142 #if defined(CPU_ARM9E)
/*
 * cpu_functions dispatch table for ARMv5 "EC" cores (used for the
 * ARM926EJ-S in set_cpufuncs()).  TLB ops are shared with armv4/arm9;
 * cache ops are the armv5_ec_* variants.  No L2 cache: l2cache slots
 * are no-ops.
 */
143 struct cpu_functions armv5_ec_cpufuncs = {
146 cpufunc_nullop, /* cpwait */
150 cpufunc_control, /* control */
151 armv5_ec_setttb, /* Setttb */
155 armv4_tlb_flushID, /* tlb_flushID */
156 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
157 armv4_tlb_flushD, /* tlb_flushD */
158 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
160 /* Cache operations */
162 armv5_ec_icache_sync_range, /* icache_sync_range */
164 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
165 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
166 armv5_ec_dcache_inv_range, /* dcache_inv_range */
167 armv5_ec_dcache_wb_range, /* dcache_wb_range */
169 armv4_idcache_inv_all, /* idcache_inv_all */
170 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
171 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
173 cpufunc_nullop, /* l2cache_wbinv_all */
174 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
175 (void *)cpufunc_nullop, /* l2cache_inv_range */
176 (void *)cpufunc_nullop, /* l2cache_wb_range */
177 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
179 /* Other functions */
181 armv4_drain_writebuf, /* drain_writebuf */
183 (void *)cpufunc_nullop, /* sleep */
187 arm9_context_switch, /* context_switch */
189 arm10_setup /* cpu setup */
/*
 * cpu_functions dispatch table for Marvell Sheeva (Feroceon 88FR131/571)
 * cores.  Mostly the armv5_ec_* cache ops, with Sheeva-specific ranged
 * D-cache ops and a real L2 cache (sheeva_l2cache_*), plus a CPU sleep
 * routine.
 */
193 struct cpu_functions sheeva_cpufuncs = {
196 cpufunc_nullop, /* cpwait */
200 cpufunc_control, /* control */
201 sheeva_setttb, /* Setttb */
205 armv4_tlb_flushID, /* tlb_flushID */
206 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
207 armv4_tlb_flushD, /* tlb_flushD */
208 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
210 /* Cache operations */
212 armv5_ec_icache_sync_range, /* icache_sync_range */
214 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
215 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
216 sheeva_dcache_inv_range, /* dcache_inv_range */
217 sheeva_dcache_wb_range, /* dcache_wb_range */
219 armv4_idcache_inv_all, /* idcache_inv_all */
220 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
221 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
223 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
224 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
225 sheeva_l2cache_inv_range, /* l2cache_inv_range */
226 sheeva_l2cache_wb_range, /* l2cache_wb_range */
227 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
229 /* Other functions */
231 armv4_drain_writebuf, /* drain_writebuf */
233 sheeva_cpu_sleep, /* sleep */
237 arm9_context_switch, /* context_switch */
239 arm10_setup /* cpu setup */
241 #endif /* CPU_ARM9E */
/*
 * cpu_functions dispatch table for Marvell PJ4B (ARMv7) cores, using
 * designated initializers: only the members set here are non-default.
 * L2 slots are no-ops at the CPU level on this table.
 */
244 struct cpu_functions pj4bv7_cpufuncs = {
246 .cf_control = cpufunc_control,
247 .cf_setttb = armv7_setttb,
249 /* Cache operations */
250 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
251 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
252 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
253 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
254 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
256 /* Other functions */
257 .cf_drain_writebuf = armv7_drain_writebuf,
258 .cf_sleep = (void *)cpufunc_nullop,
261 .cf_setup = pj4bv7_setup
263 #endif /* CPU_MV_PJ4B */
265 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
/*
 * cpu_functions dispatch table for Intel XScale PXA2x0 / IXP425 cores.
 * Uses xscale_cpwait (a real coprocessor drain, unlike the nullop used
 * by the tables above) and XScale-specific cache purge/clean/flush ops.
 * No on-chip L2: l2cache slots are no-ops.
 */
267 struct cpu_functions xscale_cpufuncs = {
270 xscale_cpwait, /* cpwait */
274 xscale_control, /* control */
275 xscale_setttb, /* setttb */
279 armv4_tlb_flushID, /* tlb_flushID */
280 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
281 armv4_tlb_flushD, /* tlb_flushD */
282 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
284 /* Cache operations */
286 xscale_cache_syncI_rng, /* icache_sync_range */
288 xscale_cache_purgeD, /* dcache_wbinv_all */
289 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
290 xscale_cache_flushD_rng, /* dcache_inv_range */
291 xscale_cache_cleanD_rng, /* dcache_wb_range */
293 xscale_cache_flushID, /* idcache_inv_all */
294 xscale_cache_purgeID, /* idcache_wbinv_all */
295 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
296 cpufunc_nullop, /* l2cache_wbinv_all */
297 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
298 (void *)cpufunc_nullop, /* l2cache_inv_range */
299 (void *)cpufunc_nullop, /* l2cache_wb_range */
300 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
302 /* Other functions */
304 armv4_drain_writebuf, /* drain_writebuf */
306 xscale_cpu_sleep, /* sleep */
310 xscale_context_switch, /* context_switch */
312 xscale_setup /* cpu setup */
315 /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */
317 #ifdef CPU_XSCALE_81342
/*
 * cpu_functions dispatch table for the XScale core 3 (i81342).  Differs
 * from xscale_cpufuncs in the xscalec3_* TTB/cache routines and in
 * having real L2 cache operations (xscalec3_l2cache_*).
 */
318 struct cpu_functions xscalec3_cpufuncs = {
321 xscale_cpwait, /* cpwait */
325 xscale_control, /* control */
326 xscalec3_setttb, /* setttb */
330 armv4_tlb_flushID, /* tlb_flushID */
331 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
332 armv4_tlb_flushD, /* tlb_flushD */
333 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
335 /* Cache operations */
337 xscalec3_cache_syncI_rng, /* icache_sync_range */
339 xscalec3_cache_purgeD, /* dcache_wbinv_all */
340 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
341 xscale_cache_flushD_rng, /* dcache_inv_range */
342 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
344 xscale_cache_flushID, /* idcache_inv_all */
345 xscalec3_cache_purgeID, /* idcache_wbinv_all */
346 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
347 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
348 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
349 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
350 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
351 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
353 /* Other functions */
355 armv4_drain_writebuf, /* drain_writebuf */
357 xscale_cpu_sleep, /* sleep */
361 xscalec3_context_switch, /* context_switch */
363 xscale_setup /* cpu setup */
365 #endif /* CPU_XSCALE_81342 */
368 #if defined(CPU_FA526)
/*
 * cpu_functions dispatch table for the Faraday FA526/FA626TE cores.
 * ARMv4-style TLB ops shared with armv4, FA526-specific cache ops,
 * no L2 cache (l2cache slots are no-ops).
 */
369 struct cpu_functions fa526_cpufuncs = {
372 cpufunc_nullop, /* cpwait */
376 cpufunc_control, /* control */
377 fa526_setttb, /* setttb */
381 armv4_tlb_flushID, /* tlb_flushID */
382 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
383 armv4_tlb_flushD, /* tlb_flushD */
384 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
386 /* Cache operations */
388 fa526_icache_sync_range, /* icache_sync_range */
390 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
391 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
392 fa526_dcache_inv_range, /* dcache_inv_range */
393 fa526_dcache_wb_range, /* dcache_wb_range */
395 armv4_idcache_inv_all, /* idcache_inv_all */
396 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
397 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
398 cpufunc_nullop, /* l2cache_wbinv_all */
399 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
400 (void *)cpufunc_nullop, /* l2cache_inv_range */
401 (void *)cpufunc_nullop, /* l2cache_wb_range */
402 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
404 /* Other functions */
406 armv4_drain_writebuf, /* drain_writebuf */
408 fa526_cpu_sleep, /* sleep */
413 fa526_context_switch, /* context_switch */
415 fa526_setup /* cpu setup */
417 #endif /* CPU_FA526 */
419 #if defined(CPU_ARM1176)
/*
 * cpu_functions dispatch table for the ARM1176 (ARMv6) core, using
 * designated initializers.  arm11x6_* routines handle TTB, sleep and
 * setup; the arm11 write-buffer drain is shared.  L2 slots are no-ops.
 */
420 struct cpu_functions arm1176_cpufuncs = {
422 .cf_control = cpufunc_control,
423 .cf_setttb = arm11x6_setttb,
425 /* Cache operations */
426 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
427 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
428 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
429 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
430 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
432 /* Other functions */
433 .cf_drain_writebuf = arm11_drain_writebuf,
434 .cf_sleep = arm11x6_sleep,
437 .cf_setup = arm11x6_setup
439 #endif /*CPU_ARM1176 */
441 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * cpu_functions dispatch table for Cortex-A and Qualcomm Krait (ARMv7)
 * cores.  The L2 slots installed here are placeholders; per the note
 * below they are replaced at run time when a PL310 L2 controller is
 * attached.
 */
442 struct cpu_functions cortexa_cpufuncs = {
444 .cf_control = cpufunc_control,
445 .cf_setttb = armv7_setttb,
447 /* Cache operations */
450 * Note: For CPUs using the PL310 the L2 ops are filled in when the
451 * L2 cache controller is actually enabled.
453 .cf_l2cache_wbinv_all = cpufunc_nullop,
454 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
455 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
456 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
457 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
459 /* Other functions */
460 .cf_drain_writebuf = armv7_drain_writebuf,
461 .cf_sleep = armv7_cpu_sleep,
464 .cf_setup = cortexa_setup
466 #endif /* CPU_CORTEXA */
469 * Global constants also used by locore.s
472 struct cpu_functions cpufuncs;
475 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */
478 #if defined(CPU_ARM9) || \
479 defined (CPU_ARM9E) || \
480 defined(CPU_ARM1176) || \
481 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
482 defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
483 defined(CPU_XSCALE_81342) || \
484 defined(CPU_CORTEXA) || defined(CPU_KRAIT)
486 /* Global cache line sizes, use 32 as default */
487 int arm_dcache_min_line_size = 32;
488 int arm_icache_min_line_size = 32;
489 int arm_idcache_min_line_size = 32;
491 static void get_cachetype_cp15(void);
493 /* Additional cache information local to this file. Log2 of some of the
495 static int arm_dcache_l2_nsets;
496 static int arm_dcache_l2_assoc;
497 static int arm_dcache_l2_linesize;
/*
 * Body of get_cachetype_cp15(): probe CP15 ID registers to fill in the
 * global arm_*cache_* geometry variables used by the cache maintenance
 * routines and uma_set_align().
 * NOTE(review): this excerpt is missing interleaved lines (including the
 * function signature); the leading number on each line is the original
 * line number.
 */
502 u_int ctype, isize, dsize, cpuid;
503 u_int clevel, csize, i, sel;
507 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
512 * ...and thus spake the ARM ARM:
514 * If an <opcode2> value corresponding to an unimplemented or
515 * reserved ID register is encountered, the System Control
516 * processor returns the value of the main ID register.
/*
 * ARMv7-format Cache Type Register: derive minimum line sizes from
 * CTR.DminLine/IminLine (log2 words, hence the +2 to get bytes), then
 * walk CLIDR/CCSIDR per cache level.
 */
521 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
522 /* Resolve minimal cache line sizes */
523 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
524 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
525 arm_idcache_min_line_size =
526 min(arm_icache_min_line_size, arm_dcache_min_line_size);
528 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
530 arm_cache_level = clevel;
531 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
/* Walk up to 7 cache levels, selecting each via CSSELR (mcr p15,2). */
533 while ((type = (clevel & 0x7)) && i < 7) {
534 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
535 type == CACHE_SEP_CACHE) {
537 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
539 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
541 arm_cache_type[sel] = csize;
542 arm_dcache_align = 1 <<
543 (CPUV7_CT_xSIZE_LEN(csize) + 4);
544 arm_dcache_align_mask = arm_dcache_align - 1;
546 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
548 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
550 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
552 arm_cache_type[sel] = csize;
/* Pre-v7 CTR format: CTR.S clear means a unified cache. */
558 if ((ctype & CPU_CT_S) == 0)
559 arm_pcache_unified = 1;
562 * If you want to know how this code works, go read the ARM ARM.
565 arm_pcache_type = CPU_CT_CTYPE(ctype);
567 if (arm_pcache_unified == 0) {
568 isize = CPU_CT_ISIZE(ctype);
/* M bit selects the 3/2 size multiplier per the pre-v7 CTR encoding. */
569 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
570 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
571 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
572 if (isize & CPU_CT_xSIZE_M)
573 arm_picache_line_size = 0; /* not present */
575 arm_picache_ways = 1;
577 arm_picache_ways = multiplier <<
578 (CPU_CT_xSIZE_ASSOC(isize) - 1);
580 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
/* Same decoding for the D (or unified) cache geometry. */
583 dsize = CPU_CT_DSIZE(ctype);
584 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
585 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
586 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
587 if (dsize & CPU_CT_xSIZE_M)
588 arm_pdcache_line_size = 0; /* not present */
590 arm_pdcache_ways = 1;
592 arm_pdcache_ways = multiplier <<
593 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
595 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
597 arm_dcache_align = arm_pdcache_line_size;
/* Log2 values consumed by the ARM9 set/index loop in set_cpufuncs(). */
599 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
600 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
601 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
602 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
605 arm_dcache_align_mask = arm_dcache_align - 1;
608 #endif /* ARM9 || XSCALE */
611 * Cannot panic here as we may not have a console yet ...
/*
 * Body of set_cpufuncs(): identify the CPU from the main ID register and
 * install the matching cpu_functions table, probe cache geometry, and
 * initialize the pmap PTE layout.  Each candidate core family is guarded
 * by its kernel-config option; an unrecognized CPU falls through to the
 * panic at the bottom.
 * NOTE(review): the function signature and some interleaved lines are
 * absent from this excerpt; leading numbers are original line numbers.
 */
617 cputype = cpu_ident();
618 cputype &= CPU_ID_CPU_MASK;
/* ARM9: ARM Ltd or TI implementor with part-number nibble 0x9000. */
621 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
622 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
623 (cputype & 0x0000f000) == 0x00009000) {
624 cpufuncs = arm9_cpufuncs;
625 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
626 get_cachetype_cp15();
/* Precompute set/index iterators from the log2 geometry just probed. */
627 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
628 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
629 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
630 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
631 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
632 pmap_pte_init_generic();
635 #endif /* CPU_ARM9 */
636 #if defined(CPU_ARM9E)
/* Marvell Feroceon (Sheeva) variants need extra control-ext setup. */
637 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
638 cputype == CPU_ID_MV88FR571_41) {
639 uint32_t sheeva_ctrl;
641 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
644 * Workaround for Marvell MV78100 CPU: Cache prefetch
645 * mechanism may affect the cache coherency validity,
646 * so it needs to be disabled.
648 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
649 * L2 Prefetching Mechanism) for details.
651 if (cputype == CPU_ID_MV88FR571_VD ||
652 cputype == CPU_ID_MV88FR571_41)
653 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
655 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
657 cpufuncs = sheeva_cpufuncs;
658 get_cachetype_cp15();
659 pmap_pte_init_generic();
661 } else if (cputype == CPU_ID_ARM926EJS) {
662 cpufuncs = armv5_ec_cpufuncs;
663 get_cachetype_cp15();
664 pmap_pte_init_generic();
667 #endif /* CPU_ARM9E */
668 #if defined(CPU_ARM1176)
669 if (cputype == CPU_ID_ARM1176JZS) {
670 cpufuncs = arm1176_cpufuncs;
671 get_cachetype_cp15();
674 #endif /* CPU_ARM1176 */
675 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
676 switch(cputype & CPU_ID_SCHEME_MASK) {
677 case CPU_ID_CORTEXA5:
678 case CPU_ID_CORTEXA7:
679 case CPU_ID_CORTEXA8:
680 case CPU_ID_CORTEXA9:
681 case CPU_ID_CORTEXA12:
682 case CPU_ID_CORTEXA15:
683 case CPU_ID_KRAIT300:
684 cpufuncs = cortexa_cpufuncs;
685 get_cachetype_cp15();
690 #endif /* CPU_CORTEXA */
692 #if defined(CPU_MV_PJ4B)
693 if (cputype == CPU_ID_MV88SV581X_V7 ||
694 cputype == CPU_ID_MV88SV584X_V7 ||
695 cputype == CPU_ID_ARM_88SV581X_V7) {
696 cpufuncs = pj4bv7_cpufuncs;
697 get_cachetype_cp15();
700 #endif /* CPU_MV_PJ4B */
702 #if defined(CPU_FA526)
703 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
704 cpufuncs = fa526_cpufuncs;
705 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
706 get_cachetype_cp15();
707 pmap_pte_init_generic();
711 #endif /* CPU_FA526 */
713 #if defined(CPU_XSCALE_81342)
714 if (cputype == CPU_ID_81342) {
715 cpufuncs = xscalec3_cpufuncs;
716 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
717 get_cachetype_cp15();
718 pmap_pte_init_xscale();
721 #endif /* CPU_XSCALE_81342 */
722 #ifdef CPU_XSCALE_PXA2X0
723 /* ignore core revision to test PXA2xx CPUs */
724 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
725 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
726 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
728 cpufuncs = xscale_cpufuncs;
729 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
730 get_cachetype_cp15();
731 pmap_pte_init_xscale();
735 #endif /* CPU_XSCALE_PXA2X0 */
736 #ifdef CPU_XSCALE_IXP425
737 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
738 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
740 cpufuncs = xscale_cpufuncs;
741 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
742 get_cachetype_cp15();
743 pmap_pte_init_xscale();
747 #endif /* CPU_XSCALE_IXP425 */
/* No table matched: this kernel has no support for the running CPU. */
749 * Bzzzz. And the answer was ...
751 panic("No support for this CPU type (%08x) in kernel", cputype);
752 return(ARCHITECTURE_NOT_PRESENT);
/* Align UMA allocations to the D-cache line mask probed above. */
754 uma_set_align(arm_dcache_align_mask);
/*
 * Body of arm9_setup(): compute the desired SCTLR (CP15 control
 * register) bits for ARM9, flush the caches, and write the control
 * register through cpu_control(mask, value).
 * NOTE(review): the function signature and some interleaved lines are
 * absent from this excerpt; leading numbers are original line numbers.
 */
766 int cpuctrl, cpuctrlmask;
/* Bits to set: MMU, 32-bit P/D spaces, caches, write buffer, late abort. */
768 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
769 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
770 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
771 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
772 CPU_CONTROL_ROUNDROBIN;
/* Full set of bits this routine is allowed to modify. */
773 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
774 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
775 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
776 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
777 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
778 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
779 | CPU_CONTROL_ROUNDROBIN;
781 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
782 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
786 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
/* Relocate the vector page to the high vectors if configured so. */
788 if (vector_page == ARM_VECTORS_HIGH)
789 cpuctrl |= CPU_CONTROL_VECRELOC;
791 /* Clear out the cache */
792 cpu_idcache_wbinv_all();
794 /* Set the control register (SCTLR) */
795 cpu_control(cpuctrlmask, cpuctrl);
798 #endif /* CPU_ARM9 */
800 #if defined(CPU_ARM9E)
/*
 * Body of arm10_setup() (also used by the ARM9E/Sheeva tables above):
 * build the SCTLR value, flush caches, write the control register, then
 * flush again since the cache configuration may have changed.
 * NOTE(review): the function signature and some interleaved lines are
 * absent from this excerpt; leading numbers are original line numbers.
 */
804 int cpuctrl, cpuctrlmask;
806 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
807 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
808 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
809 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
810 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
811 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
812 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
813 | CPU_CONTROL_BPRD_ENABLE
814 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
816 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
817 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
821 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
824 /* Clear out the cache */
825 cpu_idcache_wbinv_all();
827 /* Now really make sure they are clean. */
828 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
830 if (vector_page == ARM_VECTORS_HIGH)
831 cpuctrl |= CPU_CONTROL_VECRELOC;
833 /* Set the control register */
834 cpu_control(0xffffffff, cpuctrl);
/* Flush again: the cache setup may have changed with the new SCTLR. */
837 cpu_idcache_wbinv_all();
839 #endif /* CPU_ARM9E || CPU_ARM10 */
841 #if defined(CPU_ARM1176) \
842 || defined(CPU_MV_PJ4B) \
843 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Configure the performance monitor cycle counter (CCNT/PMCCNTR) so it
 * can be used as a cycle counter, optionally granting userland
 * read/write access via PMUSERENR.  Shared by the ARM1176, PJ4B and
 * Cortex-A setup paths.
 * NOTE(review): the return-type line and the closing brace are absent
 * from this excerpt; leading numbers are original line numbers.
 */
845 cpu_scc_setup_ccnt(void)
847 /* This is how you give userland access to the CCNT and PMCn
849 * BEWARE! This gives write access also, which may not be what
852 #ifdef _PMC_USER_READ_WRITE_
853 /* Set PMUSERENR[0] to allow userland access */
854 cp15_pmuserenr_set(1);
856 #if defined(CPU_ARM1176)
857 /* Set PMCR[2,0] to enable counters and reset CCNT */
860 /* Set up the PMCCNTR register as a cyclecounter:
861 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
862 * Set PMCR[2,0] to enable counters and reset CCNT
863 * Set PMCNTENSET to 0x80000000 to enable CCNT */
864 cp15_pminten_clr(0xFFFFFFFF);
866 cp15_pmcnten_set(0x80000000);
871 #if defined(CPU_ARM1176)
/*
 * Fragment of arm11x6_setup(): applies an ARM1176JZ-S errata workaround
 * by setting the PHD bit in the Auxiliary Control Register (ACTLR) and
 * then enables the cycle counter.
 * NOTE(review): most of this function's lines are absent from this
 * excerpt; leading numbers are original line numbers.
 */
875 uint32_t auxctrl, auxctrl_wax;
885 * Enable an errata workaround
887 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
888 auxctrl = ARM1176_AUXCTL_PHD;
889 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
892 tmp = cp15_actlr_get();
899 cpu_scc_setup_ccnt();
901 #endif /* CPU_ARM1176 */
909 cpu_scc_setup_ccnt();
911 #endif /* CPU_MV_PJ4B */
913 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
919 cpu_scc_setup_ccnt();
921 #endif /* CPU_CORTEXA */
923 #if defined(CPU_FA526)
/*
 * Body of fa526_setup(): build the SCTLR value for the FA526 (MMU,
 * caches, write buffer, late abort, branch prediction), flush the
 * caches, and write the control register.
 * NOTE(review): the function signature and some interleaved lines are
 * absent from this excerpt; leading numbers are original line numbers.
 */
927 int cpuctrl, cpuctrlmask;
929 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
930 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
931 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
932 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
933 | CPU_CONTROL_BPRD_ENABLE;
934 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
935 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
936 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
937 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
938 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
939 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
940 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
942 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
943 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
947 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
950 if (vector_page == ARM_VECTORS_HIGH)
951 cpuctrl |= CPU_CONTROL_VECRELOC;
953 /* Clear out the cache */
954 cpu_idcache_wbinv_all();
956 /* Set the control register */
957 cpu_control(0xffffffff, cpuctrl);
959 #endif /* CPU_FA526 */
961 #if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
962 defined(CPU_XSCALE_81342)
/*
 * Body of xscale_setup(): build and install the SCTLR value for XScale
 * cores, then configure write-buffer coalescing (and core-3 L2/memory
 * attributes) through the Auxiliary Control Register.
 * NOTE(review): the function signature and some interleaved lines are
 * absent from this excerpt; leading numbers are original line numbers.
 */
967 int cpuctrl, cpuctrlmask;
970 * The XScale Write Buffer is always enabled. Our option
971 * is to enable/disable coalescing. Note that bits 6:3
972 * must always be enabled.
975 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
976 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
977 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
978 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
979 | CPU_CONTROL_BPRD_ENABLE;
980 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
981 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
982 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
983 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
984 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
985 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
986 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
987 CPU_CONTROL_L2_ENABLE;
989 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
990 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
994 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
997 if (vector_page == ARM_VECTORS_HIGH)
998 cpuctrl |= CPU_CONTROL_VECRELOC;
999 #ifdef CPU_XSCALE_CORE3
/* Core 3 has an L2 cache controlled from SCTLR. */
1000 cpuctrl |= CPU_CONTROL_L2_ENABLE;
1003 /* Clear out the cache */
1004 cpu_idcache_wbinv_all();
1007 * Set the control register. Note that bits 6:3 must always
1010 /* cpu_control(cpuctrlmask, cpuctrl);*/
1011 cpu_control(0xffffffff, cpuctrl);
1013 /* Make sure write coalescing is turned on */
1014 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
/* AUXCTL K bit disables write coalescing when set. */
1016 #ifdef XSCALE_NO_COALESCE_WRITES
1017 auxctl |= XSCALE_AUXCTL_K;
1019 auxctl &= ~XSCALE_AUXCTL_K;
1021 #ifdef CPU_XSCALE_CORE3
1022 auxctl |= XSCALE_AUXCTL_LLR;
1023 auxctl |= XSCALE_AUXCTL_MD_MASK;
/* Write the updated Auxiliary Control Register back. */
1025 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1028 #endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */