1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 * products derived from this software without specific prior written
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * RiscBSD kernel project
40 * C functions for supporting CPU / MMU / TLB specific operations.
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <sys/param.h>
48 #include <sys/systm.h>
50 #include <sys/mutex.h>
52 #include <machine/bus.h>
53 #include <machine/cpu.h>
54 #include <machine/disassem.h>
60 #include <machine/cpuconf.h>
61 #include <machine/cpufunc.h>
63 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
64 #include <arm/xscale/i80321/i80321reg.h>
65 #include <arm/xscale/i80321/i80321var.h>
69 * Some definitions in i81342reg.h clash with i80321reg.h.
70 * This only happens for the LINT kernel. As it happens,
71 * we don't need anything from i81342reg.h that we already
72 * got from somewhere else during a LINT compile.
74 #if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
75 #include <arm/xscale/i8134x/i81342reg.h>
78 #ifdef CPU_XSCALE_IXP425
79 #include <arm/xscale/ixp425/ixp425reg.h>
80 #include <arm/xscale/ixp425/ixp425var.h>
83 /* PRIMARY CACHE VARIABLES */
85 int arm_picache_line_size;
88 int arm_pdcache_size; /* and unified */
89 int arm_pdcache_line_size;
93 int arm_pcache_unified;
96 int arm_dcache_align_mask;
98 u_int arm_cache_level;
99 u_int arm_cache_type[14];
105 struct cpu_functions arm9_cpufuncs = {
108 cpufunc_nullop, /* cpwait */
112 cpufunc_control, /* control */
113 arm9_setttb, /* Setttb */
117 armv4_tlb_flushID, /* tlb_flushID */
118 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
119 armv4_tlb_flushD, /* tlb_flushD */
120 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
122 /* Cache operations */
124 arm9_icache_sync_all, /* icache_sync_all */
125 arm9_icache_sync_range, /* icache_sync_range */
127 arm9_dcache_wbinv_all, /* dcache_wbinv_all */
128 arm9_dcache_wbinv_range, /* dcache_wbinv_range */
129 arm9_dcache_inv_range, /* dcache_inv_range */
130 arm9_dcache_wb_range, /* dcache_wb_range */
132 armv4_idcache_inv_all, /* idcache_inv_all */
133 arm9_idcache_wbinv_all, /* idcache_wbinv_all */
134 arm9_idcache_wbinv_range, /* idcache_wbinv_range */
135 cpufunc_nullop, /* l2cache_wbinv_all */
136 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
137 (void *)cpufunc_nullop, /* l2cache_inv_range */
138 (void *)cpufunc_nullop, /* l2cache_wb_range */
139 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
141 /* Other functions */
143 armv4_drain_writebuf, /* drain_writebuf */
145 (void *)cpufunc_nullop, /* sleep */
149 arm9_context_switch, /* context_switch */
151 arm9_setup /* cpu setup */
154 #endif /* CPU_ARM9 */
156 #if defined(CPU_ARM9E)
157 struct cpu_functions armv5_ec_cpufuncs = {
160 cpufunc_nullop, /* cpwait */
164 cpufunc_control, /* control */
165 armv5_ec_setttb, /* Setttb */
169 armv4_tlb_flushID, /* tlb_flushID */
170 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
171 armv4_tlb_flushD, /* tlb_flushD */
172 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
174 /* Cache operations */
176 armv5_ec_icache_sync_all, /* icache_sync_all */
177 armv5_ec_icache_sync_range, /* icache_sync_range */
179 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
180 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
181 armv5_ec_dcache_inv_range, /* dcache_inv_range */
182 armv5_ec_dcache_wb_range, /* dcache_wb_range */
184 armv4_idcache_inv_all, /* idcache_inv_all */
185 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
186 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
188 cpufunc_nullop, /* l2cache_wbinv_all */
189 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
190 (void *)cpufunc_nullop, /* l2cache_inv_range */
191 (void *)cpufunc_nullop, /* l2cache_wb_range */
192 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
194 /* Other functions */
196 armv4_drain_writebuf, /* drain_writebuf */
198 (void *)cpufunc_nullop, /* sleep */
202 arm9_context_switch, /* context_switch */
204 arm10_setup /* cpu setup */
208 struct cpu_functions sheeva_cpufuncs = {
211 cpufunc_nullop, /* cpwait */
215 cpufunc_control, /* control */
216 sheeva_setttb, /* Setttb */
220 armv4_tlb_flushID, /* tlb_flushID */
221 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
222 armv4_tlb_flushD, /* tlb_flushD */
223 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
225 /* Cache operations */
227 armv5_ec_icache_sync_all, /* icache_sync_all */
228 armv5_ec_icache_sync_range, /* icache_sync_range */
230 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
231 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
232 sheeva_dcache_inv_range, /* dcache_inv_range */
233 sheeva_dcache_wb_range, /* dcache_wb_range */
235 armv4_idcache_inv_all, /* idcache_inv_all */
236 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
237 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
239 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
240 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
241 sheeva_l2cache_inv_range, /* l2cache_inv_range */
242 sheeva_l2cache_wb_range, /* l2cache_wb_range */
243 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
245 /* Other functions */
247 armv4_drain_writebuf, /* drain_writebuf */
249 sheeva_cpu_sleep, /* sleep */
253 arm9_context_switch, /* context_switch */
255 arm10_setup /* cpu setup */
257 #endif /* CPU_ARM9E */
260 struct cpu_functions pj4bv7_cpufuncs = {
263 armv7_drain_writebuf, /* cpwait */
267 cpufunc_control, /* control */
268 armv7_setttb, /* Setttb */
272 armv7_tlb_flushID, /* tlb_flushID */
273 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
274 armv7_tlb_flushID, /* tlb_flushD */
275 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
277 /* Cache operations */
/* NOTE(review): icache_sync_all slot is filled with the combined
 * I+D wbinv routine rather than an I-cache-only sync — presumably
 * deliberate (a superset operation is always safe); confirm. */
278 armv7_idcache_wbinv_all, /* icache_sync_all */
279 armv7_icache_sync_range, /* icache_sync_range */
281 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
282 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
283 armv7_dcache_inv_range, /* dcache_inv_range */
284 armv7_dcache_wb_range, /* dcache_wb_range */
286 armv7_idcache_inv_all, /* idcache_inv_all */
287 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
288 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
290 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
291 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
292 (void *)cpufunc_nullop, /* l2cache_inv_range */
293 (void *)cpufunc_nullop, /* l2cache_wb_range */
294 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
296 /* Other functions */
298 armv7_drain_writebuf, /* drain_writebuf */
300 (void *)cpufunc_nullop, /* sleep */
303 armv7_context_switch, /* context_switch */
305 pj4bv7_setup /* cpu setup */
307 #endif /* CPU_MV_PJ4B */
309 #if defined(CPU_XSCALE_80321) || \
310 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
311 defined(CPU_XSCALE_80219)
313 struct cpu_functions xscale_cpufuncs = {
316 xscale_cpwait, /* cpwait */
320 xscale_control, /* control */
321 xscale_setttb, /* setttb */
325 armv4_tlb_flushID, /* tlb_flushID */
326 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
327 armv4_tlb_flushD, /* tlb_flushD */
328 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
330 /* Cache operations */
332 xscale_cache_syncI, /* icache_sync_all */
333 xscale_cache_syncI_rng, /* icache_sync_range */
335 xscale_cache_purgeD, /* dcache_wbinv_all */
336 xscale_cache_purgeD_rng, /* dcache_wbinv_range */
337 xscale_cache_flushD_rng, /* dcache_inv_range */
338 xscale_cache_cleanD_rng, /* dcache_wb_range */
340 xscale_cache_flushID, /* idcache_inv_all */
341 xscale_cache_purgeID, /* idcache_wbinv_all */
342 xscale_cache_purgeID_rng, /* idcache_wbinv_range */
343 cpufunc_nullop, /* l2cache_wbinv_all */
344 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
345 (void *)cpufunc_nullop, /* l2cache_inv_range */
346 (void *)cpufunc_nullop, /* l2cache_wb_range */
347 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
349 /* Other functions */
351 armv4_drain_writebuf, /* drain_writebuf */
353 xscale_cpu_sleep, /* sleep */
357 xscale_context_switch, /* context_switch */
359 xscale_setup /* cpu setup */
362 /* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
365 #ifdef CPU_XSCALE_81342
366 struct cpu_functions xscalec3_cpufuncs = {
369 xscale_cpwait, /* cpwait */
373 xscale_control, /* control */
374 xscalec3_setttb, /* setttb */
378 armv4_tlb_flushID, /* tlb_flushID */
379 xscale_tlb_flushID_SE, /* tlb_flushID_SE */
380 armv4_tlb_flushD, /* tlb_flushD */
381 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
383 /* Cache operations */
385 xscalec3_cache_syncI, /* icache_sync_all */
386 xscalec3_cache_syncI_rng, /* icache_sync_range */
388 xscalec3_cache_purgeD, /* dcache_wbinv_all */
389 xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */
390 xscale_cache_flushD_rng, /* dcache_inv_range */
391 xscalec3_cache_cleanD_rng, /* dcache_wb_range */
393 xscale_cache_flushID, /* idcache_inv_all */
394 xscalec3_cache_purgeID, /* idcache_wbinv_all */
395 xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */
396 xscalec3_l2cache_purge, /* l2cache_wbinv_all */
397 xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */
398 xscalec3_l2cache_flush_rng, /* l2cache_inv_range */
399 xscalec3_l2cache_clean_rng, /* l2cache_wb_range */
400 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
402 /* Other functions */
404 armv4_drain_writebuf, /* drain_writebuf */
406 xscale_cpu_sleep, /* sleep */
410 xscalec3_context_switch, /* context_switch */
412 xscale_setup /* cpu setup */
414 #endif /* CPU_XSCALE_81342 */
417 #if defined(CPU_FA526)
418 struct cpu_functions fa526_cpufuncs = {
421 cpufunc_nullop, /* cpwait */
425 cpufunc_control, /* control */
426 fa526_setttb, /* setttb */
430 armv4_tlb_flushID, /* tlb_flushID */
431 fa526_tlb_flushID_SE, /* tlb_flushID_SE */
432 armv4_tlb_flushD, /* tlb_flushD */
433 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
435 /* Cache operations */
437 fa526_icache_sync_all, /* icache_sync_all */
438 fa526_icache_sync_range, /* icache_sync_range */
440 fa526_dcache_wbinv_all, /* dcache_wbinv_all */
441 fa526_dcache_wbinv_range, /* dcache_wbinv_range */
442 fa526_dcache_inv_range, /* dcache_inv_range */
443 fa526_dcache_wb_range, /* dcache_wb_range */
445 armv4_idcache_inv_all, /* idcache_inv_all */
446 fa526_idcache_wbinv_all, /* idcache_wbinv_all */
447 fa526_idcache_wbinv_range, /* idcache_wbinv_range */
448 cpufunc_nullop, /* l2cache_wbinv_all */
449 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
450 (void *)cpufunc_nullop, /* l2cache_inv_range */
451 (void *)cpufunc_nullop, /* l2cache_wb_range */
452 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
454 /* Other functions */
456 armv4_drain_writebuf, /* drain_writebuf */
458 fa526_cpu_sleep, /* sleep */
463 fa526_context_switch, /* context_switch */
465 fa526_setup /* cpu setup */
467 #endif /* CPU_FA526 */
469 #if defined(CPU_ARM1176)
470 struct cpu_functions arm1176_cpufuncs = {
473 cpufunc_nullop, /* cpwait */
477 cpufunc_control, /* control */
478 arm11x6_setttb, /* Setttb */
482 arm11_tlb_flushID, /* tlb_flushID */
483 arm11_tlb_flushID_SE, /* tlb_flushID_SE */
484 arm11_tlb_flushD, /* tlb_flushD */
485 arm11_tlb_flushD_SE, /* tlb_flushD_SE */
487 /* Cache operations */
489 arm11x6_icache_sync_all, /* icache_sync_all */
490 arm11x6_icache_sync_range, /* icache_sync_range */
492 arm11x6_dcache_wbinv_all, /* dcache_wbinv_all */
493 armv6_dcache_wbinv_range, /* dcache_wbinv_range */
494 armv6_dcache_inv_range, /* dcache_inv_range */
495 armv6_dcache_wb_range, /* dcache_wb_range */
497 armv6_idcache_inv_all, /* idcache_inv_all */
498 arm11x6_idcache_wbinv_all, /* idcache_wbinv_all */
499 arm11x6_idcache_wbinv_range, /* idcache_wbinv_range */
501 (void *)cpufunc_nullop, /* l2cache_wbinv_all */
502 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
503 (void *)cpufunc_nullop, /* l2cache_inv_range */
504 (void *)cpufunc_nullop, /* l2cache_wb_range */
505 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
507 /* Other functions */
509 arm11_drain_writebuf, /* drain_writebuf */
511 arm11x6_sleep, /* sleep */
515 arm11_context_switch, /* context_switch */
517 arm11x6_setup /* cpu setup */
519 #endif /*CPU_ARM1176 */
521 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
522 struct cpu_functions cortexa_cpufuncs = {
525 cpufunc_nullop, /* cpwait */
529 cpufunc_control, /* control */
530 armv7_setttb, /* Setttb */
533 * TLB functions. ARMv7 does all TLB ops based on a unified TLB model
534 * whether the hardware implements separate I+D or not, so we use the
535 * same 'ID' functions for all 3 variations.
538 armv7_tlb_flushID, /* tlb_flushID */
539 armv7_tlb_flushID_SE, /* tlb_flushID_SE */
540 armv7_tlb_flushID, /* tlb_flushD */
541 armv7_tlb_flushID_SE, /* tlb_flushD_SE */
543 /* Cache operations */
545 armv7_icache_sync_all, /* icache_sync_all */
546 armv7_icache_sync_range, /* icache_sync_range */
548 armv7_dcache_wbinv_all, /* dcache_wbinv_all */
549 armv7_dcache_wbinv_range, /* dcache_wbinv_range */
550 armv7_dcache_inv_range, /* dcache_inv_range */
551 armv7_dcache_wb_range, /* dcache_wb_range */
553 armv7_idcache_inv_all, /* idcache_inv_all */
554 armv7_idcache_wbinv_all, /* idcache_wbinv_all */
555 armv7_idcache_wbinv_range, /* idcache_wbinv_range */
558 * Note: For CPUs using the PL310 the L2 ops are filled in when the
559 * L2 cache controller is actually enabled.
561 cpufunc_nullop, /* l2cache_wbinv_all */
562 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
563 (void *)cpufunc_nullop, /* l2cache_inv_range */
564 (void *)cpufunc_nullop, /* l2cache_wb_range */
565 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
567 /* Other functions */
569 armv7_drain_writebuf, /* drain_writebuf */
571 armv7_cpu_sleep, /* sleep */
575 armv7_context_switch, /* context_switch */
577 cortexa_setup /* cpu setup */
579 #endif /* CPU_CORTEXA */
582 * Global constants also used by locore.s
585 struct cpu_functions cpufuncs;
587 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */
589 #if defined(CPU_ARM9) || \
590 defined (CPU_ARM9E) || \
591 defined(CPU_ARM1176) || defined(CPU_XSCALE_80321) || \
592 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
593 defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \
594 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
595 defined(CPU_CORTEXA) || defined(CPU_KRAIT)
597 /* Global cache line sizes, use 32 as default */
598 int arm_dcache_min_line_size = 32;
599 int arm_icache_min_line_size = 32;
600 int arm_idcache_min_line_size = 32;
602 static void get_cachetype_cp15(void);
604 /* Additional cache information local to this file. Log2 of some of the
606 static int arm_dcache_l2_nsets;
607 static int arm_dcache_l2_assoc;
608 static int arm_dcache_l2_linesize;
613 u_int ctype, isize, dsize, cpuid;
614 u_int clevel, csize, i, sel;
618 __asm __volatile("mrc p15, 0, %0, c0, c0, 1"
623 * ...and thus spake the ARM ARM:
625 * If an <opcode2> value corresponding to an unimplemented or
626 * reserved ID register is encountered, the System Control
627 * processor returns the value of the main ID register.
632 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
633 /* Resolve minimal cache line sizes */
634 arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
635 arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
636 arm_idcache_min_line_size =
637 min(arm_icache_min_line_size, arm_dcache_min_line_size);
639 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
641 arm_cache_level = clevel;
642 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
644 while ((type = (clevel & 0x7)) && i < 7) {
645 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
646 type == CACHE_SEP_CACHE) {
648 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
650 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
652 arm_cache_type[sel] = csize;
653 arm_dcache_align = 1 <<
654 (CPUV7_CT_xSIZE_LEN(csize) + 4);
655 arm_dcache_align_mask = arm_dcache_align - 1;
657 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
659 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
661 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
663 arm_cache_type[sel] = csize;
669 if ((ctype & CPU_CT_S) == 0)
670 arm_pcache_unified = 1;
673 * If you want to know how this code works, go read the ARM ARM.
676 arm_pcache_type = CPU_CT_CTYPE(ctype);
678 if (arm_pcache_unified == 0) {
679 isize = CPU_CT_ISIZE(ctype);
680 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
681 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
682 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
683 if (isize & CPU_CT_xSIZE_M)
684 arm_picache_line_size = 0; /* not present */
686 arm_picache_ways = 1;
688 arm_picache_ways = multiplier <<
689 (CPU_CT_xSIZE_ASSOC(isize) - 1);
691 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
694 dsize = CPU_CT_DSIZE(ctype);
695 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
696 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
697 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
698 if (dsize & CPU_CT_xSIZE_M)
699 arm_pdcache_line_size = 0; /* not present */
701 arm_pdcache_ways = 1;
703 arm_pdcache_ways = multiplier <<
704 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
706 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
708 arm_dcache_align = arm_pdcache_line_size;
710 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
711 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
712 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
713 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
716 arm_dcache_align_mask = arm_dcache_align - 1;
719 #endif /* ARM9 || XSCALE */
722 * Cannot panic here as we may not have a console yet ...
728 cputype = cpu_ident();
729 cputype &= CPU_ID_CPU_MASK;
732 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
733 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
734 (cputype & 0x0000f000) == 0x00009000) {
735 cpufuncs = arm9_cpufuncs;
736 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
737 get_cachetype_cp15();
738 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
739 arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
740 arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
741 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
742 arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
743 pmap_pte_init_generic();
746 #endif /* CPU_ARM9 */
747 #if defined(CPU_ARM9E)
748 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
749 cputype == CPU_ID_MV88FR571_41) {
750 uint32_t sheeva_ctrl;
752 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
755 * Workaround for Marvell MV78100 CPU: Cache prefetch
756 * mechanism may affect the cache coherency validity,
757 * so it needs to be disabled.
759 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
760 * L2 Prefetching Mechanism) for details.
762 if (cputype == CPU_ID_MV88FR571_VD ||
763 cputype == CPU_ID_MV88FR571_41)
764 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
766 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
768 cpufuncs = sheeva_cpufuncs;
769 get_cachetype_cp15();
770 pmap_pte_init_generic();
772 } else if (cputype == CPU_ID_ARM926EJS) {
773 cpufuncs = armv5_ec_cpufuncs;
774 get_cachetype_cp15();
775 pmap_pte_init_generic();
778 #endif /* CPU_ARM9E */
779 #if defined(CPU_ARM1176)
780 if (cputype == CPU_ID_ARM1176JZS) {
781 cpufuncs = arm1176_cpufuncs;
782 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
783 get_cachetype_cp15();
786 #endif /* CPU_ARM1176 */
787 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
788 if (cputype == CPU_ID_CORTEXA5 ||
789 cputype == CPU_ID_CORTEXA7 ||
790 cputype == CPU_ID_CORTEXA8R1 ||
791 cputype == CPU_ID_CORTEXA8R2 ||
792 cputype == CPU_ID_CORTEXA8R3 ||
793 cputype == CPU_ID_CORTEXA9R1 ||
794 cputype == CPU_ID_CORTEXA9R2 ||
795 cputype == CPU_ID_CORTEXA9R3 ||
796 cputype == CPU_ID_CORTEXA9R4 ||
797 cputype == CPU_ID_CORTEXA12R0 ||
798 cputype == CPU_ID_CORTEXA15R0 ||
799 cputype == CPU_ID_CORTEXA15R1 ||
800 cputype == CPU_ID_CORTEXA15R2 ||
801 cputype == CPU_ID_CORTEXA15R3 ||
802 cputype == CPU_ID_KRAIT300R0 ||
803 cputype == CPU_ID_KRAIT300R1 ) {
804 cpufuncs = cortexa_cpufuncs;
805 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
806 get_cachetype_cp15();
809 #endif /* CPU_CORTEXA */
811 #if defined(CPU_MV_PJ4B)
812 if (cputype == CPU_ID_MV88SV581X_V7 ||
813 cputype == CPU_ID_MV88SV584X_V7 ||
814 cputype == CPU_ID_ARM_88SV581X_V7) {
815 cpufuncs = pj4bv7_cpufuncs;
816 get_cachetype_cp15();
819 #endif /* CPU_MV_PJ4B */
821 #if defined(CPU_FA526)
822 if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
823 cpufuncs = fa526_cpufuncs;
824 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */
825 get_cachetype_cp15();
826 pmap_pte_init_generic();
830 #endif /* CPU_FA526 */
832 #if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
833 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
834 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
835 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
836 cpufuncs = xscale_cpufuncs;
837 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
838 get_cachetype_cp15();
839 pmap_pte_init_xscale();
842 #endif /* CPU_XSCALE_80321 */
844 #if defined(CPU_XSCALE_81342)
845 if (cputype == CPU_ID_81342) {
846 cpufuncs = xscalec3_cpufuncs;
847 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
848 get_cachetype_cp15();
849 pmap_pte_init_xscale();
852 #endif /* CPU_XSCALE_81342 */
853 #ifdef CPU_XSCALE_PXA2X0
854 /* ignore core revision to test PXA2xx CPUs */
855 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
856 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
857 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
859 cpufuncs = xscale_cpufuncs;
860 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
861 get_cachetype_cp15();
862 pmap_pte_init_xscale();
866 #endif /* CPU_XSCALE_PXA2X0 */
867 #ifdef CPU_XSCALE_IXP425
868 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
869 cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
871 cpufuncs = xscale_cpufuncs;
872 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */
873 get_cachetype_cp15();
874 pmap_pte_init_xscale();
878 #endif /* CPU_XSCALE_IXP425 */
880 * Bzzzz. And the answer was ...
882 panic("No support for this CPU type (%08x) in kernel", cputype);
883 return(ARCHITECTURE_NOT_PRESENT);
885 uma_set_align(arm_dcache_align_mask);
897 int cpuctrl, cpuctrlmask;
899 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
900 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
901 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
902 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
903 CPU_CONTROL_ROUNDROBIN;
904 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
905 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
906 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
907 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
908 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
909 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
910 | CPU_CONTROL_ROUNDROBIN;
912 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
913 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
917 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
919 if (vector_page == ARM_VECTORS_HIGH)
920 cpuctrl |= CPU_CONTROL_VECRELOC;
922 /* Clear out the cache */
923 cpu_idcache_wbinv_all();
925 /* Set the control register */
926 cpu_control(cpuctrlmask, cpuctrl);
930 #endif /* CPU_ARM9 */
932 #if defined(CPU_ARM9E)
936 int cpuctrl, cpuctrlmask;
938 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
939 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
940 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
941 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
942 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
943 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
944 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
945 | CPU_CONTROL_BPRD_ENABLE
946 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
948 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
949 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
953 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
956 /* Clear out the cache */
957 cpu_idcache_wbinv_all();
959 /* Now really make sure they are clean. */
960 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
962 if (vector_page == ARM_VECTORS_HIGH)
963 cpuctrl |= CPU_CONTROL_VECRELOC;
965 /* Set the control register */
967 cpu_control(0xffffffff, cpuctrl);
970 cpu_idcache_wbinv_all();
972 #endif /* CPU_ARM9E || CPU_ARM10 */
974 #if defined(CPU_ARM1176) \
975 || defined(CPU_MV_PJ4B) \
976 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
978 cpu_scc_setup_ccnt(void)
980 /* This is how you give userland access to the CCNT and PMCn
982 * BEWARE! This gives write access also, which may not be what
985 #ifdef _PMC_USER_READ_WRITE_
986 /* Set PMUSERENR[0] to allow userland access */
987 cp15_pmuserenr_set(1);
989 #if defined(CPU_ARM1176)
990 /* Set PMCR[2,0] to enable counters and reset CCNT */
993 /* Set up the PMCCNTR register as a cyclecounter:
994 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
995 * Set PMCR[2,0] to enable counters and reset CCNT
996 * Set PMCNTENSET to 0x80000000 to enable CCNT */
997 cp15_pminten_clr(0xFFFFFFFF);
999 cp15_pmcnten_set(0x80000000);
1004 #if defined(CPU_ARM1176)
1008 int cpuctrl, cpuctrl_wax;
1009 uint32_t auxctrl, auxctrl_wax;
1014 cpuid = cpu_ident();
1017 CPU_CONTROL_MMU_ENABLE |
1018 CPU_CONTROL_DC_ENABLE |
1019 CPU_CONTROL_WBUF_ENABLE |
1020 CPU_CONTROL_32BP_ENABLE |
1021 CPU_CONTROL_32BD_ENABLE |
1022 CPU_CONTROL_LABT_ENABLE |
1023 CPU_CONTROL_SYST_ENABLE |
1024 CPU_CONTROL_IC_ENABLE |
1025 CPU_CONTROL_UNAL_ENABLE;
1028 * "write as existing" bits
1029 * inverse of this is mask
1032 (3 << 30) | /* SBZ */
1033 (1 << 29) | /* FA */
1034 (1 << 28) | /* TR */
1035 (3 << 26) | /* SBZ */
1036 (3 << 19) | /* SBZ */
1037 (1 << 17); /* SBZ */
1039 cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1040 cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1043 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1046 if (vector_page == ARM_VECTORS_HIGH)
1047 cpuctrl |= CPU_CONTROL_VECRELOC;
1053 * Enable an errata workaround
1055 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
1056 auxctrl = ARM1176_AUXCTL_PHD;
1057 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
1060 /* Clear out the cache */
1061 cpu_idcache_wbinv_all();
1063 /* Now really make sure they are clean. */
1064 __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
1066 /* Allow detection code to find the VFP if it's fitted. */
1067 cp15_cpacr_set(0x0fffffff);
1069 /* Set the control register */
1071 cpu_control(~cpuctrl_wax, cpuctrl);
1073 tmp = cp15_actlr_get();
1078 cp15_actlr_set(tmp);
1081 cpu_idcache_wbinv_all();
1083 cpu_scc_setup_ccnt();
1085 #endif /* CPU_ARM1176 */
1095 cpuctrl = CPU_CONTROL_MMU_ENABLE;
1096 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1097 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1099 cpuctrl |= CPU_CONTROL_DC_ENABLE;
1100 cpuctrl |= (0xf << 3);
1101 cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1102 cpuctrl |= CPU_CONTROL_IC_ENABLE;
1103 if (vector_page == ARM_VECTORS_HIGH)
1104 cpuctrl |= CPU_CONTROL_VECRELOC;
1105 cpuctrl |= (0x5 << 16) | (1 < 22);
1106 cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1108 /* Clear out the cache */
1109 cpu_idcache_wbinv_all();
1111 /* Set the control register */
1113 cpu_control(0xFFFFFFFF, cpuctrl);
1116 cpu_idcache_wbinv_all();
1118 cpu_scc_setup_ccnt();
1120 #endif /* CPU_MV_PJ4B */
1122 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1127 int cpuctrl, cpuctrlmask;
1129 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | /* MMU enable [0] */
1130 CPU_CONTROL_AFLT_ENABLE | /* Alignment fault [1] */
1131 CPU_CONTROL_DC_ENABLE | /* DCache enable [2] */
1132 CPU_CONTROL_BPRD_ENABLE | /* Branch prediction [11] */
1133 CPU_CONTROL_IC_ENABLE | /* ICache enable [12] */
1134 CPU_CONTROL_VECRELOC; /* Vector relocation [13] */
1136 cpuctrl = CPU_CONTROL_MMU_ENABLE |
1137 CPU_CONTROL_IC_ENABLE |
1138 CPU_CONTROL_DC_ENABLE |
1139 CPU_CONTROL_BPRD_ENABLE;
1141 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1142 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1145 /* Switch to big endian */
1147 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1150 /* Check if the vector page is at the high address (0xffff0000) */
1151 if (vector_page == ARM_VECTORS_HIGH)
1152 cpuctrl |= CPU_CONTROL_VECRELOC;
1154 /* Clear out the cache */
1155 cpu_idcache_wbinv_all();
1157 /* Set the control register */
1159 cpu_control(cpuctrlmask, cpuctrl);
1162 cpu_idcache_wbinv_all();
1163 #if defined(SMP) && !defined(ARM_NEW_PMAP)
1164 armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting */
1167 cpu_scc_setup_ccnt();
1169 #endif /* CPU_CORTEXA */
1171 #if defined(CPU_FA526)
1175 int cpuctrl, cpuctrlmask;
1177 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1178 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1179 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1180 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1181 | CPU_CONTROL_BPRD_ENABLE;
1182 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1183 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1184 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1185 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1186 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1187 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1188 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1190 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1191 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1195 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1198 if (vector_page == ARM_VECTORS_HIGH)
1199 cpuctrl |= CPU_CONTROL_VECRELOC;
1201 /* Clear out the cache */
1202 cpu_idcache_wbinv_all();
1204 /* Set the control register */
1206 cpu_control(0xffffffff, cpuctrl);
1208 #endif /* CPU_FA526 */
1210 #if defined(CPU_XSCALE_80321) || \
1211 defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1212 defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
1217 int cpuctrl, cpuctrlmask;
1220 * The XScale Write Buffer is always enabled. Our option
1221 * is to enable/disable coalescing. Note that bits 6:3
1222 * must always be enabled.
1225 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1226 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1227 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1228 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1229 | CPU_CONTROL_BPRD_ENABLE;
1230 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1231 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1232 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1233 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1234 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1235 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1236 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1237 CPU_CONTROL_L2_ENABLE;
1239 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1240 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1244 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1247 if (vector_page == ARM_VECTORS_HIGH)
1248 cpuctrl |= CPU_CONTROL_VECRELOC;
1249 #ifdef CPU_XSCALE_CORE3
1250 cpuctrl |= CPU_CONTROL_L2_ENABLE;
1253 /* Clear out the cache */
1254 cpu_idcache_wbinv_all();
1257 * Set the control register. Note that bits 6:3 must always
1261 /* cpu_control(cpuctrlmask, cpuctrl);*/
1262 cpu_control(0xffffffff, cpuctrl);
1264 /* Make sure write coalescing is turned on */
1265 __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1267 #ifdef XSCALE_NO_COALESCE_WRITES
1268 auxctl |= XSCALE_AUXCTL_K;
1270 auxctl &= ~XSCALE_AUXCTL_K;
1272 #ifdef CPU_XSCALE_CORE3
1273 auxctl |= XSCALE_AUXCTL_LLR;
1274 auxctl |= XSCALE_AUXCTL_MD_MASK;
1276 __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1279 #endif /* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425