1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * SPDX-License-Identifier: BSD-4-Clause
6 * arm9 support code Copyright (C) 2001 ARM Ltd
7 * Copyright (c) 1997 Mark Brinicombe.
8 * Copyright (c) 1997 Causality Limited
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Causality Limited.
22 * 4. The name of Causality Limited may not be used to endorse or promote
23 * products derived from this software without specific prior written
26 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * RiscBSD kernel project
42 * C functions for supporting CPU / MMU / TLB specific operations.
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
49 #include <sys/param.h>
50 #include <sys/systm.h>
52 #include <sys/mutex.h>
54 #include <machine/bus.h>
55 #include <machine/cpu.h>
56 #include <machine/disassem.h>
62 #include <machine/cpufunc.h>
64 /* PRIMARY CACHE VARIABLES */
/*
 * Geometry of the primary (L1) caches; populated at boot by
 * get_cachetype_cp15() below and consumed by the cache maintenance code.
 * NOTE(review): inner numbering jumps — some declarations of this group
 * are not visible in this excerpt.
 */
66 int arm_picache_line_size;
69 int arm_pdcache_size; /* and unified */
70 int arm_pdcache_line_size;
74 int arm_pcache_unified;
77 int arm_dcache_align_mask;
79 u_int arm_cache_level;
80 u_int arm_cache_type[14];
/*
 * Forward declarations of the per-CPU-family setup routines defined at
 * the bottom of this file; each is installed as the cf_setup member of
 * the corresponding cpu_functions vector.
 * NOTE(review): the matching #endif lines are not visible in this
 * excerpt (inner numbering jumps) — confirm against the full file.
 */
83 #if defined(CPU_ARM9E)
84 static void arm10_setup(void);
87 static void pj4bv7_setup(void);
89 #if defined(CPU_ARM1176)
90 static void arm11x6_setup(void);
92 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
93 static void cortexa_setup(void);
/*
 * Function vector for ARMv5 "EC" cores (installed for CPU_ID_ARM926EJS
 * by the CPU-identification code below).  L2 operations are no-ops on
 * these parts, hence the cpufunc_nullop entries.
 * NOTE(review): inner numbering jumps — some initializer lines and the
 * closing brace of this struct are not visible in this excerpt.
 */
96 #if defined(CPU_ARM9E)
97 struct cpu_functions armv5_ec_cpufuncs = {
100 cpufunc_nullop, /* cpwait */
104 cpufunc_control, /* control */
105 armv5_ec_setttb, /* Setttb */
109 armv4_tlb_flushID, /* tlb_flushID */
110 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
111 armv4_tlb_flushD, /* tlb_flushD */
112 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
114 /* Cache operations */
116 armv5_ec_icache_sync_range, /* icache_sync_range */
118 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
119 armv5_ec_dcache_wbinv_range, /* dcache_wbinv_range */
120 armv5_ec_dcache_inv_range, /* dcache_inv_range */
121 armv5_ec_dcache_wb_range, /* dcache_wb_range */
123 armv4_idcache_inv_all, /* idcache_inv_all */
124 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
125 armv5_ec_idcache_wbinv_range, /* idcache_wbinv_range */
127 cpufunc_nullop, /* l2cache_wbinv_all */
128 (void *)cpufunc_nullop, /* l2cache_wbinv_range */
129 (void *)cpufunc_nullop, /* l2cache_inv_range */
130 (void *)cpufunc_nullop, /* l2cache_wb_range */
131 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
133 /* Other functions */
135 armv4_drain_writebuf, /* drain_writebuf */
137 (void *)cpufunc_nullop, /* sleep */
141 arm9_context_switch, /* context_switch */
143 arm10_setup /* cpu setup */
/*
 * Function vector for Marvell Sheeva/Feroceon cores (MV88FR131/571);
 * like armv5_ec but with Sheeva-specific ranged D-cache ops and real
 * L2 cache maintenance routines.
 * NOTE(review): inner numbering jumps — some initializer lines and the
 * closing brace of this struct are not visible in this excerpt.
 */
147 struct cpu_functions sheeva_cpufuncs = {
150 cpufunc_nullop, /* cpwait */
154 cpufunc_control, /* control */
155 sheeva_setttb, /* Setttb */
159 armv4_tlb_flushID, /* tlb_flushID */
160 arm9_tlb_flushID_SE, /* tlb_flushID_SE */
161 armv4_tlb_flushD, /* tlb_flushD */
162 armv4_tlb_flushD_SE, /* tlb_flushD_SE */
164 /* Cache operations */
166 armv5_ec_icache_sync_range, /* icache_sync_range */
168 armv5_ec_dcache_wbinv_all, /* dcache_wbinv_all */
169 sheeva_dcache_wbinv_range, /* dcache_wbinv_range */
170 sheeva_dcache_inv_range, /* dcache_inv_range */
171 sheeva_dcache_wb_range, /* dcache_wb_range */
173 armv4_idcache_inv_all, /* idcache_inv_all */
174 armv5_ec_idcache_wbinv_all, /* idcache_wbinv_all */
175 sheeva_idcache_wbinv_range, /* idcache_wbinv_range */
177 sheeva_l2cache_wbinv_all, /* l2cache_wbinv_all */
178 sheeva_l2cache_wbinv_range, /* l2cache_wbinv_range */
179 sheeva_l2cache_inv_range, /* l2cache_inv_range */
180 sheeva_l2cache_wb_range, /* l2cache_wb_range */
181 (void *)cpufunc_nullop, /* l2cache_drain_writebuf */
183 /* Other functions */
185 armv4_drain_writebuf, /* drain_writebuf */
187 sheeva_cpu_sleep, /* sleep */
191 arm9_context_switch, /* context_switch */
193 arm10_setup /* cpu setup */
195 #endif /* CPU_ARM9E */
/*
 * Function vector for Marvell PJ4B (ARMv7) cores, using designated
 * initializers; unset members rely on zero-initialization.  L2
 * maintenance is a no-op here.
 * NOTE(review): inner numbering jumps — the opening #if and closing
 * brace of this struct are not visible in this excerpt.
 */
198 struct cpu_functions pj4bv7_cpufuncs = {
200 /* Cache operations */
201 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
202 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
203 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
204 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
205 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
207 /* Other functions */
208 .cf_sleep = (void *)cpufunc_nullop,
211 .cf_setup = pj4bv7_setup
213 #endif /* CPU_MV_PJ4B */
/*
 * Function vector for ARM1176 cores; L2 ops are no-ops, and sleep uses
 * the ARM11x6-specific wait-for-interrupt routine.
 * NOTE(review): inner numbering jumps — some initializer lines and the
 * closing brace of this struct are not visible in this excerpt.
 */
215 #if defined(CPU_ARM1176)
216 struct cpu_functions arm1176_cpufuncs = {
218 /* Cache operations */
219 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
220 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
221 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
222 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
223 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
225 /* Other functions */
226 .cf_sleep = arm11x6_sleep,
229 .cf_setup = arm11x6_setup
231 #endif /*CPU_ARM1176 */
/*
 * Function vector for Cortex-A family and Qualcomm Krait cores.
 * NOTE(review): inner numbering jumps — some initializer lines and the
 * closing brace of this struct are not visible in this excerpt.
 */
233 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
234 struct cpu_functions cortexa_cpufuncs = {
236 /* Cache operations */
239 * Note: For CPUs using the PL310 the L2 ops are filled in when the
240 * L2 cache controller is actually enabled.
242 .cf_l2cache_wbinv_all = cpufunc_nullop,
243 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
244 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
245 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
246 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
248 /* Other functions */
249 .cf_sleep = armv7_cpu_sleep,
252 .cf_setup = cortexa_setup
254 #endif /* CPU_CORTEXA || CPU_KRAIT */
257 * Global constants also used by locore.s
/* The active function vector; one of the per-CPU vectors above is copied
 * into this at boot by the CPU-identification code below. */
260 struct cpu_functions cpufuncs;
263 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore-v4.s */
266 #if defined (CPU_ARM9E) || \
267 defined(CPU_ARM1176) || \
268 defined(CPU_MV_PJ4B) || \
269 defined(CPU_CORTEXA) || defined(CPU_KRAIT)
271 static void get_cachetype_cp15(void);
273 /* Additional cache information local to this file. Log2 of some of the
275 static int arm_dcache_l2_nsets;
276 static int arm_dcache_l2_assoc;
277 static int arm_dcache_l2_linesize;
/*
 * Probe the cache geometry via CP15 and fill in the arm_* globals above.
 * Handles two register schemes: the ARMv7 CLIDR/CCSIDR walk (one CCSIDR
 * read per cache level, via CSSELR selection) and the older pre-v7
 * single cache-type-register encoding.
 * NOTE(review): inner numbering jumps — the storage-class/return-type
 * line, several statements and braces of this function are not visible
 * in this excerpt; do not treat the body below as complete.
 */
280 get_cachetype_cp15(void)
282 u_int ctype, isize, dsize, cpuid;
283 u_int clevel, csize, i, sel;
287 ctype = cp15_ctr_get();
288 cpuid = cp15_midr_get();
290 * ...and thus spake the ARM ARM:
292 * If an <opcode2> value corresponding to an unimplemented or
293 * reserved ID register is encountered, the System Control
294 * processor returns the value of the main ID register.
/* ARMv7 scheme: walk CLIDR three bits (one cache type field) at a time. */
299 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
300 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
302 arm_cache_level = clevel;
303 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
305 while ((type = (clevel & 0x7)) && i < 7) {
306 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
307 type == CACHE_SEP_CACHE) {
/* Select the data/unified cache of this level, then read CCSIDR. */
309 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
311 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
313 arm_cache_type[sel] = csize;
314 arm_dcache_align = 1 <<
315 (CPUV7_CT_xSIZE_LEN(csize) + 4);
316 arm_dcache_align_mask = arm_dcache_align - 1;
318 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
320 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
322 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
324 arm_cache_type[sel] = csize;
/* Pre-v7 scheme: decode the single cache type register. */
330 if ((ctype & CPU_CT_S) == 0)
331 arm_pcache_unified = 1;
334 * If you want to know how this code works, go read the ARM ARM.
337 arm_pcache_type = CPU_CT_CTYPE(ctype);
339 if (arm_pcache_unified == 0) {
340 isize = CPU_CT_ISIZE(ctype);
341 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
342 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
343 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
344 if (isize & CPU_CT_xSIZE_M)
345 arm_picache_line_size = 0; /* not present */
347 arm_picache_ways = 1;
349 arm_picache_ways = multiplier <<
350 (CPU_CT_xSIZE_ASSOC(isize) - 1);
352 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
355 dsize = CPU_CT_DSIZE(ctype);
356 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
357 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
358 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
359 if (dsize & CPU_CT_xSIZE_M)
360 arm_pdcache_line_size = 0; /* not present */
362 arm_pdcache_ways = 1;
364 arm_pdcache_ways = multiplier <<
365 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
367 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
369 arm_dcache_align = arm_pdcache_line_size;
/* Log2 L2 parameters, derived from the same D-cache fields. */
371 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
372 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
373 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
374 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
377 arm_dcache_align_mask = arm_dcache_align - 1;
380 #endif /* ARM9 || XSCALE */
/*
 * CPU identification/dispatch: reads the MIDR, copies the matching
 * per-family vector into the global `cpufuncs`, probes the cache
 * geometry, and configures UMA's allocation alignment.
 * NOTE(review): the function's signature line is not visible in this
 * excerpt (inner numbering jumps) — presumably this is set_cpufuncs();
 * confirm against the full file.  Several statements/braces are also
 * missing from view.
 */
383 * Cannot panic here as we may not have a console yet ...
389 cputype = cp15_midr_get();
390 cputype &= CPU_ID_CPU_MASK;
392 #if defined(CPU_ARM9E)
/* Marvell Sheeva/Feroceon cores need extended-control setup first. */
393 if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
394 cputype == CPU_ID_MV88FR571_41) {
395 uint32_t sheeva_ctrl;
397 sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
400 * Workaround for Marvell MV78100 CPU: Cache prefetch
401 * mechanism may affect the cache coherency validity,
402 * so it needs to be disabled.
404 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
405 * L2 Prefetching Mechanism) for details.
407 if (cputype == CPU_ID_MV88FR571_VD ||
408 cputype == CPU_ID_MV88FR571_41)
409 sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
411 sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
413 cpufuncs = sheeva_cpufuncs;
414 get_cachetype_cp15();
415 pmap_pte_init_generic();
417 } else if (cputype == CPU_ID_ARM926EJS) {
418 cpufuncs = armv5_ec_cpufuncs;
419 get_cachetype_cp15();
420 pmap_pte_init_generic();
423 #endif /* CPU_ARM9E */
424 #if defined(CPU_ARM1176)
425 if (cputype == CPU_ID_ARM1176JZS) {
426 cpufuncs = arm1176_cpufuncs;
427 get_cachetype_cp15();
430 #endif /* CPU_ARM1176 */
431 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/* Cortex-A/Krait parts are matched on the ID scheme field. */
432 switch(cputype & CPU_ID_SCHEME_MASK) {
433 case CPU_ID_CORTEXA5:
434 case CPU_ID_CORTEXA7:
435 case CPU_ID_CORTEXA8:
436 case CPU_ID_CORTEXA9:
437 case CPU_ID_CORTEXA12:
438 case CPU_ID_CORTEXA15:
439 case CPU_ID_CORTEXA53:
440 case CPU_ID_CORTEXA57:
441 case CPU_ID_CORTEXA72:
442 case CPU_ID_KRAIT300:
443 cpufuncs = cortexa_cpufuncs;
444 get_cachetype_cp15();
449 #endif /* CPU_CORTEXA || CPU_KRAIT */
451 #if defined(CPU_MV_PJ4B)
452 if (cputype == CPU_ID_MV88SV581X_V7 ||
453 cputype == CPU_ID_MV88SV584X_V7 ||
454 cputype == CPU_ID_ARM_88SV581X_V7) {
455 cpufuncs = pj4bv7_cpufuncs;
456 get_cachetype_cp15();
459 #endif /* CPU_MV_PJ4B */
/* No match: unsupported CPU. */
462 * Bzzzz. And the answer was ...
464 panic("No support for this CPU type (%08x) in kernel", cputype);
465 return(ARCHITECTURE_NOT_PRESENT);
467 uma_set_align(arm_dcache_align_mask);
/*
 * CPU control-register setup for the ARM9E family: builds the desired
 * control bits, cleans the caches, writes the control register, then
 * invalidates again so the caches restart in the new configuration.
 * NOTE(review): the function signature line is not visible in this
 * excerpt (inner numbering jumps) — presumably this is arm10_setup(),
 * which the ARM9E vectors above install as cf_setup; confirm against
 * the full file.
 */
475 #if defined(CPU_ARM9E)
479 int cpuctrl, cpuctrlmask;
481 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
482 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
483 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
484 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
485 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
486 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
487 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
488 | CPU_CONTROL_BPRD_ENABLE
489 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
491 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
492 cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
496 cpuctrl |= CPU_CONTROL_BEND_ENABLE;
499 /* Clear out the cache */
500 cpu_idcache_wbinv_all();
502 /* Now really make sure they are clean. */
503 __asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
505 if (vector_page == ARM_VECTORS_HIGH)
506 cpuctrl |= CPU_CONTROL_VECRELOC;
508 /* Set the control register */
509 cpu_control(0xffffffff, cpuctrl);
512 cpu_idcache_wbinv_all();
514 #endif /* CPU_ARM9E || CPU_ARM10 */
/*
 * Configure the performance monitor cycle counter (CCNT/PMCCNTR) as a
 * free-running cycle counter, optionally exposing it to userland when
 * _PMC_USER_READ_WRITE_ is defined.
 * NOTE(review): inner numbering jumps — the return-type line and some
 * statements/#else branches of this function are not visible here.
 */
516 #if defined(CPU_ARM1176) \
517 || defined(CPU_MV_PJ4B) \
518 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
520 cpu_scc_setup_ccnt(void)
522 /* This is how you give userland access to the CCNT and PMCn
524 * BEWARE! This gives write access also, which may not be what
527 #ifdef _PMC_USER_READ_WRITE_
528 /* Set PMUSERENR[0] to allow userland access */
529 cp15_pmuserenr_set(1);
531 #if defined(CPU_ARM1176)
532 /* Set PMCR[2,0] to enable counters and reset CCNT */
535 /* Set up the PMCCNTR register as a cyclecounter:
536 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
537 * Set PMCR[2,0] to enable counters and reset CCNT
538 * Set PMCNTENSET to 0x80000000 to enable CCNT */
539 cp15_pminten_clr(0xFFFFFFFF);
541 cp15_pmcnten_set(0x80000000);
/*
 * ARM1176 setup: applies an auxiliary-control-register errata
 * workaround for ARM1176JZSr0 and enables the cycle counter.
 * NOTE(review): the function signature line is not visible in this
 * excerpt (inner numbering jumps) — presumably this is arm11x6_setup(),
 * installed as cf_setup by arm1176_cpufuncs above; several statements
 * are missing from view.
 */
546 #if defined(CPU_ARM1176)
550 uint32_t auxctrl, auxctrl_wax;
554 cpuid = cp15_midr_get();
560 * Enable an errata workaround
562 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
563 auxctrl = ARM1176_AUXCTL_PHD;
564 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
567 tmp = cp15_actlr_get();
574 cpu_scc_setup_ccnt();
576 #endif /* CPU_ARM1176 */
/*
 * NOTE(review): fragment only — the enclosing function (presumably
 * pj4bv7_setup(), per the #endif below and the cf_setup reference
 * above) is almost entirely missing from this excerpt; the cycle
 * counter enable is the only visible statement.
 */
584 cpu_scc_setup_ccnt();
586 #endif /* CPU_MV_PJ4B */
/*
 * NOTE(review): fragment only — the enclosing function (presumably
 * cortexa_setup(), per the #endif below and the cf_setup reference
 * above) is almost entirely missing from this excerpt; the cycle
 * counter enable is the only visible statement.
 */
588 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
593 cpu_scc_setup_ccnt();
595 #endif /* CPU_CORTEXA || CPU_KRAIT */