1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * SPDX-License-Identifier: BSD-4-Clause
6 * arm9 support code Copyright (C) 2001 ARM Ltd
7 * Copyright (c) 1997 Mark Brinicombe.
8 * Copyright (c) 1997 Causality Limited
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Causality Limited.
22 * 4. The name of Causality Limited may not be used to endorse or promote
23 * products derived from this software without specific prior written
26 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * RiscBSD kernel project
42 * C functions for supporting CPU / MMU / TLB specific operations.
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
49 #include <sys/param.h>
50 #include <sys/systm.h>
52 #include <sys/mutex.h>
54 #include <machine/bus.h>
55 #include <machine/cpu.h>
56 #include <machine/disassem.h>
62 #include <machine/cpufunc.h>
64 /* PRIMARY CACHE VARIABLES */
/*
 * Geometry of the primary (L1) caches; populated by get_cachetype_cp15().
 * NOTE(review): this excerpt elides several sibling declarations (e.g. the
 * icache size/ways and pdcache ways variables referenced later) -- confirm
 * against the full file.
 */
66 int arm_picache_line_size;
69 int arm_pdcache_size; /* and unified */
70 int arm_pdcache_line_size;
74 int arm_pcache_unified;
/* arm_dcache_align - 1; used for cache-line rounding (see uma_set_align). */
77 int arm_dcache_align_mask;
/* Raw CLIDR value and per-level CCSIDR words (ARMv7 cache-ID scheme). */
79 u_int arm_cache_level;
80 u_int arm_cache_type[14];
/* Forward declarations for the per-CPU setup routines defined below. */
84 static void pj4bv7_setup(void);
86 #if defined(CPU_ARM1176)
87 static void arm11x6_setup(void);
/* NOTE(review): matching #endif lines for these #if guards are elided here. */
89 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
90 static void cortexa_setup(void);
/*
 * CPU function dispatch table for Marvell PJ4B (ARMv7) cores.
 * All L2 cache hooks are no-ops (cpufunc_nullop); there is no per-CPU
 * sleep hook either.
 * NOTE(review): the opening #if CPU_MV_PJ4B guard, several members, and the
 * closing "};" are elided from this excerpt -- confirm against the full file.
 */
94 struct cpu_functions pj4bv7_cpufuncs = {
95 /* Cache operations */
96 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
97 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
98 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
99 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
100 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
102 /* Other functions */
103 .cf_sleep = (void *)cpufunc_nullop,
106 .cf_setup = pj4bv7_setup
108 #endif /* CPU_MV_PJ4B */
110 #if defined(CPU_ARM1176)
/*
 * CPU function dispatch table for ARM1176 cores: no-op L2 hooks, the
 * arm11x6-specific sleep routine, and arm11x6_setup for init.
 * NOTE(review): some members and the closing "};" are elided from this
 * excerpt -- confirm against the full file.
 */
111 struct cpu_functions arm1176_cpufuncs = {
112 /* Cache operations */
113 .cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
114 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
115 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
116 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
117 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
119 /* Other functions */
120 .cf_sleep = arm11x6_sleep,
123 .cf_setup = arm11x6_setup
125 #endif /*CPU_ARM1176 */
127 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * CPU function dispatch table for Cortex-A and Qualcomm Krait cores.
 * L2 hooks default to no-ops; per the note below they are replaced at
 * runtime when a PL310 outer-cache controller attaches.
 * NOTE(review): some members and the closing "};" are elided from this
 * excerpt -- confirm against the full file.
 */
128 struct cpu_functions cortexa_cpufuncs = {
129 /* Cache operations */
132 * Note: For CPUs using the PL310 the L2 ops are filled in when the
133 * L2 cache controller is actually enabled.
135 .cf_l2cache_wbinv_all = cpufunc_nullop,
136 .cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
137 .cf_l2cache_inv_range = (void *)cpufunc_nullop,
138 .cf_l2cache_wb_range = (void *)cpufunc_nullop,
139 .cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,
141 /* Other functions */
142 .cf_sleep = armv7_cpu_sleep,
145 .cf_setup = cortexa_setup
147 #endif /* CPU_CORTEXA || CPU_KRAIT */
150 * Global constants also used by locore.s
/*
 * The active dispatch table; one of the per-CPU tables above is copied
 * into it during CPU identification (see the selection code below).
 */
153 struct cpu_functions cpufuncs;
156 static void get_cachetype_cp15(void);
158 /* Additional cache information local to this file. Log2 of some of the
/*
 * Log2 L2 dcache geometry decoded from the pre-ARMv7 cache type register
 * in get_cachetype_cp15().
 */
160 static int arm_dcache_l2_nsets;
161 static int arm_dcache_l2_assoc;
162 static int arm_dcache_l2_linesize;
/*
 * get_cachetype_cp15:
 * Read the CP15 cache identification registers and populate the global
 * arm_*cache_* variables describing cache geometry.  Two decode paths:
 * the ARMv7 CLIDR/CSSELR/CCSIDR scheme, and the older unified cache type
 * register (CTR) format.
 * NOTE(review): many lines of this function (return type, braces, else
 * branches, loop increments) are elided from this excerpt; the comments
 * below describe only what is visible.
 */
165 get_cachetype_cp15(void)
167 u_int ctype, isize, dsize, cpuid;
168 u_int clevel, csize, i, sel;
/* CTR = cache type register, MIDR = main ID register. */
172 ctype = cp15_ctr_get();
173 cpuid = cp15_midr_get();
175 * ...and thus spake the ARM ARM:
177 * If an <opcode2> value corresponding to an unimplemented or
178 * reserved ID register is encountered, the System Control
179 * processor returns the value of the main ID register.
/*
 * ARMv7 path: read CLIDR (mrc p15,1,c0,c0,1), then walk the per-level
 * 3-bit cache-type fields, selecting each level via CSSELR
 * (mcr p15,2,c0,c0,0) and reading its CCSIDR (mrc p15,1,c0,c0,0).
 */
184 if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
185 __asm __volatile("mrc p15, 1, %0, c0, c0, 1"
187 arm_cache_level = clevel;
188 arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
190 while ((type = (clevel & 0x7)) && i < 7) {
/* Data or unified cache at this level: record CCSIDR and derive
 * the dcache line size (log2 line len field + 4 => bytes). */
191 if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
192 type == CACHE_SEP_CACHE) {
194 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
196 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
198 arm_cache_type[sel] = csize;
199 arm_dcache_align = 1 <<
200 (CPUV7_CT_xSIZE_LEN(csize) + 4);
201 arm_dcache_align_mask = arm_dcache_align - 1;
/* Separate instruction cache at this level: record its CCSIDR too. */
203 if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
205 __asm __volatile("mcr p15, 2, %0, c0, c0, 0"
207 __asm __volatile("mrc p15, 1, %0, c0, c0, 0"
209 arm_cache_type[sel] = csize;
/*
 * Pre-ARMv7 path: S bit clear in CTR means a unified (not separate I/D)
 * cache.
 */
215 if ((ctype & CPU_CT_S) == 0)
216 arm_pcache_unified = 1;
219 * If you want to know how this code works, go read the ARM ARM.
222 arm_pcache_type = CPU_CT_CTYPE(ctype);
/* Separate I-cache: decode size/ways/line from the CTR Isize field.
 * multiplier is 3 when the M bit is set (x1.5 sizing), else 2. */
224 if (arm_pcache_unified == 0) {
225 isize = CPU_CT_ISIZE(ctype);
226 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
227 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
228 if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
229 if (isize & CPU_CT_xSIZE_M)
230 arm_picache_line_size = 0; /* not present */
232 arm_picache_ways = 1;
234 arm_picache_ways = multiplier <<
235 (CPU_CT_xSIZE_ASSOC(isize) - 1);
237 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
/* D-cache (or unified cache): same decode applied to the Dsize field. */
240 dsize = CPU_CT_DSIZE(ctype);
241 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
242 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
243 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
244 if (dsize & CPU_CT_xSIZE_M)
245 arm_pdcache_line_size = 0; /* not present */
247 arm_pdcache_ways = 1;
249 arm_pdcache_ways = multiplier <<
250 (CPU_CT_xSIZE_ASSOC(dsize) - 1);
252 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
254 arm_dcache_align = arm_pdcache_line_size;
/* File-local log2 L2 geometry, derived from the same Dsize fields. */
256 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
257 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
258 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
259 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
262 arm_dcache_align_mask = arm_dcache_align - 1;
267 * Cannot panic here as we may not have a console yet ...
/*
 * CPU identification: read MIDR, match the CPU against each compiled-in
 * family, install the corresponding cpu_functions table into the global
 * 'cpufuncs', and read the cache geometry.
 * NOTE(review): the function signature line is elided from this excerpt
 * (presumably set_cpufuncs() -- confirm), as are the success-path returns
 * after each match and the closing brace.
 */
273 cputype = cp15_midr_get();
274 cputype &= CPU_ID_CPU_MASK;
276 #if defined(CPU_ARM1176)
277 if (cputype == CPU_ID_ARM1176JZS) {
278 cpufuncs = arm1176_cpufuncs;
279 get_cachetype_cp15();
282 #endif /* CPU_ARM1176 */
/* Cortex-A / Krait: match on the part-number scheme bits of MIDR. */
283 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
284 switch(cputype & CPU_ID_SCHEME_MASK) {
285 case CPU_ID_CORTEXA5:
286 case CPU_ID_CORTEXA7:
287 case CPU_ID_CORTEXA8:
288 case CPU_ID_CORTEXA9:
289 case CPU_ID_CORTEXA12:
290 case CPU_ID_CORTEXA15:
291 case CPU_ID_CORTEXA53:
292 case CPU_ID_CORTEXA57:
293 case CPU_ID_CORTEXA72:
294 case CPU_ID_KRAIT300:
295 cpufuncs = cortexa_cpufuncs;
296 get_cachetype_cp15();
301 #endif /* CPU_CORTEXA || CPU_KRAIT */
/* Marvell PJ4B variants (88SV581x/88SV584x ARMv7 cores). */
303 #if defined(CPU_MV_PJ4B)
304 if (cputype == CPU_ID_MV88SV581X_V7 ||
305 cputype == CPU_ID_MV88SV584X_V7 ||
306 cputype == CPU_ID_ARM_88SV581X_V7) {
307 cpufuncs = pj4bv7_cpufuncs;
308 get_cachetype_cp15();
311 #endif /* CPU_MV_PJ4B */
/* No table matched: the kernel cannot run on this CPU. */
314 * Bzzzz. And the answer was ...
316 panic("No support for this CPU type (%08x) in kernel", cputype);
317 return(ARCHITECTURE_NOT_PRESENT);
/* Reached on a successful match (flow elided above): make the UMA
 * allocator align its slabs to the detected dcache line size. */
319 uma_set_align(arm_dcache_align_mask);
328 #if defined(CPU_ARM1176) \
329 || defined(CPU_MV_PJ4B) \
330 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * cpu_scc_setup_ccnt:
 * Configure the performance-monitor cycle counter (CCNT) as a free-running
 * cycle counter, optionally exposing it to userland.
 * NOTE(review): the return type line and the #else/#endif structure around
 * the ARM1176 branch are elided from this excerpt.
 */
332 cpu_scc_setup_ccnt(void)
334 /* This is how you give userland access to the CCNT and PMCn
336 * BEWARE! This gives write access also, which may not be what
339 #ifdef _PMC_USER_READ_WRITE_
340 /* Set PMUSERENR[0] to allow userland access */
341 cp15_pmuserenr_set(1);
343 #if defined(CPU_ARM1176)
344 /* Set PMCR[2,0] to enable counters and reset CCNT */
/* ARMv7 path (ARM1176 uses the older PMCR-only sequence above). */
347 /* Set up the PMCCNTR register as a cyclecounter:
348 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
349 * Set PMCR[2,0] to enable counters and reset CCNT
350 * Set PMCNTENSET to 0x80000000 to enable CCNT */
351 cp15_pminten_clr(0xFFFFFFFF);
353 cp15_pmcnten_set(0x80000000);
358 #if defined(CPU_ARM1176)
/*
 * arm11x6_setup (body fragment):
 * Per-CPU init for ARM1176: applies an auxiliary-control-register errata
 * workaround keyed on the MIDR, then enables the cycle counter.
 * NOTE(review): the function signature, additional locals (e.g. 'tmp',
 * 'cpuid'), the ACTLR write-back, and the closing brace are elided from
 * this excerpt.
 */
362 uint32_t auxctrl, auxctrl_wax;
366 cpuid = cp15_midr_get();
372 * Enable an errata workaround
/* ARM1176JZS: set the PHD bit in ACTLR; the wax mask keeps other bits. */
374 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
375 auxctrl = ARM1176_AUXCTL_PHD;
376 auxctrl_wax = ~ARM1176_AUXCTL_PHD;
379 tmp = cp15_actlr_get();
386 cpu_scc_setup_ccnt();
388 #endif /* CPU_ARM1176 */
/* Tail of pj4bv7_setup (signature and earlier body elided from this
 * excerpt): enable the cycle counter. */
396 cpu_scc_setup_ccnt();
398 #endif /* CPU_MV_PJ4B */
400 #if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/* Tail of cortexa_setup (signature and earlier body elided from this
 * excerpt): enable the cycle counter. */
405 cpu_scc_setup_ccnt();
407 #endif /* CPU_CORTEXA || CPU_KRAIT */