1 /* $NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $ */
4 * SPDX-License-Identifier: BSD-4-Clause
6 * arm9 support code Copyright (C) 2001 ARM Ltd
7 * Copyright (c) 1997 Mark Brinicombe.
8 * Copyright (c) 1997 Causality Limited
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Causality Limited.
22 * 4. The name of Causality Limited may not be used to endorse or promote
23 * products derived from this software without specific prior written
26 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
27 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
29 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * RiscBSD kernel project
42 * C functions for supporting CPU / MMU / TLB specific operations.
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
49 #include <sys/param.h>
50 #include <sys/systm.h>
52 #include <sys/mutex.h>
54 #include <machine/bus.h>
55 #include <machine/cpu.h>
56 #include <machine/disassem.h>
62 #include <machine/cpufunc.h>
/* PRIMARY CACHE VARIABLES */
/*
 * Geometry of the primary (L1) caches, filled in at boot by
 * get_cachetype_cp15().  Consumed by cache maintenance routines and
 * (per the comment elsewhere in this file) by locore.s.
 * NOTE(review): additional related variables appear to be elided from
 * this view of the file.
 */
int arm_picache_line_size;		/* I-cache line size, bytes */
int arm_pdcache_size; /* and unified */	/* D-cache (or unified) total size */
int arm_pdcache_line_size;		/* D-cache line size, bytes */
int arm_pcache_unified;			/* non-zero if I/D caches are unified */
int arm_dcache_align_mask;		/* (D-cache line size - 1), for aligning */
u_int arm_cache_level;			/* raw CLIDR value (ARMv7 probe path) */
u_int arm_cache_type[14];		/* per-level CCSIDR values (ARMv7 probe path) */
/*
 * Forward declarations of the per-CPU-family setup routines installed in
 * the cf_setup slot of the dispatch tables below.  Each is compiled only
 * when its CPU family is configured into the kernel.
 * NOTE(review): some #endif lines appear to be elided from this view.
 */
#if defined(CPU_ARM9E)
static void arm10_setup(void);
static void pj4bv7_setup(void);
#if defined(CPU_ARM1176)
static void arm11x6_setup(void);
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void cortexa_setup(void);
#if defined(CPU_ARM9E)
/*
 * Dispatch table for ARMv5 "EC" cores (e.g. ARM926EJ-S): positional
 * initializers, one per struct cpu_functions slot, mixing armv4/arm9
 * common helpers with the armv5_ec cache primitives.  The L2 slots are
 * no-ops — these cores have no outer cache the kernel manages here.
 * NOTE(review): some initializers (and the closing brace) appear to be
 * elided from this view of the file.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	cpufunc_nullop,			/* cpwait */
	cpufunc_control,		/* control */
	armv5_ec_setttb,		/* Setttb */
	armv4_tlb_flushID,		/* tlb_flushID */
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE */
	armv4_tlb_flushD,		/* tlb_flushD */
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE */

	/* Cache operations */
	armv5_ec_icache_sync_range,	/* icache_sync_range */
	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all */
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range */
	armv5_ec_dcache_inv_range,	/* dcache_inv_range */
	armv5_ec_dcache_wb_range,	/* dcache_wb_range */
	armv4_idcache_inv_all,		/* idcache_inv_all */
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all */
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range */
	/* No L2 on these cores: every outer-cache op is a no-op. */
	cpufunc_nullop,			/* l2cache_wbinv_all */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range */
	(void *)cpufunc_nullop,		/* l2cache_inv_range */
	(void *)cpufunc_nullop,		/* l2cache_wb_range */
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */
	armv4_drain_writebuf,		/* drain_writebuf */
	(void *)cpufunc_nullop,		/* sleep */
	arm9_context_switch,		/* context_switch */
	arm10_setup			/* cpu setup */
/*
 * Dispatch table for Marvell Sheeva (Feroceon) cores.  Differs from
 * armv5_ec_cpufuncs in the ranged D-cache/ID-cache ops and in having a
 * real Marvell L2 cache plus a CPU sleep hook.
 * NOTE(review): some initializers (and the closing brace) appear to be
 * elided from this view of the file.
 */
struct cpu_functions sheeva_cpufuncs = {
	cpufunc_nullop,			/* cpwait */
	cpufunc_control,		/* control */
	sheeva_setttb,			/* Setttb */
	armv4_tlb_flushID,		/* tlb_flushID */
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE */
	armv4_tlb_flushD,		/* tlb_flushD */
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE */

	/* Cache operations */
	armv5_ec_icache_sync_range,	/* icache_sync_range */
	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all */
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range */
	sheeva_dcache_inv_range,	/* dcache_inv_range */
	sheeva_dcache_wb_range,		/* dcache_wb_range */
	armv4_idcache_inv_all,		/* idcache_inv_all */
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all */
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range */
	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all */
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range */
	sheeva_l2cache_inv_range,	/* l2cache_inv_range */
	sheeva_l2cache_wb_range,	/* l2cache_wb_range */
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */
	armv4_drain_writebuf,		/* drain_writebuf */
	sheeva_cpu_sleep,		/* sleep */
	arm9_context_switch,		/* context_switch */
	arm10_setup			/* cpu setup */
#endif /* CPU_ARM9E */
/*
 * Dispatch table for Marvell PJ4B (ARMv7) cores, using designated
 * initializers; slots not named here are zero.  The L2 ops are no-ops
 * at this level and the sleep hook is a no-op as well.
 * NOTE(review): the closing brace appears to be elided from this view.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = (void *)cpufunc_nullop,
	.cf_setup = pj4bv7_setup
#endif /* CPU_MV_PJ4B */
#if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 cores (designated initializers; unnamed
 * slots are zero).  No managed L2, so the outer-cache ops are no-ops;
 * the sleep hook is the arm11x6 wait-for-interrupt routine.
 * NOTE(review): the closing brace appears to be elided from this view.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* Cache operations */
	.cf_l2cache_wbinv_all = (void *)cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = arm11x6_sleep,
	.cf_setup = arm11x6_setup
#endif /*CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A family and Qualcomm Krait cores
 * (designated initializers; unnamed slots are zero).
 * NOTE(review): the closing brace appears to be elided from this view.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* Cache operations */
	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	.cf_l2cache_wbinv_all = cpufunc_nullop,
	.cf_l2cache_wbinv_range = (void *)cpufunc_nullop,
	.cf_l2cache_inv_range = (void *)cpufunc_nullop,
	.cf_l2cache_wb_range = (void *)cpufunc_nullop,
	.cf_l2cache_drain_writebuf = (void *)cpufunc_nullop,

	/* Other functions */
	.cf_sleep = armv7_cpu_sleep,
	.cf_setup = cortexa_setup
#endif /* CPU_CORTEXA || CPU_KRAIT */
/*
 * Global constants also used by locore.s
 *
 * The active dispatch table; one of the per-family tables above is
 * copied into it during CPU identification.
 */
struct cpu_functions cpufuncs;
#if defined (CPU_ARM9E) || \
  defined(CPU_ARM1176) || \
  defined(CPU_MV_PJ4B) || \
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static void get_cachetype_cp15(void);

/*
 * Additional cache information local to this file.  Log2 of some of the
 * L2 geometry figures is stored rather than the raw byte counts.
 */
static int arm_dcache_l2_nsets;
static int arm_dcache_l2_assoc;
static int arm_dcache_l2_linesize;

/*
 * Probe the CP15 cache identification registers and fill in the
 * arm_picache_* / arm_pdcache_* / arm_cache_* globals above.  Takes the
 * ARMv7 CLIDR/CCSIDR path when the cache type register uses the v7
 * format, otherwise decodes the legacy pre-v7 cache type register.
 * NOTE(review): many lines of this function (including the function
 * header, several asm operand lists, braces and else branches) appear
 * to be elided from this view of the file; only the visible code is
 * documented here.
 */
get_cachetype_cp15(void)
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;

	ctype = cp15_ctr_get();		/* cache type register */
	cpuid = cp15_midr_get();	/* main ID register */
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* ARMv7: walk CLIDR, reading one CCSIDR per level. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				/* Select the D/unified cache, read its CCSIDR. */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				arm_cache_type[sel] = csize;
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				/* Select the I-cache, read its CCSIDR. */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				arm_cache_type[sel] = csize;
	/* Pre-v7 format: S bit clear means a unified cache. */
	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */
	arm_pcache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache_unified == 0) {
		/* Separate I-cache geometry. */
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			arm_picache_ways = 1;
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);

	/* D-cache (or unified cache) geometry. */
	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		arm_pdcache_ways = 1;
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

	/* Derived log2 figures for the L2 helpers in this file. */
	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

	arm_dcache_align_mask = arm_dcache_align - 1;
#endif /* ARM9 || XSCALE */
/*
 * Identify the CPU from the main ID register and install the matching
 * dispatch table into the global 'cpufuncs', then probe cache geometry.
 * (Presumably this is set_cpufuncs(); the function header is not
 * visible in this view of the file — TODO confirm.)
 *
 * Cannot panic here as we may not have a console yet ...
 */
	cputype = cp15_midr_get();
	cputype &= CPU_ID_CPU_MASK;	/* keep only the part number bits */

#if defined(CPU_ARM9E)
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
	} else if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		get_cachetype_cp15();
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	switch(cputype & CPU_ID_SCHEME_MASK) {
	case CPU_ID_CORTEXA5:
	case CPU_ID_CORTEXA7:
	case CPU_ID_CORTEXA8:
	case CPU_ID_CORTEXA9:
	case CPU_ID_CORTEXA12:
	case CPU_ID_CORTEXA15:
	case CPU_ID_CORTEXA53:
	case CPU_ID_CORTEXA57:
	case CPU_ID_CORTEXA72:
	case CPU_ID_KRAIT300:
		cpufuncs = cortexa_cpufuncs;
		get_cachetype_cp15();
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
#endif /* CPU_MV_PJ4B */

	/*
	 * Bzzzz. And the answer was ...
	 *
	 * No table matched: this kernel has no support for the CPU.
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);
	/* Let UMA know the alignment the probed D-cache requires. */
	uma_set_align(arm_dcache_align_mask);
#if defined(CPU_ARM9E)
/*
 * CPU setup body for the ARM9E family: build the desired control
 * register value (MMU, caches, write buffer, branch prediction),
 * flush the caches, and program the control register.
 * (Presumably this is arm10_setup(), matching the forward declaration
 * earlier in the file; the function header is not visible in this
 * view — TODO confirm.)
 */
	int cpuctrl, cpuctrlmask;

	/* Bits we want set. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	/* All the bits this routine is allowed to touch. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	/* Trap unaligned accesses unless the kernel config disables it. */
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean. */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);

	cpu_idcache_wbinv_all();
#endif /* CPU_ARM9E || CPU_ARM10 */
#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Configure the performance monitor cycle counter (CCNT) so it can be
 * used as a free-running cycle counter, optionally exposing it to
 * userland.  Shared by the ARM1176, PJ4B and Cortex-A/Krait setup
 * routines below.
 */
cpu_scc_setup_ccnt(void)
	/*
	 * This is how you give userland access to the CCNT and PMCn
	 * registers.
	 * BEWARE! This gives write access also, which may not be what
	 * you want.
	 */
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	/*
	 * Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT
	 */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcnten_set(0x80000000);
#if defined(CPU_ARM1176)
/*
 * CPU setup body for ARM1176: apply the ARM1176JZS errata workaround
 * via the auxiliary control register, then enable the cycle counter.
 * (Presumably this is arm11x6_setup(), matching the forward
 * declaration earlier in the file; the function header is not visible
 * in this view — TODO confirm.)
 */
	uint32_t auxctrl, auxctrl_wax;
	cpuid = cp15_midr_get();
	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	tmp = cp15_actlr_get();		/* current auxiliary control value */
	cpu_scc_setup_ccnt();
#endif /* CPU_ARM1176 */
	/*
	 * Tail of the PJ4B setup routine (presumably pj4bv7_setup();
	 * its header is not visible in this view — TODO confirm):
	 * enable the cycle counter.
	 */
	cpu_scc_setup_ccnt();
#endif /* CPU_MV_PJ4B */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* Tail of cortexa_setup() (header elided): enable the cycle counter. */
	cpu_scc_setup_ccnt();
#endif /* CPU_CORTEXA || CPU_KRAIT */