/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/sysctl.h>
36 #include <machine/cpu.h>
37 #include <machine/cpuinfo.h>
40 void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set);
43 struct cpuinfo cpuinfo =
45 /* Use safe defaults for start */
46 .dcache_line_size = 32,
47 .dcache_line_mask = 31,
48 .icache_line_size = 32,
49 .icache_line_mask = 31,
52 static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD, 0,
54 static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD, 0,
59 * Be careful, ACTRL cannot be changed if CPU is started in secure
60 * mode(world) and write to ACTRL can cause exception!
61 * These quirks are intended for optimizing CPU performance, not for
62 * applying errata workarounds. Nobody can expect that CPU with unfixed
63 * errata is stable enough to execute the kernel until quirks are applied.
65 static uint32_t cpu_quirks_actlr_mask;
66 SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask,
67 CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0,
68 "Bits to be masked in ACTLR");
70 static uint32_t cpu_quirks_actlr_set;
71 SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set,
72 CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0,
73 "Bits to be set in ACTLR");
76 /* Read and parse CPU id scheme */
82 * Prematurely fetch CPU quirks. Standard fetch for tunable
83 * sysctls is handled using SYSINIT, thus too late for boot CPU.
84 * Keep names in sync with sysctls.
86 TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_mask", &cpu_quirks_actlr_mask);
87 TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_set", &cpu_quirks_actlr_set);
89 cpuinfo.midr = cp15_midr_get();
90 /* Test old version id schemes first */
91 if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
92 if (CPU_ID_ISOLD(cpuinfo.midr)) {
93 /* obsolete ARMv2 or ARMv3 CPU */
97 if (CPU_ID_IS7(cpuinfo.midr)) {
98 if ((cpuinfo.midr & (1 << 23)) == 0) {
99 /* obsolete ARMv3 CPU */
104 cpuinfo.architecture = 1;
105 cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
107 /* ARM new id scheme */
108 cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
109 cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
112 /* non ARM -> must be new id scheme */
113 cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
114 cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
116 /* Parse rest of MIDR */
117 cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
118 cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
119 cpuinfo.patch = cpuinfo.midr & 0x0F;
121 /* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
122 cpuinfo.ctr = cp15_ctr_get();
123 cpuinfo.tcmtr = cp15_tcmtr_get();
125 cpuinfo.tlbtr = cp15_tlbtr_get();
126 cpuinfo.mpidr = cp15_mpidr_get();
127 cpuinfo.revidr = cp15_revidr_get();
130 /* if CPU is not v7 cpu id scheme */
131 if (cpuinfo.architecture != 0xF)
134 cpuinfo.id_pfr0 = cp15_id_pfr0_get();
135 cpuinfo.id_pfr1 = cp15_id_pfr1_get();
136 cpuinfo.id_dfr0 = cp15_id_dfr0_get();
137 cpuinfo.id_afr0 = cp15_id_afr0_get();
138 cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
139 cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
140 cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
141 cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
142 cpuinfo.id_isar0 = cp15_id_isar0_get();
143 cpuinfo.id_isar1 = cp15_id_isar1_get();
144 cpuinfo.id_isar2 = cp15_id_isar2_get();
145 cpuinfo.id_isar3 = cp15_id_isar3_get();
146 cpuinfo.id_isar4 = cp15_id_isar4_get();
147 cpuinfo.id_isar5 = cp15_id_isar5_get();
149 /* Not yet - CBAR only exist on ARM SMP Cortex A CPUs
150 cpuinfo.cbar = cp15_cbar_get();
152 if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
153 cpuinfo.ccsidr = cp15_ccsidr_get();
154 cpuinfo.clidr = cp15_clidr_get();
157 /* Test if revidr is implemented */
158 if (cpuinfo.revidr == cpuinfo.midr)
161 /* parsed bits of above registers */
163 cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
164 cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
165 cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
166 cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
168 cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
170 cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
171 cpuinfo.maintenance_broadcast =(cpuinfo.id_mmfr3 >> 12) & 0xF;
173 cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
174 cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
175 cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
177 cpuinfo.mp_ext = (cpuinfo.mpidr >> 31u) & 0x1;
180 if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
181 cpuinfo.dcache_line_size =
182 1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
183 cpuinfo.icache_line_size =
184 1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
186 cpuinfo.dcache_line_size =
187 1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
188 cpuinfo.icache_line_size =
189 1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
191 cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
192 cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;
198 * Get bits that must be set or cleared in ACLR register.
199 * Note: Bits in ACLR register are IMPLEMENTATION DEFINED.
200 * Its expected that SCU is in operational state before this
201 * function is called.
204 cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
210 if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
211 switch (cpuinfo.part_number) {
212 case CPU_ARCH_CORTEX_A73:
213 case CPU_ARCH_CORTEX_A72:
214 case CPU_ARCH_CORTEX_A57:
215 case CPU_ARCH_CORTEX_A53:
216 /* Nothing to do for AArch32 */
218 case CPU_ARCH_CORTEX_A17:
219 case CPU_ARCH_CORTEX_A12: /* A12 is merged to A17 */
223 *actlr_mask = (1 << 6);
224 *actlr_set = (1 << 6);
226 case CPU_ARCH_CORTEX_A15:
228 * Enable snoop-delayed exclusive handling
231 *actlr_mask = (1U << 31) |(1 << 6);
232 *actlr_set = (1U << 31) |(1 << 6);
234 case CPU_ARCH_CORTEX_A9:
236 * Disable exclusive L1/L2 cache control
238 * Enable Cache and TLB maintenance broadcast
240 *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
241 *actlr_set = (1 << 6) | (1 << 0);
243 case CPU_ARCH_CORTEX_A8:
246 * Enable L1 data cache hardware alias checks
248 *actlr_mask = (1 << 1) | (1 << 0);
249 *actlr_set = (1 << 1);
251 case CPU_ARCH_CORTEX_A7:
255 *actlr_mask = (1 << 6);
256 *actlr_set = (1 << 6);
258 case CPU_ARCH_CORTEX_A5:
260 * Disable exclusive L1/L2 cache control
262 * Enable Cache and TLB maintenance broadcast
264 *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
265 *actlr_set = (1 << 6) | (1 << 0);
267 case CPU_ARCH_ARM1176:
269 * Restrict cache size to 16KB
270 * Enable the return stack
271 * Enable dynamic branch prediction
272 * Enable static branch prediction
274 *actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
275 *actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
282 /* Reinitialize MMU to final kernel mapping and apply all CPU quirks. */
284 cpuinfo_reinit_mmu(uint32_t ttb)
289 cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
290 actlr_mask |= cpu_quirks_actlr_mask;
291 actlr_set |= cpu_quirks_actlr_set;
292 reinit_mmu(ttb, actlr_mask, actlr_set);
295 #endif /* __ARM_ARCH >= 6 */