2 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
3 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/systm.h>
34 #include <machine/cpu.h>
35 #include <machine/cpuinfo.h>
36 #include <machine/elf.h>
37 #include <machine/md_var.h>
39 struct cpuinfo cpuinfo =
/*
 * Conservative boot-time defaults: 32-byte cache lines are assumed until
 * cpuinfo_init() reads the real line sizes from the CP15 CTR register
 * and overwrites these fields.
 * NOTE(review): the initializer's opening '{' and closing '};' are on
 * lines dropped during extraction — restore them against the upstream
 * file before compiling.
 */
41 /* Use safe defaults for start */
42 .dcache_line_size = 32,
43 .dcache_line_mask = 31,
44 .icache_line_size = 32,
45 .icache_line_mask = 31,
48 /* Read and parse CPU id scheme */
/*
 * Body of cpuinfo_init(): decodes the Main ID Register (MIDR), snapshots
 * the CP15 identification/feature registers into the global 'cpuinfo',
 * derives cache-line geometry from CTR, and publishes AT_HWCAP/AT_HWCAP2
 * feature bits for userland.
 * NOTE(review): the function signature, opening/closing braces, 'else'
 * lines and the 'if' guards of the HWCAP section sit on lines that were
 * dropped during extraction (the leading number on each line is the
 * original file's line number and shows the gaps).  The code below is
 * annotated as-is, not repaired.
 */
56 cpuinfo.midr = cp15_midr_get();
57 /* Test old version id schemes first */
58 if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
59 if (CPU_ID_ISOLD(cpuinfo.midr)) {
60 /* obsolete ARMv2 or ARMv3 CPU */
64 if (CPU_ID_IS7(cpuinfo.midr)) {
65 if ((cpuinfo.midr & (1 << 23)) == 0) {
66 /* obsolete ARMv3 CPU */
/* Old ARM7 id scheme: architecture is fixed, revision is 7 bits wide. */
71 cpuinfo.architecture = 1;
72 cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
74 /* ARM new id scheme */
75 cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
76 cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
79 /* non ARM -> must be new id scheme */
80 cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
81 cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
83 /* Parse rest of MIDR */
84 cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
85 cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
86 cpuinfo.patch = cpuinfo.midr & 0x0F;
88 /* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
89 cpuinfo.ctr = cp15_ctr_get();
90 cpuinfo.tcmtr = cp15_tcmtr_get();
92 cpuinfo.tlbtr = cp15_tlbtr_get();
93 cpuinfo.mpidr = cp15_mpidr_get();
94 cpuinfo.revidr = cp15_revidr_get();
97 /* if CPU is not v7 cpu id scheme */
98 if (cpuinfo.architecture != 0xF)
/* ARMv7 CPUID-scheme feature registers: ID_PFRx, ID_DFR0, ID_AFR0,
 * ID_MMFR0-3 and ID_ISAR0-5; decoded bit-fields are extracted below. */
101 cpuinfo.id_pfr0 = cp15_id_pfr0_get();
102 cpuinfo.id_pfr1 = cp15_id_pfr1_get();
103 cpuinfo.id_dfr0 = cp15_id_dfr0_get();
104 cpuinfo.id_afr0 = cp15_id_afr0_get();
105 cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
106 cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
107 cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
108 cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
109 cpuinfo.id_isar0 = cp15_id_isar0_get();
110 cpuinfo.id_isar1 = cp15_id_isar1_get();
111 cpuinfo.id_isar2 = cp15_id_isar2_get();
112 cpuinfo.id_isar3 = cp15_id_isar3_get();
113 cpuinfo.id_isar4 = cp15_id_isar4_get();
114 cpuinfo.id_isar5 = cp15_id_isar5_get();
116 /* Not yet - CBAR only exist on ARM SMP Cortex A CPUs
117 cpuinfo.cbar = cp15_cbar_get();
*/ /* NOTE(review): closing comment delimiter restored; it was on a dropped line. */
119 if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
120 cpuinfo.ccsidr = cp15_ccsidr_get();
121 cpuinfo.clidr = cp15_clidr_get();
124 /* Test if revidr is implemented */
125 if (cpuinfo.revidr == cpuinfo.midr)
128 /* parsed bits of above registers */
/* ID_MMFR0 fields: shareability levels and auxiliary-register support. */
130 cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
131 cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
132 cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
133 cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
/* ID_MMFR2: memory-barrier support; ID_MMFR3: coherent page-table walk
 * and maintenance-operation broadcast. */
135 cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
137 cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
138 cpuinfo.maintenance_broadcast =(cpuinfo.id_mmfr3 >> 12) & 0xF;
/* ID_PFR1: generic timer, virtualization and security extensions. */
140 cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
141 cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
142 cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
/* ARMv7 CTR encodes the minimum line size in words as log2, hence
 * bytes = 1 << (field + 2). */
145 if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
146 cpuinfo.dcache_line_size =
147 1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
148 cpuinfo.icache_line_size =
149 1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
/* Pre-v7 CTR format: line length field gives bytes = 1 << (field + 3).
 * NOTE(review): the '} else {' line between the two branches was dropped
 * in extraction. */
151 cpuinfo.dcache_line_size =
152 1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
153 cpuinfo.icache_line_size =
154 1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
156 cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
157 cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;
159 /* Fill AT_HWCAP bits. */
160 elf_hwcap |= HWCAP_HALF | HWCAP_FAST_MULT; /* Required for all CPUs */
161 elf_hwcap |= HWCAP_TLS | HWCAP_EDSP; /* Required for v6+ CPUs */
/* NOTE(review): each 'tmp = ...' below extracts a 4-bit feature field;
 * the 'if (tmp >= ...)' guards deciding whether to set each HWCAP bit
 * sit on dropped lines — the |= statements are NOT unconditional in the
 * upstream file. */
163 tmp = (cpuinfo.id_isar0 >> 24) & 0xF; /* Divide_instrs */
165 elf_hwcap |= HWCAP_IDIVT;
167 elf_hwcap |= HWCAP_IDIVA;
169 tmp = (cpuinfo.id_pfr0 >> 4) & 0xF; /* State1 */
171 elf_hwcap |= HWCAP_THUMB;
173 tmp = (cpuinfo.id_pfr0 >> 12) & 0xF; /* State3 */
175 elf_hwcap |= HWCAP_THUMBEE;
177 tmp = (cpuinfo.id_mmfr0 >> 0) & 0xF; /* VMSA */
179 elf_hwcap |= HWCAP_LPAE;
181 /* Fill AT_HWCAP2 bits. */
182 tmp = (cpuinfo.id_isar5 >> 4) & 0xF; /* AES */
184 elf_hwcap2 |= HWCAP2_AES;
186 elf_hwcap2 |= HWCAP2_PMULL;
188 tmp = (cpuinfo.id_isar5 >> 8) & 0xF; /* SHA1 */
190 elf_hwcap2 |= HWCAP2_SHA1;
192 tmp = (cpuinfo.id_isar5 >> 12) & 0xF; /* SHA2 */
194 elf_hwcap2 |= HWCAP2_SHA2;
196 tmp = (cpuinfo.id_isar5 >> 16) & 0xF; /* CRC32 */
198 elf_hwcap2 |= HWCAP2_CRC32;
/*
203  * Get bits that must be set or cleared in ACTLR register.
204  * Note: Bits in ACTLR register are IMPLEMENTATION DEFINED.
205  * It's expected that SCU is in operational state before this
206  * function is called.
 */
/*
 * NOTE(review): the 'void' return-type line, the function's opening
 * brace, every 'break' after the case bodies, and the tail of the switch
 * (plus default case and function end) sit on lines dropped during
 * extraction; the function also continues past the end of this chunk.
 * Orphaned '* ...' comment fragments below had their delimiters dropped
 * too — they are re-wrapped in proper comments here.  Code annotated
 * as-is, not repaired.
 */
209 cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
214 if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
215 switch (cpuinfo.part_number) {
216 case CPU_ARCH_CORTEX_A73:
217 case CPU_ARCH_CORTEX_A72:
218 case CPU_ARCH_CORTEX_A57:
219 case CPU_ARCH_CORTEX_A53:
220 /* Nothing to do for AArch32 */
222 case CPU_ARCH_CORTEX_A17:
223 case CPU_ARCH_CORTEX_A12: /* A12 is merged to A17 */
/* NOTE(review): descriptive comment for bit 6 was dropped here;
 * presumably the SMP/coherency enable bit — confirm against the
 * Cortex-A17 TRM. */
227 *actlr_mask = (1 << 6);
228 *actlr_set = (1 << 6);
230 case CPU_ARCH_CORTEX_A15:
/*
232  * Enable snoop-delayed exclusive handling
 */
235 *actlr_mask = (1U << 31) |(1 << 6);
236 *actlr_set = (1U << 31) |(1 << 6);
238 case CPU_ARCH_CORTEX_A9:
/*
240  * Disable exclusive L1/L2 cache control
242  * Enable Cache and TLB maintenance broadcast
 */
244 *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
245 *actlr_set = (1 << 6) | (1 << 0);
247 case CPU_ARCH_CORTEX_A8:
/*
250  * Enable L1 data cache hardware alias checks
 */
252 *actlr_mask = (1 << 1) | (1 << 0);
253 *actlr_set = (1 << 1);
255 case CPU_ARCH_CORTEX_A7:
/* NOTE(review): descriptive comment for bit 6 was dropped here;
 * presumably the SMP coherency enable bit — confirm against the
 * Cortex-A7 MPCore TRM. */
259 *actlr_mask = (1 << 6);
260 *actlr_set = (1 << 6);
262 case CPU_ARCH_CORTEX_A5:
/*
264  * Disable exclusive L1/L2 cache control
266  * Enable Cache and TLB maintenance broadcast
 */
268 *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
269 *actlr_set = (1 << 6) | (1 << 0);
271 case CPU_ARCH_ARM1176:
/*
273  * Restrict cache size to 16KB
274  * Enable the return stack
275  * Enable dynamic branch prediction
276  * Enable static branch prediction
 */
278 *actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
279 *actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);