2 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
3 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
36 #include <sys/sysctl.h>
38 #include <machine/cpu.h>
39 #include <machine/cpuinfo.h>
40 #include <machine/elf.h>
41 #include <machine/md_var.h>
/*
 * NOTE(review): this extract has embedded, non-contiguous original line
 * numbers — many lines (braces, else/break, signatures) were dropped.
 * Comments added below are hedged accordingly.
 */
/*
 * reinit_mmu() has no definition in this file; presumably implemented in
 * assembly elsewhere.  It is invoked from cpuinfo_reinit_mmu() below with a
 * new translation table base and ACTLR clear/set masks — confirm against
 * the actual definition.
 */
43 void reinit_mmu(uint32_t ttb, uint32_t aux_clr, uint32_t aux_set);
/* Backing storage for the machdep.* sysctls declared at the end of this file. */
45 int disable_bp_hardening;
46 int spectre_v2_safe = 1;
/*
 * Global CPU description used by the rest of the kernel.
 * Cache-line geometry starts from conservative 32-byte defaults so cache
 * maintenance done before full initialization stays safe; the real values
 * are derived from CTR later in this file.
 * NOTE(review): the initializer's opening/closing braces are missing from
 * this extract.
 */
48 struct cpuinfo cpuinfo =
50 /* Use safe defaults for start */
51 .dcache_line_size = 32,
52 .dcache_line_mask = 31,
53 .icache_line_size = 32,
54 .icache_line_mask = 31,
/*
 * Sysctl tree: hw.cpu and hw.cpu.quirks.
 * NOTE(review): the trailing description-string arguments of both
 * SYSCTL_NODE invocations are missing from this extract.
 */
57 static SYSCTL_NODE(_hw, OID_AUTO, cpu, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
59 static SYSCTL_NODE(_hw_cpu, OID_AUTO, quirks, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
64 * Be careful, ACTRL cannot be changed if CPU is started in secure
65 * mode(world) and write to ACTRL can cause exception!
66 * These quirks are intended for optimizing CPU performance, not for
67 * applying errata workarounds. Nobody can expect that CPU with unfixed
68 * errata is stable enough to execute the kernel until quirks are applied.
/* Tunable: hw.cpu.quirks.actlr_mask — bits cleared in ACTLR at MMU reinit. */
70 static uint32_t cpu_quirks_actlr_mask;
71 SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_mask,
72     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_mask, 0,
73     "Bits to be masked in ACTLR");
/* Tunable: hw.cpu.quirks.actlr_set — bits set in ACTLR at MMU reinit. */
75 static uint32_t cpu_quirks_actlr_set;
76 SYSCTL_INT(_hw_cpu_quirks, OID_AUTO, actlr_set,
77     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &cpu_quirks_actlr_set, 0,
78     "Bits to be set in ACTLR");
80 /* Read and parse CPU id scheme */
/*
 * NOTE(review): the enclosing function's signature (presumably
 * cpuinfo_init) and many structural lines — braces, else branches, early
 * returns, and the if-conditions guarding several hwcap assignments — are
 * missing from this extract.  Comments below describe only what the
 * visible lines establish.
 */
87 * Prematurely fetch CPU quirks. Standard fetch for tunable
88 * sysctls is handled using SYSINIT, thus too late for boot CPU.
89 * Keep names in sync with sysctls.
91 TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_mask", &cpu_quirks_actlr_mask);
92 TUNABLE_INT_FETCH("hw.cpu.quirks.actlr_set", &cpu_quirks_actlr_set);
/* Main ID Register: identifies implementer, architecture, part, revision. */
94 cpuinfo.midr = cp15_midr_get();
95 /* Test old version id schemes first */
96 if ((cpuinfo.midr & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD) {
97 if (CPU_ID_ISOLD(cpuinfo.midr)) {
98 /* obsolete ARMv2 or ARMv3 CPU */
102 if (CPU_ID_IS7(cpuinfo.midr)) {
103 if ((cpuinfo.midr & (1 << 23)) == 0) {
104 /* obsolete ARMv3 CPU */
/* Pre-new-scheme ARM7: fixed architecture code, 7-bit revision field. */
109 cpuinfo.architecture = 1;
110 cpuinfo.revision = (cpuinfo.midr >> 16) & 0x7F;
112 /* ARM new id scheme */
113 cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
114 cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
117 /* non ARM -> must be new id scheme */
118 cpuinfo.architecture = (cpuinfo.midr >> 16) & 0x0F;
119 cpuinfo.revision = (cpuinfo.midr >> 20) & 0x0F;
121 /* Parse rest of MIDR */
122 cpuinfo.implementer = (cpuinfo.midr >> 24) & 0xFF;
123 cpuinfo.part_number = (cpuinfo.midr >> 4) & 0xFFF;
124 cpuinfo.patch = cpuinfo.midr & 0x0F;
126 /* CP15 c0,c0 regs 0-7 exist on all CPUs (although aliased with MIDR) */
127 cpuinfo.ctr = cp15_ctr_get();
128 cpuinfo.tcmtr = cp15_tcmtr_get();
129 cpuinfo.tlbtr = cp15_tlbtr_get();
130 cpuinfo.mpidr = cp15_mpidr_get();
131 cpuinfo.revidr = cp15_revidr_get();
133 /* if CPU is not v7 cpu id scheme */
/* NOTE(review): the statement this condition guards (likely an early
 * return) is missing from the extract. */
134 if (cpuinfo.architecture != 0xF)
/* ARMv7 feature ID registers (CPUID scheme). */
136 cpuinfo.id_pfr0 = cp15_id_pfr0_get();
137 cpuinfo.id_pfr1 = cp15_id_pfr1_get();
138 cpuinfo.id_dfr0 = cp15_id_dfr0_get();
139 cpuinfo.id_afr0 = cp15_id_afr0_get();
140 cpuinfo.id_mmfr0 = cp15_id_mmfr0_get();
141 cpuinfo.id_mmfr1 = cp15_id_mmfr1_get();
142 cpuinfo.id_mmfr2 = cp15_id_mmfr2_get();
143 cpuinfo.id_mmfr3 = cp15_id_mmfr3_get();
144 cpuinfo.id_isar0 = cp15_id_isar0_get();
145 cpuinfo.id_isar1 = cp15_id_isar1_get();
146 cpuinfo.id_isar2 = cp15_id_isar2_get();
147 cpuinfo.id_isar3 = cp15_id_isar3_get();
148 cpuinfo.id_isar4 = cp15_id_isar4_get();
149 cpuinfo.id_isar5 = cp15_id_isar5_get();
151 /* Not yet - CBAR only exist on ARM SMP Cortex A CPUs
152 cpuinfo.cbar = cp15_cbar_get();
/* Cache-level ID registers exist only with the ARMv7 CTR format. */
154 if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
155 cpuinfo.ccsidr = cp15_ccsidr_get();
156 cpuinfo.clidr = cp15_clidr_get();
159 /* Test if revidr is implemented */
/* A REVIDR reading back as MIDR means REVIDR is not implemented;
 * NOTE(review): the consequent statement is missing from the extract. */
160 if (cpuinfo.revidr == cpuinfo.midr)
163 /* parsed bits of above registers */
165 cpuinfo.outermost_shareability = (cpuinfo.id_mmfr0 >> 8) & 0xF;
166 cpuinfo.shareability_levels = (cpuinfo.id_mmfr0 >> 12) & 0xF;
167 cpuinfo.auxiliary_registers = (cpuinfo.id_mmfr0 >> 20) & 0xF;
168 cpuinfo.innermost_shareability = (cpuinfo.id_mmfr0 >> 28) & 0xF;
170 cpuinfo.mem_barrier = (cpuinfo.id_mmfr2 >> 20) & 0xF;
172 cpuinfo.coherent_walk = (cpuinfo.id_mmfr3 >> 20) & 0xF;
173 cpuinfo.maintenance_broadcast =(cpuinfo.id_mmfr3 >> 12) & 0xF;
175 cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF;
176 cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF;
177 cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF;
/* MPIDR bit 31 is RAO on MP-capable implementations. */
179 cpuinfo.mp_ext = (cpuinfo.mpidr >> 31u) & 0x1;
/* Derive cache line sizes from CTR; encoding differs between the ARMv7
 * CTR format (log2 words) and the older format (xSIZE length field). */
182 if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7) {
183 cpuinfo.dcache_line_size =
184     1 << (CPU_CT_DMINLINE(cpuinfo.ctr) + 2);
185 cpuinfo.icache_line_size =
186     1 << (CPU_CT_IMINLINE(cpuinfo.ctr) + 2);
188 cpuinfo.dcache_line_size =
189     1 << (CPU_CT_xSIZE_LEN(CPU_CT_DSIZE(cpuinfo.ctr)) + 3);
190 cpuinfo.icache_line_size =
191     1 << (CPU_CT_xSIZE_LEN(CPU_CT_ISIZE(cpuinfo.ctr)) + 3);
193 cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1;
194 cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1;
196 /* Fill AT_HWCAP bits. */
197 elf_hwcap |= HWCAP_HALF | HWCAP_FAST_MULT; /* Required for all CPUs */
198 elf_hwcap |= HWCAP_TLS | HWCAP_EDSP; /* Required for v6+ CPUs */
/* NOTE(review): the if-conditions testing 'tmp' before each of the
 * following |= lines are missing from this extract. */
200 tmp = (cpuinfo.id_isar0 >> 24) & 0xF; /* Divide_instrs */
202 elf_hwcap |= HWCAP_IDIVT;
204 elf_hwcap |= HWCAP_IDIVA;
206 tmp = (cpuinfo.id_pfr0 >> 4) & 0xF; /* State1 */
208 elf_hwcap |= HWCAP_THUMB;
210 tmp = (cpuinfo.id_pfr0 >> 12) & 0xF; /* State3 */
212 elf_hwcap |= HWCAP_THUMBEE;
214 tmp = (cpuinfo.id_mmfr0 >> 0) & 0xF; /* VMSA */
216 elf_hwcap |= HWCAP_LPAE;
218 /* Fill AT_HWCAP2 bits. */
219 tmp = (cpuinfo.id_isar5 >> 4) & 0xF; /* AES */
221 elf_hwcap2 |= HWCAP2_AES;
223 elf_hwcap2 |= HWCAP2_PMULL;
225 tmp = (cpuinfo.id_isar5 >> 8) & 0xF; /* SHA1 */
227 elf_hwcap2 |= HWCAP2_SHA1;
229 tmp = (cpuinfo.id_isar5 >> 12) & 0xF; /* SHA2 */
231 elf_hwcap2 |= HWCAP2_SHA2;
233 tmp = (cpuinfo.id_isar5 >> 16) & 0xF; /* CRC32 */
235 elf_hwcap2 |= HWCAP2_CRC32;
239 * Get bits that must be set or cleared in ACLR register.
240 * Note: Bits in ACLR register are IMPLEMENTATION DEFINED.
241 * Its expected that SCU is in operational state before this
242 * function is called.
/*
 * NOTE(review): the return-type line, braces, 'break' statements, and the
 * prologue that presumably zeroes *actlr_mask / *actlr_set for unknown
 * parts are missing from this extract — confirm against the full source.
 * Each case leaves only the IMPLEMENTATION DEFINED bits relevant to that
 * Cortex part in the mask/set outputs.
 */
245 cpuinfo_get_actlr_modifier(uint32_t *actlr_mask, uint32_t *actlr_set)
251 if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
252 switch (cpuinfo.part_number) {
253 case CPU_ARCH_CORTEX_A75:
254 case CPU_ARCH_CORTEX_A73:
255 case CPU_ARCH_CORTEX_A72:
256 case CPU_ARCH_CORTEX_A57:
257 case CPU_ARCH_CORTEX_A53:
258 /* Nothing to do for AArch32 */
260 case CPU_ARCH_CORTEX_A17:
261 case CPU_ARCH_CORTEX_A12: /* A12 is merged to A17 */
/* Bit 6: SMP mode (presumably — description line missing from extract). */
265 *actlr_mask = (1 << 6);
266 *actlr_set = (1 << 6);
268 case CPU_ARCH_CORTEX_A15:
270 * Enable snoop-delayed exclusive handling
/* Bit 31 plus bit 6 on A15. */
273 *actlr_mask = (1U << 31) |(1 << 6);
274 *actlr_set = (1U << 31) |(1 << 6);
276 case CPU_ARCH_CORTEX_A9:
278 * Disable exclusive L1/L2 cache control
280 * Enable Cache and TLB maintenance broadcast
/* Bit 7 cleared (in mask, not in set); bits 6 and 0 set. */
282 *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
283 *actlr_set = (1 << 6) | (1 << 0);
285 case CPU_ARCH_CORTEX_A8:
288 * Enable L1 data cache hardware alias checks
/* Bit 0 cleared; bit 1 set. */
290 *actlr_mask = (1 << 1) | (1 << 0);
291 *actlr_set = (1 << 1);
293 case CPU_ARCH_CORTEX_A7:
297 *actlr_mask = (1 << 6);
298 *actlr_set = (1 << 6);
300 case CPU_ARCH_CORTEX_A5:
302 * Disable exclusive L1/L2 cache control
304 * Enable Cache and TLB maintenance broadcast
306 *actlr_mask = (1 << 7) | (1 << 6) | (1 << 0);
307 *actlr_set = (1 << 6) | (1 << 0);
309 case CPU_ARCH_ARM1176:
311 * Restrict cache size to 16KB
312 * Enable the return stack
313 * Enable dynamic branch prediction
314 * Enable static branch prediction
316 *actlr_mask = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
317 *actlr_set = (1 << 6) | (1 << 2) | (1 << 1) | (1 << 0);
324 /* Reinitialize MMU to final kernel mapping and apply all CPU quirks. */
/*
 * Combines the per-part ACTLR modifiers with the hw.cpu.quirks.* tunables
 * and hands everything to the (external) reinit_mmu().
 * NOTE(review): the return-type line, braces, and the local declarations
 * of actlr_mask/actlr_set are missing from this extract.
 */
326 cpuinfo_reinit_mmu(uint32_t ttb)
331 cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
332 actlr_mask |= cpu_quirks_actlr_mask;
333 actlr_set |= cpu_quirks_actlr_set;
334 reinit_mmu(ttb, actlr_mask, actlr_set);
/*
 * Modify ACTLR: appears to clear 'clear' bits and set 'set' bits, write
 * the result back, then re-read to verify the write took effect (writes
 * to ACTLR can be ignored/trapped when locked to the secure world) — the
 * computation, comparison, and return statements are missing from this
 * extract, so confirm against the full source.
 */
338 modify_actlr(uint32_t clear, uint32_t set)
340 uint32_t reg, newreg;
342 reg = cp15_actlr_get();
348 cp15_actlr_set(newreg);
/* Read back: detects a silently-ignored write. */
350 reg = cp15_actlr_get();
356 /* Apply/restore BP hardening on current core. */
/*
 * When enabling with 'actrl' true, first sets 'set_mask' in ACTLR via
 * modify_actlr(); on failure (or when disabling) records KIND_NONE and
 * restores the saved original ACTLR.  NOTE(review): the signature's
 * return-type line, braces, and return statements are missing from this
 * extract.
 */
358 apply_bp_hardening(bool enable, int kind, bool actrl, uint32_t set_mask)
361 if (actrl && !modify_actlr(0, set_mask))
363 PCPU_SET(bp_harden_kind, kind);
365 PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
/* Restore the pristine ACTLR captured in cpuinfo_init_bp_hardening(). */
367 modify_actlr(~0, PCPU_GET(original_actlr));
/*
 * Select and apply the Spectre-v2 branch-predictor hardening strategy for
 * the current core, based on implementer/part number.
 * NOTE(review): return type, braces, 'break's, local declarations, labels,
 * and several statements are missing from this extract.  Known typos that
 * should be fixed together with their (unseen) definitions: the
 * 'unkonown_mitigation' label name and "hardering" in the panic string —
 * the string is runtime-visible, so changing it is a behavior change.
 */
374 handle_bp_hardening(bool enable)
379 kind = PCPU_BP_HARDEN_KIND_NONE;
381 * Note: Access to ACTRL is locked to secure world on most boards.
382 * This means that full BP hardening depends on updated u-boot/firmware
383 * or is impossible at all (if secure monitor is in on-chip ROM).
385 if (cpuinfo.implementer == CPU_IMPLEMENTER_ARM) {
386 switch (cpuinfo.part_number) {
387 case CPU_ARCH_CORTEX_A8:
389 * For Cortex-A8, IBE bit must be set otherwise
390 * BPIALL is effectively NOP.
391 * Unfortunately, Cortex-A is also affected by
392 * ARM erratum 687067 which causes non-working
393 * BPIALL if IBE bit is set and 'Instruction L1 System
394 * Array Debug Register 0' is not 0.
395 * This register is not reset on power-up and is
396 * accessible only from secure world, so we cannot do
397 * nothing (nor detect) to fix this issue.
398 * I afraid that on chip ROM based secure monitor on
399 * AM335x (BeagleBone) doesn't reset this debug
402 kind = PCPU_BP_HARDEN_KIND_BPIALL;
/* Bit 6 here is the A8 IBE bit per the comment above. */
403 if (apply_bp_hardening(enable, kind, true, 1 << 6) != 0)
408 case CPU_ARCH_CORTEX_A9:
409 case CPU_ARCH_CORTEX_A12:
410 case CPU_ARCH_CORTEX_A17:
411 case CPU_ARCH_CORTEX_A57:
412 case CPU_ARCH_CORTEX_A72:
413 case CPU_ARCH_CORTEX_A73:
414 case CPU_ARCH_CORTEX_A75:
/* These parts need no ACTLR change; BPIALL alone suffices. */
415 kind = PCPU_BP_HARDEN_KIND_BPIALL;
416 if (apply_bp_hardening(enable, kind, false, 0) != 0)
420 case CPU_ARCH_CORTEX_A15:
422 * For Cortex-A15, set 'Enable invalidates of BTB' bit.
423 * Despite this, the BPIALL is still effectively NOP,
424 * but with this bit set, the ICIALLU also flushes
425 * branch predictor as side effect.
427 kind = PCPU_BP_HARDEN_KIND_ICIALLU;
428 if (apply_bp_hardening(enable, kind, true, 1 << 0) != 0)
435 } else if (cpuinfo.implementer == CPU_IMPLEMENTER_QCOM) {
436 printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative "
437     "branch attacks. !!!\n"
438     "Qualcomm Krait cores are known (or believed) to be "
440     "speculative branch attacks, no mitigation exists yet.\n",
442 goto unkonown_mitigation;
444 goto unkonown_mitigation;
/* Report which hardening kind ended up applied on this CPU. */
449 case PCPU_BP_HARDEN_KIND_NONE:
450 kind_str = "not necessary";
452 case PCPU_BP_HARDEN_KIND_BPIALL:
455 case PCPU_BP_HARDEN_KIND_ICIALLU:
456 kind_str = "ICIALLU";
459 panic("Unknown BP hardering kind (%d).", kind);
461 printf("CPU(%d) applied BP hardening: %s\n", PCPU_GET(cpuid),
/* Failure/unknown-mitigation paths: record no hardening and warn. */
468 PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
473 PCPU_SET(bp_harden_kind, PCPU_BP_HARDEN_KIND_NONE);
475 printf("!!!WARNING!!! CPU(%d) is vulnerable to speculative branch "
477     "We cannot enable required bit(s) in ACTRL register\n"
478     "because it's locked by secure monitor and/or firmware.\n",
/*
 * Per-CPU BP-hardening bootstrap: snapshot the pristine ACTLR, then apply
 * hardening.  NOTE(review): return-type line and braces are missing from
 * this extract.
 */
483 cpuinfo_init_bp_hardening(void)
487 * Store original unmodified ACTRL, so we can restore it when
488 * BP hardening is disabled by sysctl.
490 PCPU_SET(original_actlr, cp15_actlr_get());
491 handle_bp_hardening(true);
/*
 * smp_rendezvous callback: (re)apply or remove hardening on each core
 * according to the disable_bp_hardening sysctl.  NOTE(review): return
 * type, braces, and the (unused) 'arg' handling are missing from this
 * extract.
 */
495 bp_hardening_action(void *arg)
498 handle_bp_hardening(disable_bp_hardening == 0);
/*
 * Handler for machdep.disable_bp_hardening: on a successful write, runs
 * bp_hardening_action on all CPUs (rendezvous in the SMP case, direct call
 * otherwise).  NOTE(review): return-type line, braces, the #ifdef SMP
 * guards, and the return statement are missing from this extract.
 */
502 sysctl_disable_bp_hardening(SYSCTL_HANDLER_ARGS)
506 rv = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
/* Only act on an actual write (newptr set) that succeeded. */
508 if (!rv && req->newptr) {
512 smp_rendezvous_cpus(all_cpus, smp_no_rendezvous_barrier,
513     bp_hardening_action, NULL, NULL);
515 bp_hardening_action(NULL);
/* Writable knob to toggle BP hardening at runtime (handler above). */
522 SYSCTL_PROC(_machdep, OID_AUTO, disable_bp_hardening,
523     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
524     &disable_bp_hardening, 0, sysctl_disable_bp_hardening, "I",
525     "Disable BP hardening mitigation.");
/* Read-only status flag; cleared elsewhere when a core lacks mitigation. */
527 SYSCTL_INT(_machdep, OID_AUTO, spectre_v2_safe, CTLFLAG_RD,
528     &spectre_v2_safe, 0, "System is safe to Spectre Version 2 attacks");