2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) KATO Takenori, 1997, 1998.
6 * All rights reserved. Unpublished rights reserved under the copyright
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer as
15 * the first lines of this file unmodified.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
37 #include <sys/param.h>
38 #include <sys/kernel.h>
40 #include <sys/systm.h>
41 #include <sys/sysctl.h>
43 #include <machine/cputypes.h>
44 #include <machine/md_var.h>
45 #include <machine/psl.h>
46 #include <machine/specialreg.h>
/*
 * File-scope state: feature flags and tunables exported via sysctl.
 */
/* Set to 1 by the CR4 setup below when both SSE (XMM) and FXSR are present. */
51 static int hw_instruction_sse;
52 SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
53 &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
/*
 * lower_sharedpage_init guards the one-time Ryzen shared-page detection
 * (see the Family 17h/18h check later in this file) so a value set by the
 * tunable is not overwritten on AP startup or resume.
 */
54 static int lower_sharedpage_init;
55 int hw_lower_amd64_sharedpage;
56 SYSCTL_INT(_hw, OID_AUTO, lower_amd64_sharedpage, CTLFLAG_RDTUN,
57 &hw_lower_amd64_sharedpage, 0,
58 "Lower sharedpage to work around Ryzen issue with executing code near the top of user memory");
/*
 * hw.clflush_disable tunable, consumed in initializecpucache():
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force CLFLUSH disabled
 */
64 static int hw_clflush_disable = -1;
/*
 * NOTE(review): this excerpt is fragmentary -- the enclosing function's
 * signature (AMD-specific CPU init; presumably init_amd()) and several
 * interior lines (case labels, closing braces, some operands) are missing.
 * Comments below describe only the statements that are visible.
 */
/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference: "BIOS and Kernel Developer's Guide for AMD NPT
 * Family 0Fh Processors" #32559 revision 3.00+.
 *
 * Detect the presence of C1E capability mostly on latest
 * dual-cores (or future) k8 family; affected model range is
 * taken from Linux sources.  Skipped under a hypervisor
 * (CPUID2_HV set).
 */
84 if ((CPUID_TO_FAMILY(cpu_id) == 0xf ||
85 CPUID_TO_FAMILY(cpu_id) == 0x10) && (cpu_feature2 & CPUID2_HV) == 0)
/* (the statement guarded by the check above is not visible here) */
/*
 * Work around Erratum 721 for Family 10h and 12h processors.
 * These processors may incorrectly update the stack pointer
 * after a long series of push and/or near-call instructions,
 * or a long series of pop and/or near-return instructions.
 *
 * http://support.amd.com/us/Processor_TechDocs/41322_10h_Rev_Gd.pdf
 * http://support.amd.com/us/Processor_TechDocs/44739_12h_Rev_Gd.pdf
 *
 * Hypervisors do not provide access to the errata MSR,
 * causing #GP exception on attempt to apply the errata.  The
 * MSR write shall be done on host and persist globally
 * anyway, so do not try to do it when under virtualization.
 */
102 switch (CPUID_TO_FAMILY(cpu_id)) {
/* NOTE(review): the case labels for this switch are missing from the excerpt. */
105 if ((cpu_feature2 & CPUID2_HV) == 0)
/* Set DE_CFG bit 0, the published fix for erratum 721. */
106 wrmsr(MSR_DE_CFG, rdmsr(MSR_DE_CFG) | 1);
/*
 * BIOS may fail to set InitApicIdCpuIdLo to 1 as it should per BKDG.
 * So, do it here or otherwise some tools could be confused by
 * Initial Local APIC ID reported with CPUID Function 1 in EBX.
 */
115 if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
116 if ((cpu_feature2 & CPUID2_HV) == 0) {
/* NB_CFG1 bit 54 is InitApicIdCpuIdLo. */
117 msr = rdmsr(MSR_NB_CFG1);
118 msr |= (uint64_t)1 << 54;
119 wrmsr(MSR_NB_CFG1, msr);
/*
 * BIOS may configure Family 10h processors to convert WC+ cache type
 * to CD.  That can hurt performance of guest VMs using nested paging.
 * The relevant MSR bit is not documented in the BKDG;
 * the fix (clearing bit 24 of MSR 0xc001102a) is borrowed from Linux.
 */
129 if (CPUID_TO_FAMILY(cpu_id) == 0x10) {
130 if ((cpu_feature2 & CPUID2_HV) == 0) {
131 msr = rdmsr(0xc001102a);
132 msr &= ~((uint64_t)1 << 24);
133 wrmsr(0xc001102a, msr);
/*
 * Work around Erratum 793: Specific Combination of Writes to Write
 * Combined Memory Types and Locked Instructions May Cause Core Hang.
 * See Revision Guide for AMD Family 16h Models 00h-0Fh Processors,
 * revision 3.04 or later, publication 51810.  Fix sets LS_CFG bit 15.
 */
143 if (CPUID_TO_FAMILY(cpu_id) == 0x16 && CPUID_TO_MODEL(cpu_id) <= 0xf) {
144 if ((cpu_feature2 & CPUID2_HV) == 0) {
145 msr = rdmsr(MSR_LS_CFG);
146 msr |= (uint64_t)1 << 15;
147 wrmsr(MSR_LS_CFG, msr);
/*
 * Family 17h model 01h (first-generation Zen) errata chicken bits,
 * applied only on bare metal.  NOTE(review): the bit masks OR'ed into
 * DE_CFG, LS_CFG and MSR 0xc0011028 below are missing from this
 * excerpt (the intervening lines were dropped); only the final LS_CFG
 * mask (bit 57) is visible.
 */
152 if (CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1 &&
153 (cpu_feature2 & CPUID2_HV) == 0) {
155 msr = rdmsr(MSR_DE_CFG);
157 wrmsr(MSR_DE_CFG, msr);
160 msr = rdmsr(MSR_LS_CFG);
162 wrmsr(MSR_LS_CFG, msr);
165 msr = rdmsr(0xc0011028);
167 wrmsr(0xc0011028, msr);
170 msr = rdmsr(MSR_LS_CFG);
171 msr |= 0x200000000000000;
172 wrmsr(MSR_LS_CFG, msr);
/*
 * Work around a problem on Ryzen that is triggered by executing
 * code near the top of user memory, in our case the signal
 * trampoline code in the shared page on amd64.
 *
 * This function is executed once for the BSP before tunables take
 * effect, so the value determined here can be overridden by the
 * tunable.  This function is then executed again for each AP and
 * also on resume.  Set a flag the first time so that the value set
 * by the tunable is not overwritten.
 *
 * The stepping and/or microcode versions should be checked after
 * this issue is fixed by AMD so that we don't use this mode if not
 * needed.
 */
190 if (lower_sharedpage_init == 0) {
191 lower_sharedpage_init = 1;
192 if (CPUID_TO_FAMILY(cpu_id) == 0x17 ||
193 CPUID_TO_FAMILY(cpu_id) == 0x18) {
194 hw_lower_amd64_sharedpage = 1;
/*
 * Initialize special VIA features (PadLock RNG and crypto units).
 * NOTE(review): the function signature and some interior lines
 * (including the assignment of 'val', presumably from the CPUID
 * result in regs[]) are missing from this excerpt.
 *
 * Check extended CPUID for PadLock features:
 * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
 */
/* Leaf 0xc0000000 reports the maximum supported Centaur extended leaf. */
212 do_cpuid(0xc0000000, regs);
213 if (regs[0] >= 0xc0000001) {
214 do_cpuid(0xc0000001, regs);
/* Enable RNG if present (MSR 0x110B gates the hardware RNG). */
220 if ((val & VIA_CPUID_HAS_RNG) != 0) {
221 via_feature_rng = VIA_HAS_RNG;
222 wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
/*
 * Enable PadLock if present: collect the advertised crypto engines
 * (ACE/AES, ACE2/AES-CTR, PHE/SHA, PMM/Montgomery multiplier) into
 * via_feature_xcrypt, then set bit 28 of MSR 0x1107 to switch the
 * units on if any engine was found.
 */
226 if ((val & VIA_CPUID_HAS_ACE) != 0)
227 via_feature_xcrypt |= VIA_HAS_AES;
228 if ((val & VIA_CPUID_HAS_ACE2) != 0)
229 via_feature_xcrypt |= VIA_HAS_AESCTR;
230 if ((val & VIA_CPUID_HAS_PHE) != 0)
231 via_feature_xcrypt |= VIA_HAS_SHA;
232 if ((val & VIA_CPUID_HAS_PMM) != 0)
233 via_feature_xcrypt |= VIA_HAS_MM;
234 if (via_feature_xcrypt != 0)
235 wrmsr(0x1107, rdmsr(0x1107) | (1 << 28));
/*
 * Return the value to program into the TSC_AUX MSR, read back by
 * rdtscp/rdpid on the invoking CPU: the per-CPU cpuid.
 * (Function name inferred from the cpu_auxmsr() call site later in
 * this file; its signature line is missing from this excerpt.)
 *
 * Caller should prevent CPU migration; the KASSERT checks that
 * interrupts are disabled (PSL_I clear) so no context switch can
 * move us to another CPU between the read and the caller's use.
 */
246 KASSERT((read_rflags() & PSL_I) == 0, ("context switch possible"));
247 return (PCPU_GET(cpuid));
/*
 * Initialize CPU control registers for the current CPU.
 * NOTE(review): the function signature and a number of interior lines
 * (the cr4 declaration, the bodies of the FSGSBASE/PKU conditionals,
 * the load_cr4() call, the hypervisor guard around the flush-RSB
 * tunable, and the vendor case labels) are missing from this excerpt.
 */
/* Enable FXSR and SSE exception support; export via hw.instruction_sse. */
260 if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
261 cr4 |= CR4_FXSR | CR4_XMM;
262 cpu_fxsr = hw_instruction_sse = 1;
264 if (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE)
267 if (cpu_stdext_feature2 & CPUID_STDEXT2_PKU)
/*
 * If SMEP is present, we only need to flush RSB (by default)
 * on context switches, to prevent cross-process ret2spec
 * attacks.  Do it automatically if ibrs_disable is set, to
 * complete the mitigation.
 *
 * Postpone enabling the SMEP on the boot CPU until the page
 * tables are switched from the boot loader identity mapping
 * to the kernel tables.  The boot loader enables the U bit in
 * its tables.
 */
282 if (cpu_stdext_feature & CPUID_STDEXT_SMEP &&
284 "machdep.mitigations.cpu_flush_rsb_ctxsw",
285 &cpu_flush_rsb_ctxsw) &&
287 cpu_flush_rsb_ctxsw = 1;
289 if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
291 if (cpu_stdext_feature & CPUID_STDEXT_SMAP)
/* Enable the EFER.NXE (no-execute) bit; BSP-only path visible here. */
295 if (IS_BSP() && (amd_feature & AMDID_NX) != 0) {
296 msr = rdmsr(MSR_EFER) | EFER_NXE;
297 wrmsr(MSR_EFER, msr);
/* Recompute speculative-execution mitigations for this CPU. */
300 hw_ibrs_recalculate(false);
301 hw_ssb_recalculate(false);
302 amd64_syscall_ret_flush_l1d_recalc();
303 x86_rngds_mitg_recalculate(false);
/* Vendor-specific init; NOTE(review): case bodies are missing here. */
304 switch (cpu_vendor_id) {
306 case CPU_VENDOR_HYGON:
309 case CPU_VENDOR_CENTAUR:
/* Program TSC_AUX so rdtscp/rdpid returns this CPU's id. */
314 if ((amd_feature & AMDID_RDTSCP) != 0 ||
315 (cpu_stdext_feature2 & CPUID_STDEXT2_RDPID) != 0)
316 wrmsr(MSR_TSC_AUX, cpu_auxmsr());
/*
 * Probe the CLFLUSH cache-line size and apply the hw.clflush_disable
 * tunable.  NOTE(review): the return type line preceding the name and
 * the function's closing lines are missing from this excerpt.
 */
320 initializecpucache(void)
/*
 * CPUID with %eax = 1, %ebx returns
 * Bits 15-8: CLFLUSH line size
 * (Value * 8 = cache line size in bytes)
 */
328 if ((cpu_feature & CPUID_CLFSH) != 0)
329 cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
/*
 * XXXKIB: (temporary) hack to work around traps generated
 * when CLFLUSHing APIC register window under virtualization
 * environments.  These environments tend to disable the
 * CPUID_SS feature even though the native CPU supports it.
 */
336 TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
/* Automatic mode (-1): disable CLFLUSH/CLFLUSHOPT when running as a guest. */
337 if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
338 cpu_feature &= ~CPUID_CLFSH;
339 cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
/*
 * The kernel's use of CLFLUSH{,OPT} can be disabled manually
 * by setting the hw.clflush_disable tunable to 1.
 */
346 if (hw_clflush_disable == 1) {
347 cpu_feature &= ~CPUID_CLFSH;
348 cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;