 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * This code is derived from software contributed to Berkeley by
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/perfmon.h>
#include <machine/tss.h>
#include <machine/smp.h>
#include <machine/elan_mmcr.h>

#include <x86/acpica_machdep.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#include <isa/isareg.h>
#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2
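/*
 * These values form a small wakeup protocol between the cpu_idle_*
 * functions below and cpu_idle_wakeup(): STATE_RUNNING means the CPU is
 * busy; STATE_MWAIT means it is parked in MONITOR/MWAIT, so a plain
 * store to the monitored state word is enough to wake it;
 * STATE_SLEEPING means it idles via hlt (or the ACPI hook) and can only
 * be woken by an interrupt or IPI.
 */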
static u_int cpu_reset_proxyid;
static volatile u_int cpu_reset_proxy_active;
/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
cpu_flush_dcache(void *ptr, size_t len)

	__asm __volatile("sti; hlt");
/*
 * Use mwait to pause execution while waiting for an interrupt or
 * another thread to signal that there is more work.
 *
 * NOTE: Interrupts will cause a wakeup; however, this function does
 * not enable interrupt handling.  The caller is responsible for
 * enabling interrupts.
 */
acpi_cpu_idle_mwait(uint32_t mwait_hint)

	/*
	 * A comment in a Linux patch claims that 'CPUs run faster with
	 * speculation protection disabled.  All CPU threads in a core
	 * must disable speculation protection for it to be
	 * disabled.  Disable it while we are idle so the other
	 * hyperthread can run fast.'
	 *
	 * XXXKIB.  Software coordination mode should be supported,
	 * but all Intel CPUs provide hardware coordination.
	 */

	state = (int *)PCPU_PTR(monitorbuf);
	KASSERT(atomic_load_int(state) == STATE_SLEEPING,
	    ("cpu_mwait_cx: wrong monitorbuf state"));
	atomic_store_int(state, STATE_MWAIT);
	if (PCPU_GET(ibpb_set) || hw_ssb_active) {
		v = rdmsr(MSR_IA32_SPEC_CTRL);
		wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
		    IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
	}

	cpu_monitor(state, 0, 0);
	if (atomic_load_int(state) == STATE_MWAIT)
		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
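	/*
	 * MWAIT_INTRBREAK requests the MWAIT extension that treats
	 * even masked interrupts as break events; that is what lets
	 * this wakeup work while the caller keeps interrupts
	 * disabled, as the NOTE above requires.
	 */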
	/*
	 * SSB cannot be disabled while we sleep, or rather, if it was
	 * disabled, the sysctl thread will bind to our cpu to tweak
	 * the MSR.
	 */
	wrmsr(MSR_IA32_SPEC_CTRL, v);

	/*
	 * We should exit on any event that interrupts mwait, because
	 * that event might be a wanted interrupt.
	 */
	atomic_store_int(state, STATE_RUNNING);
/* Get current clock frequency for the given cpu id. */
cpu_est_clockrate(int cpu_id, uint64_t *rate)

	uint64_t acnt, mcnt, perf;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9)-based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
	} else {
		*rate = (tsc2 - tsc1) * 1000;
	}
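	/*
	 * Arithmetic sanity check (assuming the elided calibration
	 * reads the TSC around a DELAY(1000), i.e. a 1000 us spin):
	 * tsc2 - tsc1 is then TSC ticks per millisecond, so scaling by
	 * 1000 yields ticks per second, i.e. Hz.  In the APERF/MPERF
	 * case, perf = 1000 * acnt / mcnt is the actual-to-reference
	 * frequency ratio scaled by 1000, so the same millisecond
	 * factor cancels and *rate becomes the effective, rather than
	 * the invariant, clock rate.
	 */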
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
/*
 * Shutdown the CPU as much as possible
 */

	struct region_descriptor null_idt;

	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;

	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);

	/*
	 * Attempt to do a CPU reset via CPU reset port.
	 */
	if ((inb(0x35) & 0xa0) != 0xa0) {
		outb(0x37, 0x0f);	/* SHUT0 = 0. */
		outb(0x37, 0x0b);	/* SHUT1 = 0. */
	}
	outb(0xf0, 0x00);	/* Reset. */
#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller;
	 * do not turn off GateA20, as any machine that fails to do
	 * the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif
	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * the reset.
	 */
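	/*
	 * A sketch of the two writes the comment above describes; the
	 * concrete values are assumed for illustration.
	 */
	outb(0xcf9, 0x2);	/* Bit 1 set: "hard" reset; bit 2 clear. */
	outb(0xcf9, 0x6);	/* Bit 2 goes 0 -> 1: trigger the reset. */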
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	outb(0x92, b & 0xfe);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;

	/* "good night, sweet prince .... <THUNK!>" */
cpu_reset_proxy(void)

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		ia32_pause();	/* Wait for other cpu to see that we've started */
	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);

	CPU_CLR(PCPU_GET(cpuid), &map);
	CPU_NAND(&map, &stopped_cpus);
	if (!CPU_EMPTY(&map)) {
		printf("cpu_reset: Stopping other CPUs\n");

	if (PCPU_GET(cpuid) != 0) {
		cpu_reset_proxyid = PCPU_GET(cpuid);
		cpustop_restartfunc = cpu_reset_proxy;
		cpu_reset_proxy_active = 0;
		printf("cpu_reset: Restarting BSP\n");

		/* Restart CPU #0. */
		CPU_SETOF(0, &started_cpus);

		while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
			cnt++;	/* Wait for BSP to announce restart */
		}
		if (cpu_reset_proxy_active == 0) {
			printf("cpu_reset: Failed to restart BSP\n");
		}
		cpu_reset_proxy_active = 2;
cpu_mwait_usable(void)

	return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));
void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */
static int cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int idle_mwait = 1;	/* Use MONITOR/MWAIT for short idle. */
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");
cpu_idle_acpi(sbintime_t sbt)

	state = (int *)PCPU_PTR(monitorbuf);
	atomic_store_int(state, STATE_SLEEPING);

	/* See comments in cpu_idle_hlt(). */
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook(sbt);

	atomic_store_int(state, STATE_RUNNING);
cpu_idle_hlt(sbintime_t sbt)

	state = (int *)PCPU_PTR(monitorbuf);
	atomic_store_int(state, STATE_SLEEPING);

	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, note that on
	 * x86 this works because interrupts are delivered only once
	 * the instruction following sti executes, while IF is set to
	 * 1 immediately, allowing the hlt instruction to acknowledge
	 * the interrupt.
	 */
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");

	atomic_store_int(state, STATE_RUNNING);
cpu_idle_mwait(sbintime_t sbt)

	state = (int *)PCPU_PTR(monitorbuf);
	atomic_store_int(state, STATE_MWAIT);

	/* See comments in cpu_idle_hlt(). */
	if (sched_runnable()) {
		atomic_store_int(state, STATE_RUNNING);
		enable_intr();
		return;
	}

	cpu_monitor(state, 0, 0);
	if (atomic_load_int(state) == STATE_MWAIT)
		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
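	/*
	 * As in cpu_idle_hlt(), the "sti" immediately before "mwait"
	 * matters: sti keeps interrupts inhibited for one more
	 * instruction, so no interrupt can slip in between enabling
	 * IF and entering mwait, and a wakeup is never lost.
	 */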
	atomic_store_int(state, STATE_RUNNING);
cpu_idle_spin(sbintime_t sbt)

	state = (int *)PCPU_PTR(monitorbuf);
	atomic_store_int(state, STATE_RUNNING);

	/*
	 * The sched_runnable() call is racy, but since it sits in a
	 * loop, missing it once has little impact, if any (and is
	 * much better than not checking at all).
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)
cpu_probe_amdc1e(void)

	/*
	 * Detect the presence of C1E capability, found mostly on the
	 * latest dual-core (or newer) K8-family CPUs.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
	}
#if defined(__i386__) && defined(PC98)
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_hlt;
#else
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
#endif
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
	ap_watchdog(PCPU_GET(cpuid));

	/* If we are busy - try to use fast methods. */
	if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
		cpu_idle_mwait(busy);

	/* If we have time - switch timers into idle mode. */
	sbt = cpu_idleclock();

	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
	}

	/* Call main idle method. */
	cpu_idle_fn(sbt);

	/* Switch timers back into active mode. */
	cpu_activeclock();

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
static int cpu_idle_apl31_workaround;
SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RW,
    &cpu_idle_apl31_workaround, 0,
    "Apollo Lake APL31 MWAIT bug workaround");

cpu_idle_wakeup(int cpu)

	state = (int *)pcpu_find(cpu)->pc_monitorbuf;
	switch (atomic_load_int(state)) {
		atomic_store_int(state, STATE_RUNNING);
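		/*
		 * When the APL31 workaround is active, report failure
		 * so the caller falls back to an IPI: per the errata
		 * note in cpu_idle_tun(), a store to the armed range
		 * may not wake the CPU out of MWAIT.
		 */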
		return (cpu_idle_apl31_workaround ? 0 : 1);

		panic("bad monitor state");
/*
 * Ordered by speed/power consumption.
 */
	{ .id_fn = cpu_idle_spin, .id_name = "spin" },
	{ .id_fn = cpu_idle_mwait, .id_name = "mwait",
	    .id_cpuid2_flag = CPUID2_MON },
	{ .id_fn = cpu_idle_hlt, .id_name = "hlt" },
#if !defined(__i386__) || !defined(PC98)
	{ .id_fn = cpu_idle_acpi, .id_name = "acpi" },
#endif
idle_sysctl_available(SYSCTL_HANDLER_ARGS)

	avail = malloc(256, M_TEMP, M_WAITOK);
	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
			continue;
#if !defined(__i386__) || !defined(PC98)
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
#endif
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");
cpu_idle_selector(const char *new_idle_name)

	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
			continue;
#if !defined(__i386__) || !defined(PC98)
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
#endif
		if (strcmp(idle_tbl[i].id_name, new_idle_name))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		printf("CPU idle set to %s\n", idle_tbl[i].id_name);
cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)

	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (cpu_idle_selector(buf) ? 0 : EINVAL);
SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    cpu_idle_sysctl, "A", "currently selected idle function");
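/*
 * Usage example (illustrative): the idle method can be switched at
 * runtime with `sysctl machdep.idle=hlt`, and cpu_idle_tun() below
 * fetches the same "machdep.idle" name as a loader tunable at boot.
 */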
cpu_idle_tun(void *unused __unused)

	if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
		cpu_idle_selector(tunvar);
	if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_id == 0x506c9) {
		/*
		 * Apollo Lake errata APL31 (public errata APL30).
		 * Stores to the armed address range may not trigger
		 * MWAIT to resume execution.  OS needs to use
		 * interrupts to wake processors from MWAIT-induced
		 * sleep states.
		 */
		cpu_idle_apl31_workaround = 1;
	}
	TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);

SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
    &panic_on_nmi, 0,
    "Panic on NMI");

int nmi_is_broadcast = 1;
SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
    &nmi_is_broadcast, 0,
    "Chipset NMI is broadcast");

SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RWTUN,
    &kdb_on_nmi, 0,
    "Go to KDB on NMI");
nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)

	/* machine/parity/power fail/"kitchen sink" faults */
	if (isa_nmi(frame->tf_err) == 0) {
		/*
		 * NMI can be hooked up to a pushbutton for debugging.
		 */
		printf("NMI/cpu%d ... going to debugger\n", cpu);
		kdb_trap(type, 0, frame);
	} else if (panic_on_nmi) {
		panic("NMI indicates hardware failure");
	}

nmi_handle_intr(u_int type, struct trapframe *frame)

	if (nmi_is_broadcast) {
		nmi_call_kdb_smp(type, frame);
		return;
	}
	nmi_call_kdb(PCPU_GET(cpuid), type, frame);
int hw_ibrs_active;
int hw_ibrs_disable = 1;

SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
    "Indirect Branch Restricted Speculation active");
hw_ibrs_recalculate(void)

	if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
		if (hw_ibrs_disable) {
			v = rdmsr(MSR_IA32_SPEC_CTRL);
			v &= ~(uint64_t)IA32_SPEC_CTRL_IBRS;
			wrmsr(MSR_IA32_SPEC_CTRL, v);
		} else {
			v = rdmsr(MSR_IA32_SPEC_CTRL);
			v |= IA32_SPEC_CTRL_IBRS;
			wrmsr(MSR_IA32_SPEC_CTRL, v);
		}
		return;
	}
	hw_ibrs_active = (cpu_stdext_feature3 & CPUID_STDEXT3_IBPB) != 0 &&
	    !hw_ibrs_disable;
hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)

	val = hw_ibrs_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	hw_ibrs_disable = val != 0;
	hw_ibrs_recalculate();
	return (error);

SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");
SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
    &hw_ssb_active, 0,
    "Speculative Store Bypass Disable active");
hw_ssb_set_one(bool enable)

	v = rdmsr(MSR_IA32_SPEC_CTRL);
	if (enable)
		v |= (uint64_t)IA32_SPEC_CTRL_SSBD;
	else
		v &= ~(uint64_t)IA32_SPEC_CTRL_SSBD;
	wrmsr(MSR_IA32_SPEC_CTRL, v);

hw_ssb_set(bool enable, bool for_all_cpus)

	int bound_cpu, i, is_bound;

	if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
		hw_ssb_active = false;
		return;
	}
	hw_ssb_active = enable;

	is_bound = sched_is_bound(td);
	bound_cpu = td->td_oncpu;
	hw_ssb_set_one(enable);
	sched_bind(td, bound_cpu);
	hw_ssb_set_one(enable);
hw_ssb_recalculate(bool all_cpus)

	switch (hw_ssb_disable) {
	case 0: /* off */
		hw_ssb_set(false, all_cpus);
		break;
	case 1: /* on */
		hw_ssb_set(true, all_cpus);
		break;
	case 2: /* auto */
		hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSBD_NO) != 0 ?
		    false : true, all_cpus);
		break;
	}
hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)

	val = hw_ssb_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	hw_ssb_disable = val;
	hw_ssb_recalculate(true);
	return (error);

SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
/*
 * Handler for Microarchitectural Data Sampling issues.  Really not a
 * pointer to a C function: on amd64 the code must not change any CPU
 * architectural state except possibly %rflags.  Also, it is always
 * called with interrupts disabled.
 */
void mds_handler_void(void);
void mds_handler_verw(void);
void mds_handler_ivb(void);
void mds_handler_bdw(void);
void mds_handler_skl_sse(void);
void mds_handler_skl_avx(void);
void mds_handler_skl_avx512(void);
void mds_handler_silvermont(void);
void (*mds_handler)(void) = mds_handler_void;
sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)

	if (mds_handler == mds_handler_void)
		state = "inactive";
	else if (mds_handler == mds_handler_verw)
		state = "VERW";
	else if (mds_handler == mds_handler_ivb)
		state = "software IvyBridge";
	else if (mds_handler == mds_handler_bdw)
		state = "software Broadwell";
	else if (mds_handler == mds_handler_skl_sse)
		state = "software Skylake SSE";
	else if (mds_handler == mds_handler_skl_avx)
		state = "software Skylake AVX";
	else if (mds_handler == mds_handler_skl_avx512)
		state = "software Skylake AVX512";
	else if (mds_handler == mds_handler_silvermont)
		state = "software Silvermont";
	else
		state = "unknown";
	return (SYSCTL_OUT(req, state, strlen(state)));

SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_mds_disable_state_handler, "A",
    "Microarchitectural Data Sampling Mitigation state");
_Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");
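/*
 * A note on the 64-byte requirement (inferred): the Skylake AVX512
 * handler presumably parks a full 64-byte ZMM register in pc_mds_tmp
 * using aligned vector moves, which fault unless the field sits on a
 * 64-byte boundary.
 */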
hw_mds_recalculate(void)

	/*
	 * Allow user to force VERW variant even if MD_CLEAR is not
	 * reported.  For instance, hypervisor might unknowingly
	 * filter the cap out.
	 * For similar reasons, and for testing, allow enabling the
	 * mitigation even for RDCL_NO or MDS_NO caps.
	 */
	if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
	    ((cpu_ia32_arch_caps & (IA32_ARCH_CAP_RDCL_NO |
	    IA32_ARCH_CAP_MDS_NO)) != 0 && hw_mds_disable == 3)) {
		mds_handler = mds_handler_void;
	} else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
	    hw_mds_disable == 3) || hw_mds_disable == 1) {
		mds_handler = mds_handler_verw;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
	    CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
	    CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
	    CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
	    CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
	    CPUID_TO_MODEL(cpu_id) == 0x3a) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Nehalem, SandyBridge, IvyBridge
		 */
		if (pc->pc_mds_buf == NULL) {
			pc->pc_mds_buf = malloc(672, M_TEMP,
			    M_WAITOK);
			bzero(pc->pc_mds_buf, 16);
		}
		mds_handler = mds_handler_ivb;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
	    CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
	    CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
	    CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Haswell, Broadwell
		 */
		if (pc->pc_mds_buf == NULL) {
			pc->pc_mds_buf = malloc(1536, M_TEMP,
			    M_WAITOK);
			bzero(pc->pc_mds_buf, 16);
		}
		mds_handler = mds_handler_bdw;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    ((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
	    CPUID_STEPPING) <= 5) ||
	    CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
	    (CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
	    CPUID_STEPPING) <= 0xb) ||
	    (CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
	    CPUID_STEPPING) <= 0xc)) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
		 */
		if (pc->pc_mds_buf == NULL) {
			pc->pc_mds_buf = malloc(6 * 1024, M_TEMP,
			    M_WAITOK);
			b64 = (vm_offset_t)malloc(64 + 63, M_TEMP,
			    M_WAITOK);
			pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
			bzero(pc->pc_mds_buf64, 64);
		}
		if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
		    (cpu_stdext_feature2 & CPUID_STDEXT_AVX512DQ) != 0)
			mds_handler = mds_handler_skl_avx512;
		else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
		    (cpu_feature2 & CPUID2_AVX) != 0)
			mds_handler = mds_handler_skl_avx;
		else
			mds_handler = mds_handler_skl_sse;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    ((CPUID_TO_MODEL(cpu_id) == 0x37 ||
	    CPUID_TO_MODEL(cpu_id) == 0x4a ||
	    CPUID_TO_MODEL(cpu_id) == 0x4c ||
	    CPUID_TO_MODEL(cpu_id) == 0x4d ||
	    CPUID_TO_MODEL(cpu_id) == 0x5a ||
	    CPUID_TO_MODEL(cpu_id) == 0x5d ||
	    CPUID_TO_MODEL(cpu_id) == 0x6e ||
	    CPUID_TO_MODEL(cpu_id) == 0x65 ||
	    CPUID_TO_MODEL(cpu_id) == 0x75 ||
	    CPUID_TO_MODEL(cpu_id) == 0x1c ||
	    CPUID_TO_MODEL(cpu_id) == 0x26 ||
	    CPUID_TO_MODEL(cpu_id) == 0x27 ||
	    CPUID_TO_MODEL(cpu_id) == 0x35 ||
	    CPUID_TO_MODEL(cpu_id) == 0x36 ||
	    CPUID_TO_MODEL(cpu_id) == 0x7a))) {
		/* Silvermont, Airmont */
		if (pc->pc_mds_buf == NULL)
			pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
		mds_handler = mds_handler_silvermont;
	} else {
		mds_handler = mds_handler_void;
sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)

	val = hw_mds_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < 0 || val > 3)
		return (EINVAL);
	hw_mds_disable = val;
	hw_mds_recalculate();
	return (0);

SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_mds_disable_handler, "I",
    "Microarchitectural Data Sampling Mitigation "
    "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");