/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 *
 * This code is derived from software contributed to Berkeley by
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atpic.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/tss.h>
#include <machine/smp.h>
#include <machine/elan_mmcr.h>

#include <x86/acpica_machdep.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#include <isa/isareg.h>

#include <contrib/dev/acpica/include/acpi.h>
#define STATE_RUNNING 0x0
#define STATE_MWAIT 0x1
#define STATE_SLEEPING 0x2
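
/*
 * Per-CPU idle-state protocol: each CPU publishes one of the states above
 * in the idle_state word of its monitorbuf.  The idle handlers store
 * STATE_SLEEPING (or STATE_MWAIT) before waiting and STATE_RUNNING on
 * exit; cpu_idle_wakeup() flips the word back to STATE_RUNNING, and for
 * the mwait-based methods that store satisfies the armed MONITOR range
 * and wakes the CPU without an IPI.
 */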
static u_int cpu_reset_proxyid;
static volatile u_int cpu_reset_proxy_active;
x86_msr_op_one(void *argp)
        struct msr_op_arg *a;
        wrmsr(a->msr, a->arg1);
#define MSR_OP_EXMODE_MASK 0xf0000000
#define MSR_OP_OP_MASK 0x000000ff
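
/*
 * The op argument of x86_msr_op() packs two fields: the low byte
 * (MSR_OP_OP_MASK) selects the operation (and-not, or, or write), and the
 * high bits (MSR_OP_EXMODE_MASK) select where it runs: on the local CPU,
 * on every CPU in turn via sched_bind(), or in an SMP rendezvous.  For
 * example, hw_ssb_set() below sets the SSBD bit on all CPUs with:
 *
 *	x86_msr_op(MSR_IA32_SPEC_CTRL, MSR_OP_OR | MSR_OP_SCHED,
 *	    IA32_SPEC_CTRL_SSBD);
 */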
x86_msr_op(u_int msr, u_int op, uint64_t arg1)
        int bound_cpu, i, is_bound;
        a.op = op & MSR_OP_OP_MASK;
        MPASS(a.op == MSR_OP_ANDNOT || a.op == MSR_OP_OR ||
            a.op == MSR_OP_WRITE);
        exmode = op & MSR_OP_EXMODE_MASK;
        MPASS(exmode == MSR_OP_LOCAL || exmode == MSR_OP_SCHED ||
            exmode == MSR_OP_RENDEZVOUS);
        is_bound = sched_is_bound(td);
        bound_cpu = td->td_oncpu;
        sched_bind(td, bound_cpu);
        case MSR_OP_RENDEZVOUS:
                smp_rendezvous(NULL, x86_msr_op_one, NULL, &a);
/*
 * Automatically initialized in cpu_idle_tun() below, based on CPU errata.
 */
bool mwait_cpustop_broken = false;
SYSCTL_BOOL(_machdep, OID_AUTO, mwait_cpustop_broken, CTLFLAG_RDTUN,
    &mwait_cpustop_broken, 0,
    "Cannot reliably wake MONITOR/MWAIT cpus without interrupts");
/*
 * Machine dependent boot() routine.
 *
 * I haven't seen anything to put here yet.
 * Possibly some stuff might be grafted back here from boot().
 */
/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
cpu_flush_dcache(void *ptr, size_t len)

        __asm __volatile("sti; hlt");
/*
 * Use mwait to pause execution while waiting for an interrupt or
 * another thread to signal that there is more work.
 *
 * NOTE: Interrupts will cause a wakeup; however, this function does
 * not enable interrupt handling.  The caller is responsible for
 * enabling interrupt handling.
 */
acpi_cpu_idle_mwait(uint32_t mwait_hint)
        /*
         * A comment in a Linux patch claims that 'CPUs run faster with
         * speculation protection disabled.  All CPU threads in a core
         * must disable speculation protection for it to be
         * disabled.  Disable it while we are idle so the other
         * hyperthread can run fast.'
         *
         * XXXKIB.  Software coordination mode should be supported,
         * but all Intel CPUs provide hardware coordination.
         */
        state = &PCPU_PTR(monitorbuf)->idle_state;
        KASSERT(atomic_load_int(state) == STATE_SLEEPING,
            ("cpu_mwait_cx: wrong monitorbuf state"));
        atomic_store_int(state, STATE_MWAIT);
        if (PCPU_GET(ibpb_set) || hw_ssb_active) {
                v = rdmsr(MSR_IA32_SPEC_CTRL);
                wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
                    IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
        cpu_monitor(state, 0, 0);
        if (atomic_load_int(state) == STATE_MWAIT)
                cpu_mwait(MWAIT_INTRBREAK, mwait_hint);
        /*
         * SSB cannot be disabled while we sleep, or rather, if it was
         * disabled, the sysctl thread will bind to our cpu to tweak
         * the MSR.
         */
        wrmsr(MSR_IA32_SPEC_CTRL, v);
        /*
         * We should exit on any event that interrupts mwait, because
         * that event might be a wanted interrupt.
         */
        atomic_store_int(state, STATE_RUNNING);
/* Get current clock frequency for the given cpu id. */
cpu_est_clockrate(int cpu_id, uint64_t *rate)
        uint64_t acnt, mcnt, perf;

        if (pcpu_find(cpu_id) == NULL || rate == NULL)
        if ((cpu_feature & CPUID_TSC) == 0)
        /*
         * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
         * DELAY(9)-based logic fails.
         */
        if (tsc_is_invariant && !tsc_perf_stat)
        /* Schedule ourselves on the indicated cpu. */
        thread_lock(curthread);
        sched_bind(curthread, cpu_id);
        thread_unlock(curthread);

        /* Calibrate by measuring a short delay. */
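        /*
         * The measurement brackets a short DELAY() window with rdtsc()
         * reads (tsc1/tsc2).  With a 1 ms window, the TSC delta times
         * 1000 converts directly to Hz; when APERF/MPERF are available,
         * their ratio rescales the invariant-TSC delta to the actual
         * (possibly throttled or boosted) core clock.  The 1 ms window
         * length is an assumption here, inferred from the "* 1000"
         * scaling below.
         */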
        reg = intr_disable();
        if (tsc_is_invariant) {
                mcnt = rdmsr(MSR_MPERF);
                acnt = rdmsr(MSR_APERF);
                perf = 1000 * acnt / mcnt;
                *rate = (tsc2 - tsc1) * perf;
        *rate = (tsc2 - tsc1) * 1000;
        thread_lock(curthread);
        sched_unbind(curthread);
        thread_unlock(curthread);
/*
 * Shutdown the CPU as much as possible.
 */
        struct region_descriptor null_idt;

        if (elan_mmcr != NULL)
                elan_mmcr->RESCFG = 1;
        if (cpu == CPU_GEODE1100) {
                /* Attempt Geode's own reset */
                outl(0xcf8, 0x80009044ul);
#if !defined(BROKEN_KEYBOARD_RESET)
        /*
         * Attempt to do a CPU reset via the keyboard controller, but
         * do not turn off GateA20, as any machine that fails to do
         * the reset here would then end up in no man's land.
         */
        outb(IO_KBD + 4, 0xFE);
        DELAY(500000);  /* wait 0.5 sec to see if that did it */
        /*
         * Attempt to force a reset via the Reset Control register at
         * I/O port 0xcf9.  Bit 2 forces a system reset when it
         * transitions from 0 to 1.  Bit 1 selects the type of reset
         * to attempt: 0 selects a "soft" reset, and 1 selects a
         * "hard" reset.  We try a "hard" reset.  The first write sets
         * bit 1 to select a "hard" reset and clears bit 2.  The
         * second write forces a 0 -> 1 transition in bit 2 to trigger
         * a reset.
         */
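        /* The two writes implied by the comment above: 0x2 selects the
         * "hard" reset type; 0x6 then drives bit 2 from 0 to 1. */
        outb(0xcf9, 0x2);
        outb(0xcf9, 0x6);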
        DELAY(500000);  /* wait 0.5 sec to see if that did it */
        /*
         * Attempt to force a reset via the Fast A20 and Init register
         * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
         * Bit 0 asserts INIT# when set to 1.  We are careful to only
         * preserve bit 1 while setting bit 0.  We also must clear bit
         * 0 before setting it if it isn't already clear.
         */
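        /* Read the current register value first, as implied by the
         * read-modify-write sequence the comment above describes. */
        b = inb(0x92);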
        outb(0x92, b & 0xfe);
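        /* Now assert INIT# by setting bit 0 while preserving bit 1, as
         * the comment above requires. */
        outb(0x92, b | 0x1);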
        DELAY(500000);  /* wait 0.5 sec to see if that did it */

        printf("No known reset method worked, attempting CPU shutdown\n");
        DELAY(1000000); /* wait 1 sec for printf to complete */
        null_idt.rd_limit = 0;
        null_idt.rd_base = 0;

        /* "good night, sweet prince .... <THUNK!>" */
cpu_reset_proxy(void)
        cpu_reset_proxy_active = 1;
        while (cpu_reset_proxy_active == 1)
                ia32_pause(); /* Wait for other cpu to see that we've started */
        printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
        struct monitorbuf *mb;

        CPU_CLR(PCPU_GET(cpuid), &map);
        CPU_ANDNOT(&map, &stopped_cpus);
        if (!CPU_EMPTY(&map)) {
                printf("cpu_reset: Stopping other CPUs\n");
        if (PCPU_GET(cpuid) != 0) {
                cpu_reset_proxyid = PCPU_GET(cpuid);
                cpustop_restartfunc = cpu_reset_proxy;
                cpu_reset_proxy_active = 0;
                printf("cpu_reset: Restarting BSP\n");

                /* Restart CPU #0. */
                CPU_SETOF(0, &started_cpus);
                mb = &pcpu_find(0)->pc_monitorbuf;
                atomic_store_int(&mb->stop_state,
                    MONITOR_STOPSTATE_RUNNING);

                while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
                        cnt++;  /* Wait for BSP to announce restart */
                if (cpu_reset_proxy_active == 0) {
                        printf("cpu_reset: Failed to restart BSP\n");
                cpu_reset_proxy_active = 2;
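
/*
 * MWAIT is usable for the idle loop only when the CPU reports both the
 * MONITOR/MWAIT extensions enumeration and the "break on interrupt even
 * when interrupts are disabled" capability (INTRBREAK); without the
 * latter, a CPU sitting in MWAIT with interrupts disabled could not be
 * woken reliably.
 */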
cpu_mwait_usable(void)

        return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
            (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
            (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));

void (*cpu_idle_hook)(sbintime_t) = NULL;       /* ACPI idle hook. */
static int cpu_ident_amdc1e = 0;        /* AMD C1E supported. */
static int idle_mwait = 1;      /* Use MONITOR/MWAIT for short idle. */
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");
cpu_idle_acpi(sbintime_t sbt)
        state = &PCPU_PTR(monitorbuf)->idle_state;
        atomic_store_int(state, STATE_SLEEPING);
        /* See comments in cpu_idle_hlt(). */
        if (sched_runnable())
        else if (cpu_idle_hook)
        atomic_store_int(state, STATE_RUNNING);
cpu_idle_hlt(sbintime_t sbt)
        state = &PCPU_PTR(monitorbuf)->idle_state;
        atomic_store_int(state, STATE_SLEEPING);
        /*
         * Since we may be in a critical section from cpu_idle(), if
         * an interrupt fires during that critical section we may have
         * a pending preemption.  If the CPU halts, then that thread
         * may not execute until a later interrupt awakens the CPU.
         * To handle this race, check for a runnable thread after
         * disabling interrupts and immediately return if one is
         * found.  Also, we must absolutely guarantee that hlt is
         * the next instruction after sti.  This ensures that any
         * interrupt that fires after the call to disable_intr() will
         * immediately awaken the CPU from hlt.  Finally, please note
         * that on x86 this works fine because interrupts are enabled
         * only after the instruction following sti executes, while IF
         * is set to 1 immediately, allowing the hlt instruction to
         * acknowledge the interrupt.
         */
        if (sched_runnable())
        atomic_store_int(state, STATE_RUNNING);
cpu_idle_mwait(sbintime_t sbt)
        state = &PCPU_PTR(monitorbuf)->idle_state;
        atomic_store_int(state, STATE_MWAIT);
        /* See comments in cpu_idle_hlt(). */
        if (sched_runnable()) {
                atomic_store_int(state, STATE_RUNNING);
        cpu_monitor(state, 0, 0);
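        /*
         * Re-check the state after arming the monitor: a wakeup from
         * cpu_idle_wakeup() may already have flipped it to STATE_RUNNING,
         * in which case mwait must be skipped.  The sti immediately
         * before mwait relies on the same one-instruction interrupt
         * shadow as the sti; hlt pair described in cpu_idle_hlt().
         */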
        if (atomic_load_int(state) == STATE_MWAIT)
                __asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
        atomic_store_int(state, STATE_RUNNING);
cpu_idle_spin(sbintime_t sbt)
        state = &PCPU_PTR(monitorbuf)->idle_state;
        atomic_store_int(state, STATE_RUNNING);
        /*
         * The sched_runnable() call is racy, but since we re-check it in
         * a loop, missing it once has little impact, if any (and it is
         * much better than not checking at all).
         */
        for (i = 0; i < 1000; i++) {
                if (sched_runnable())
/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define MSR_AMDK8_IPM 0xc0010055
#define AMDK8_SMIONCMPHALT (1ULL << 27)
#define AMDK8_C1EONCMPHALT (1ULL << 28)
#define AMDK8_CMPHALT (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)
cpu_probe_amdc1e(void)

        /*
         * Detect the presence of the C1E capability, mostly found on the
         * latest dual-core (and newer) K8-family CPUs.
         */
        if (cpu_vendor_id == CPU_VENDOR_AMD &&
            (cpu_id & 0x00000f00) == 0x00000f00 &&
            (cpu_id & 0x0fff0000) >= 0x00040000) {
                cpu_ident_amdc1e = 1;
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;

        CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
        ap_watchdog(PCPU_GET(cpuid));

        /* If we are busy - try to use fast methods. */
        if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
                cpu_idle_mwait(busy);

        /* If we have time - switch timers into idle mode. */
        sbt = cpu_idleclock();
        /* Apply AMD APIC timer C1E workaround. */
        if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
                msr = rdmsr(MSR_AMDK8_IPM);
                if (msr & AMDK8_CMPHALT)
                        wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);

        /* Call main idle method. */

        /* Switch timers back into active mode. */

        CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
static int cpu_idle_apl31_workaround;
SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RW,
    &cpu_idle_apl31_workaround, 0,
    "Apollo Lake APL31 MWAIT bug workaround");
cpu_idle_wakeup(int cpu)
        struct monitorbuf *mb;

        mb = &pcpu_find(cpu)->pc_monitorbuf;
        state = &mb->idle_state;
        switch (atomic_load_int(state)) {
                atomic_store_int(state, STATE_RUNNING);
                return (cpu_idle_apl31_workaround ? 0 : 1);
                panic("bad monitor state");
/*
 * Ordered by speed/power consumption.
 */
        { .id_fn = cpu_idle_spin, .id_name = "spin" },
        { .id_fn = cpu_idle_mwait, .id_name = "mwait",
            .id_cpuid2_flag = CPUID2_MON },
        { .id_fn = cpu_idle_hlt, .id_name = "hlt" },
        { .id_fn = cpu_idle_acpi, .id_name = "acpi" },
idle_sysctl_available(SYSCTL_HANDLER_ARGS)

        avail = malloc(256, M_TEMP, M_WAITOK);
        for (i = 0; i < nitems(idle_tbl); i++) {
                if (idle_tbl[i].id_cpuid2_flag != 0 &&
                    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
                if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
                    cpu_idle_hook == NULL)
                p += sprintf(p, "%s%s", p != avail ? ", " : "",
                    idle_tbl[i].id_name);
        error = sysctl_handle_string(oidp, avail, 0, req);
SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");
cpu_idle_selector(const char *new_idle_name)

        for (i = 0; i < nitems(idle_tbl); i++) {
                if (idle_tbl[i].id_cpuid2_flag != 0 &&
                    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
                if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
                    cpu_idle_hook == NULL)
                if (strcmp(idle_tbl[i].id_name, new_idle_name))
                cpu_idle_fn = idle_tbl[i].id_fn;
                printf("CPU idle set to %s\n", idle_tbl[i].id_name);
cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)

        for (i = 0; i < nitems(idle_tbl); i++) {
                if (idle_tbl[i].id_fn == cpu_idle_fn) {
                        p = idle_tbl[i].id_name;
        strncpy(buf, p, sizeof(buf));
        error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
        if (error != 0 || req->newptr == NULL)
        return (cpu_idle_selector(buf) ? 0 : EINVAL);
SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    cpu_idle_sysctl, "A", "currently selected idle function");
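
/*
 * Illustrative usage (not part of this file): the idle method can be
 * selected at runtime with
 *	# sysctl machdep.idle=hlt
 * or at boot via the machdep.idle loader tunable, which cpu_idle_tun()
 * below fetches with TUNABLE_STR_FETCH().
 */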
cpu_idle_tun(void *unused __unused)

        if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
                cpu_idle_selector(tunvar);
        else if (cpu_vendor_id == CPU_VENDOR_AMD &&
            CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1) {
                /* Ryzen errata 1057 and 1109. */
                cpu_idle_selector("hlt");
                mwait_cpustop_broken = true;
        if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_id == 0x506c9) {
                /*
                 * Apollo Lake errata APL31 (public errata APL30).
                 * Stores to the armed address range may not trigger
                 * MWAIT to resume execution.  OS needs to use
                 * interrupts to wake processors from MWAIT-induced
                 * sleep states.
                 */
                cpu_idle_apl31_workaround = 1;
                mwait_cpustop_broken = true;
        TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);
SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);

static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
    "Panic on NMI raised by hardware failure");
int nmi_is_broadcast = 1;
SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
    &nmi_is_broadcast, 0,
    "Chipset NMI is broadcast");
SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RWTUN,
    "Go to KDB on NMI with unknown source");
nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)
        bool claimed = false;

        /* machine/parity/power fail/"kitchen sink" faults */
        if (isa_nmi(frame->tf_err)) {
                panic("NMI indicates hardware failure");
        if (!claimed && kdb_on_nmi) {
                /*
                 * NMI can be hooked up to a pushbutton for debugging.
                 */
                printf("NMI/cpu%d ... going to debugger\n", cpu);
                kdb_trap(type, 0, frame);
nmi_handle_intr(u_int type, struct trapframe *frame)

        if (nmi_is_broadcast) {
                nmi_call_kdb_smp(type, frame);
        nmi_call_kdb(PCPU_GET(cpuid), type, frame);
int hw_ibrs_disable = 1;

SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
    "Indirect Branch Restricted Speculation active");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ibrs, CTLFLAG_RW, 0,
    "Indirect Branch Restricted Speculation active");

SYSCTL_INT(_machdep_mitigations_ibrs, OID_AUTO, active, CTLFLAG_RD,
    &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active");
hw_ibrs_recalculate(void)

        if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
                x86_msr_op(MSR_IA32_SPEC_CTRL, MSR_OP_LOCAL |
                    (hw_ibrs_disable ? MSR_OP_ANDNOT : MSR_OP_OR),
                    IA32_SPEC_CTRL_IBRS);
        hw_ibrs_active = (cpu_stdext_feature3 & CPUID_STDEXT3_IBPB) != 0 &&
hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)

        val = hw_ibrs_disable;
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
        hw_ibrs_disable = val != 0;
        hw_ibrs_recalculate();
SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");

SYSCTL_PROC(_machdep_mitigations_ibrs, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");
SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
    "Speculative Store Bypass Disable active");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ssb, CTLFLAG_RW, 0,
    "Speculative Store Bypass Disable active");

SYSCTL_INT(_machdep_mitigations_ssb, OID_AUTO, active, CTLFLAG_RD,
    &hw_ssb_active, 0, "Speculative Store Bypass Disable active");
hw_ssb_set(bool enable, bool for_all_cpus)

        if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
        hw_ssb_active = enable;
        x86_msr_op(MSR_IA32_SPEC_CTRL,
            (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
            (for_all_cpus ? MSR_OP_SCHED : MSR_OP_LOCAL), IA32_SPEC_CTRL_SSBD);
hw_ssb_recalculate(bool all_cpus)

        switch (hw_ssb_disable) {
                hw_ssb_set(false, all_cpus);
                hw_ssb_set(true, all_cpus);
                hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSB_NO) != 0 ?
                    false : true, all_cpus);
hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)

        val = hw_ssb_disable;
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
        hw_ssb_disable = val;
        hw_ssb_recalculate(true);
SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");

SYSCTL_PROC(_machdep_mitigations_ssb, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
/*
 * Handler for Microarchitectural Data Sampling issues.  Really not a
 * pointer to a C function: on amd64 the code must not change any CPU
 * architectural state except possibly %rflags.  Also, it is always
 * called with interrupts disabled.
 */
void mds_handler_void(void);
void mds_handler_verw(void);
void mds_handler_ivb(void);
void mds_handler_bdw(void);
void mds_handler_skl_sse(void);
void mds_handler_skl_avx(void);
void mds_handler_skl_avx512(void);
void mds_handler_silvermont(void);
void (*mds_handler)(void) = mds_handler_void;
sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)

        if (mds_handler == mds_handler_void)
        else if (mds_handler == mds_handler_verw)
        else if (mds_handler == mds_handler_ivb)
                state = "software IvyBridge";
        else if (mds_handler == mds_handler_bdw)
                state = "software Broadwell";
        else if (mds_handler == mds_handler_skl_sse)
                state = "software Skylake SSE";
        else if (mds_handler == mds_handler_skl_avx)
                state = "software Skylake AVX";
        else if (mds_handler == mds_handler_skl_avx512)
                state = "software Skylake AVX512";
        else if (mds_handler == mds_handler_silvermont)
                state = "software Silvermont";
        return (SYSCTL_OUT(req, state, strlen(state)));
SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_mds_disable_state_handler, "A",
    "Microarchitectural Data Sampling Mitigation state");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, mds, CTLFLAG_RW, 0,
    "Microarchitectural Data Sampling Mitigation state");

SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_mds_disable_state_handler, "A",
    "Microarchitectural Data Sampling Mitigation state");
_Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");
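
/*
 * The 64-byte alignment asserted above matters because the AVX512
 * variant of the handler saves a full ZMM register through pc_mds_tmp
 * with aligned moves; an unaligned buffer would fault.  The SSE and AVX
 * variants have smaller alignment needs, so the strictest case is the
 * one asserted.
 */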
hw_mds_recalculate(void)

        /*
         * Allow the user to force the VERW variant even if MD_CLEAR is
         * not reported.  For instance, a hypervisor might unknowingly
         * filter the cap out.
         * For similar reasons, and for testing, allow enabling the
         * mitigation even with the RDCL_NO or MDS_NO caps set.
         */
        if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
            ((cpu_ia32_arch_caps & (IA32_ARCH_CAP_RDCL_NO |
            IA32_ARCH_CAP_MDS_NO)) != 0 && hw_mds_disable == 3)) {
                mds_handler = mds_handler_void;
        } else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
            hw_mds_disable == 3) || hw_mds_disable == 1) {
                mds_handler = mds_handler_verw;
        } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
            (CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
            CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
            CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
            CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
            CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
            CPUID_TO_MODEL(cpu_id) == 0x3a) &&
            (hw_mds_disable == 2 || hw_mds_disable == 3)) {
                /*
                 * Nehalem, SandyBridge, IvyBridge
                 */
                if (pc->pc_mds_buf == NULL) {
                        pc->pc_mds_buf = malloc_domainset(672, M_TEMP,
                            DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
                        bzero(pc->pc_mds_buf, 16);
                mds_handler = mds_handler_ivb;
        } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
            (CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
            CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
            CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
            CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
            (hw_mds_disable == 2 || hw_mds_disable == 3)) {
                /*
                 * Haswell, Broadwell
                 */
                if (pc->pc_mds_buf == NULL) {
                        pc->pc_mds_buf = malloc_domainset(1536, M_TEMP,
                            DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
                        bzero(pc->pc_mds_buf, 16);
                mds_handler = mds_handler_bdw;
        } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
            ((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
            CPUID_STEPPING) <= 5) ||
            CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
            (CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
            CPUID_STEPPING) <= 0xb) ||
            (CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
            CPUID_STEPPING) <= 0xc)) &&
            (hw_mds_disable == 2 || hw_mds_disable == 3)) {
                /*
                 * Skylake, KabyLake, CoffeeLake, WhiskeyLake,
                 */
                if (pc->pc_mds_buf == NULL) {
                        pc->pc_mds_buf = malloc_domainset(6 * 1024,
                            M_TEMP, DOMAINSET_PREF(pc->pc_domain),
                        b64 = (vm_offset_t)malloc_domainset(64 + 63,
                            M_TEMP, DOMAINSET_PREF(pc->pc_domain),
                        pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
                        bzero(pc->pc_mds_buf64, 64);
                if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
                    (cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0)
                        mds_handler = mds_handler_skl_avx512;
                else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
                    (cpu_feature2 & CPUID2_AVX) != 0)
                        mds_handler = mds_handler_skl_avx;
                        mds_handler = mds_handler_skl_sse;
        } else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
            ((CPUID_TO_MODEL(cpu_id) == 0x37 ||
            CPUID_TO_MODEL(cpu_id) == 0x4a ||
            CPUID_TO_MODEL(cpu_id) == 0x4c ||
            CPUID_TO_MODEL(cpu_id) == 0x4d ||
            CPUID_TO_MODEL(cpu_id) == 0x5a ||
            CPUID_TO_MODEL(cpu_id) == 0x5d ||
            CPUID_TO_MODEL(cpu_id) == 0x6e ||
            CPUID_TO_MODEL(cpu_id) == 0x65 ||
            CPUID_TO_MODEL(cpu_id) == 0x75 ||
            CPUID_TO_MODEL(cpu_id) == 0x1c ||
            CPUID_TO_MODEL(cpu_id) == 0x26 ||
            CPUID_TO_MODEL(cpu_id) == 0x27 ||
            CPUID_TO_MODEL(cpu_id) == 0x35 ||
            CPUID_TO_MODEL(cpu_id) == 0x36 ||
            CPUID_TO_MODEL(cpu_id) == 0x7a))) {
                /* Silvermont, Airmont */
                if (pc->pc_mds_buf == NULL)
                        pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
                mds_handler = mds_handler_silvermont;
                mds_handler = mds_handler_void;
hw_mds_recalculate_boot(void *arg __unused)

        hw_mds_recalculate();

SYSINIT(mds_recalc, SI_SUB_SMP, SI_ORDER_ANY, hw_mds_recalculate_boot, NULL);
sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)

        val = hw_mds_disable;
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
        if (val < 0 || val > 3)
        hw_mds_disable = val;
        hw_mds_recalculate();
SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_mds_disable_handler, "I",
    "Microarchitectural Data Sampling Mitigation "
    "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");

SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_mds_disable_handler, "I",
    "Microarchitectural Data Sampling Mitigation "
    "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
/*
 * Intel Transactional Memory Asynchronous Abort Mitigation
 */
        TAA_NONE        = 0,    /* No mitigation enabled */
        TAA_TSX_DISABLE = 1,    /* Disable TSX via MSR */
        TAA_VERW        = 2,    /* Use VERW mitigation */
        TAA_AUTO        = 3,    /* Automatically select the mitigation */

        /* The states below are not selectable by the operator */
        TAA_TAA_UC      = 4,    /* Mitigation present in microcode */
        TAA_NOT_PRESENT = 5     /* TSX is not present */
taa_set(bool enable, bool all)

        x86_msr_op(MSR_IA32_TSX_CTRL,
            (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
            (all ? MSR_OP_RENDEZVOUS : MSR_OP_LOCAL),
            IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR);
x86_taa_recalculate(void)

        static int taa_saved_mds_disable = 0;
        int taa_need = 0, taa_state = 0;
        int mds_disable = 0, need_mds_recalc = 0;

        /* Check CPUID.07h.EBX.HLE and RTM for the presence of TSX. */
        if ((cpu_stdext_feature & CPUID_STDEXT_HLE) == 0 ||
            (cpu_stdext_feature & CPUID_STDEXT_RTM) == 0) {
                /* TSX is not present. */
                x86_taa_state = TAA_NOT_PRESENT;

        /* Check to see what mitigation options the CPU gives us. */
        if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TAA_NO) {
                /* CPU is not susceptible to TAA. */
                taa_need = TAA_TAA_UC;
        } else if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TSX_CTRL) {
                /*
                 * CPU can turn off TSX.  This is the next best option
                 * if the TAA_NO hardware mitigation isn't present.
                 */
                taa_need = TAA_TSX_DISABLE;
                /* No TSX/TAA specific remedies are available. */
                if (x86_taa_enable == TAA_TSX_DISABLE) {
                        printf("TSX control not available\n");
                taa_need = TAA_VERW;
        /* Can we automatically take action, or are we being forced? */
        if (x86_taa_enable == TAA_AUTO)
                taa_state = taa_need;
                taa_state = x86_taa_enable;

        /* No state change, nothing to do. */
        if (taa_state == x86_taa_state) {
                        printf("No TSX change made\n");

        /* Does the MSR need to be turned on or off? */
        if (taa_state == TAA_TSX_DISABLE)
                taa_set(true, true);
        else if (x86_taa_state == TAA_TSX_DISABLE)
                taa_set(false, true);

        /* Does MDS need to be set to turn on VERW? */
        if (taa_state == TAA_VERW) {
                taa_saved_mds_disable = hw_mds_disable;
                mds_disable = hw_mds_disable = 1;
                need_mds_recalc = 1;
        } else if (x86_taa_state == TAA_VERW) {
                mds_disable = hw_mds_disable = taa_saved_mds_disable;
                need_mds_recalc = 1;
        if (need_mds_recalc) {
                hw_mds_recalculate();
                if (mds_disable != hw_mds_disable) {
                        printf("Cannot change MDS state for TAA\n");
                        /* Don't update our state. */

        x86_taa_state = taa_state;
taa_recalculate_boot(void *arg __unused)

        x86_taa_recalculate();

SYSINIT(taa_recalc, SI_SUB_SMP, SI_ORDER_ANY, taa_recalculate_boot, NULL);

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, taa, CTLFLAG_RW, 0,
    "TSX Asynchronous Abort Mitigation");
sysctl_taa_handler(SYSCTL_HANDLER_ARGS)

        val = x86_taa_enable;
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
        if (val < TAA_NONE || val > TAA_AUTO)
        x86_taa_enable = val;
        x86_taa_recalculate();
SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, enable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_taa_handler, "I",
    "TAA Mitigation enablement control "
    "(0 - off, 1 - disable TSX, 2 - VERW, 3 - on AUTO)");
sysctl_taa_state_handler(SYSCTL_HANDLER_ARGS)

        switch (x86_taa_state) {
        case TAA_TSX_DISABLE:
                state = "TSX disabled";
                state = "Mitigated in microcode";
        case TAA_NOT_PRESENT:
                state = "TSX not present";
        return (SYSCTL_OUT(req, state, strlen(state)));
SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_taa_state_handler, "A",
    "TAA Mitigation state");
/*
 * Enable and restore kernel text write permissions.
 * Callers must ensure that disable_wp()/restore_wp() are executed
 * without rescheduling on the same core.
 */
        if ((cr0 & CR0_WP) == 0)
        load_cr0(cr0 & ~CR0_WP);

restore_wp(bool old_wp)
        load_cr0(rcr0() | CR0_WP);
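
/*
 * Illustrative usage pattern (not from this file), under the constraint
 * noted above that no rescheduling may happen in between:
 *
 *	bool wp;
 *
 *	wp = disable_wp();
 *	... patch kernel text ...
 *	restore_wp(wp);
 */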
acpi_get_fadt_bootflags(uint16_t *flagsp)

        ACPI_TABLE_FADT *fadt;
        vm_paddr_t physaddr;

        physaddr = acpi_find_table(ACPI_SIG_FADT);
        fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
        *flagsp = fadt->BootFlags;
        acpi_unmap_table(fadt);