/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atpic.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_platform.h"
#include "opt_sched.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/tss.h>
#include <machine/smp.h>
#include <machine/elan_mmcr.h>

#include <x86/acpica_machdep.h>
#include <x86/ifunc.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#include <isa/isareg.h>

#include <contrib/dev/acpica/include/acpi.h>
#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2
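
/*
 * Per-CPU idle state word kept in the monitorbuf.  A CPU publishes
 * STATE_MWAIT or STATE_SLEEPING before idling and returns the word to
 * STATE_RUNNING on wakeup; cpu_idle_wakeup() below reads it to decide
 * whether a plain store (with MONITOR/MWAIT armed on the word) or an
 * IPI is needed to wake the target CPU.
 */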
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;
struct msr_op_arg {
	u_int msr;
	int op;
	uint64_t arg1;
	uint64_t *res;
};

static void
x86_msr_op_one(void *argp)
{
	struct msr_op_arg *a;
	uint64_t v;

	a = argp;
	switch (a->op) {
	case MSR_OP_ANDNOT:
		v = rdmsr(a->msr);
		v &= ~a->arg1;
		wrmsr(a->msr, v);
		break;
	case MSR_OP_OR:
		v = rdmsr(a->msr);
		v |= a->arg1;
		wrmsr(a->msr, v);
		break;
	case MSR_OP_WRITE:
		wrmsr(a->msr, a->arg1);
		break;
	case MSR_OP_READ:
		v = rdmsr(a->msr);
		*a->res = v;
		break;
	}
}
#define	MSR_OP_EXMODE_MASK	0xf0000000
#define	MSR_OP_OP_MASK		0x000000ff
#define	MSR_OP_GET_CPUID(x)	(((x) & ~MSR_OP_EXMODE_MASK) >> 8)
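
/*
 * The 'op' word passed to x86_msr_op() packs three fields: the low byte
 * selects the register operation (MSR_OP_ANDNOT/OR/WRITE/READ), the top
 * nibble selects where it runs (MSR_OP_LOCAL or one of the SCHED_*/
 * RENDEZVOUS_* execution modes), and for the *_ONE modes the target CPU
 * id sits in the bits in between, as decoded by MSR_OP_GET_CPUID().
 * A sketch of a typical call, mirroring hw_ssb_set() below:
 *
 *	x86_msr_op(MSR_IA32_SPEC_CTRL,
 *	    MSR_OP_SCHED_ALL | MSR_OP_OR, IA32_SPEC_CTRL_SSBD, NULL);
 */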
void
x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res)
{
	struct thread *td;
	struct msr_op_arg a;
	cpuset_t set;
	u_int exmode;
	int bound_cpu, cpu, i, is_bound;

	a.op = op & MSR_OP_OP_MASK;
	MPASS(a.op == MSR_OP_ANDNOT || a.op == MSR_OP_OR ||
	    a.op == MSR_OP_WRITE || a.op == MSR_OP_READ);
	exmode = op & MSR_OP_EXMODE_MASK;
	MPASS(exmode == MSR_OP_LOCAL || exmode == MSR_OP_SCHED_ALL ||
	    exmode == MSR_OP_SCHED_ONE || exmode == MSR_OP_RENDEZVOUS_ALL ||
	    exmode == MSR_OP_RENDEZVOUS_ONE);

	a.msr = msr;
	a.arg1 = arg1;
	a.res = res;
	switch (exmode) {
	case MSR_OP_LOCAL:
		x86_msr_op_one(&a);
		break;
	case MSR_OP_SCHED_ALL:
		td = curthread;
		thread_lock(td);
		is_bound = sched_is_bound(td);
		bound_cpu = td->td_oncpu;
		CPU_FOREACH(i) {
			sched_bind(td, i);
			x86_msr_op_one(&a);
		}
		if (is_bound)
			sched_bind(td, bound_cpu);
		else
			sched_unbind(td);
		thread_unlock(td);
		break;
	case MSR_OP_SCHED_ONE:
		td = curthread;
		cpu = MSR_OP_GET_CPUID(op);
		thread_lock(td);
		is_bound = sched_is_bound(td);
		bound_cpu = td->td_oncpu;
		if (!is_bound || bound_cpu != cpu)
			sched_bind(td, cpu);
		x86_msr_op_one(&a);
		if (is_bound) {
			if (bound_cpu != cpu)
				sched_bind(td, bound_cpu);
		} else
			sched_unbind(td);
		thread_unlock(td);
		break;
	case MSR_OP_RENDEZVOUS_ALL:
		smp_rendezvous(smp_no_rendezvous_barrier, x86_msr_op_one,
		    smp_no_rendezvous_barrier, &a);
		break;
	case MSR_OP_RENDEZVOUS_ONE:
		cpu = MSR_OP_GET_CPUID(op);
		CPU_SETOF(cpu, &set);
		smp_rendezvous_cpus(set, smp_no_rendezvous_barrier,
		    x86_msr_op_one, smp_no_rendezvous_barrier, &a);
		break;
	}
}
/*
 * Automatically initialized per CPU errata in cpu_idle_tun() below.
 */
bool mwait_cpustop_broken = false;
SYSCTL_BOOL(_machdep, OID_AUTO, mwait_cpustop_broken, CTLFLAG_RDTUN,
    &mwait_cpustop_broken, 0,
    "Cannot reliably wake MONITOR/MWAIT CPUs without interrupts");
/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}

void
acpi_cpu_c1(void)
{

	__asm __volatile("sti; hlt");
}
/*
 * Use mwait to pause execution while waiting for an interrupt or
 * another thread to signal that there is more work.
 *
 * NOTE: Interrupts will cause a wakeup; however, this function does
 * not enable interrupt handling.  The caller is responsible for
 * enabling interrupts.
 */
void
acpi_cpu_idle_mwait(uint32_t mwait_hint)
{
	int *state;
	uint64_t v;

	/*
	 * A comment in a Linux patch claims that 'CPUs run faster with
	 * speculation protection disabled.  All CPU threads in a core
	 * must disable speculation protection for it to be
	 * disabled.  Disable it while we are idle so the other
	 * hyperthread can run fast.'
	 *
	 * XXXKIB.  Software coordination mode should be supported,
	 * but all Intel CPUs provide hardware coordination.
	 */

	state = &PCPU_PTR(monitorbuf)->idle_state;
	KASSERT(atomic_load_int(state) == STATE_SLEEPING,
	    ("cpu_mwait_cx: wrong monitorbuf state"));
	atomic_store_int(state, STATE_MWAIT);
	if (PCPU_GET(ibpb_set) || hw_ssb_active) {
		v = rdmsr(MSR_IA32_SPEC_CTRL);
		wrmsr(MSR_IA32_SPEC_CTRL, v & ~(IA32_SPEC_CTRL_IBRS |
		    IA32_SPEC_CTRL_STIBP | IA32_SPEC_CTRL_SSBD));
	} else {
		v = 0;
	}

	cpu_monitor(state, 0, 0);
	if (atomic_load_int(state) == STATE_MWAIT)
		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);

	/*
	 * SSB cannot be disabled while we sleep, or rather, if it was
	 * disabled, the sysctl thread will bind to our cpu to tweak
	 * the MSR.
	 */
	if (v != 0)
		wrmsr(MSR_IA32_SPEC_CTRL, v);

	/*
	 * We should exit on any event that interrupts mwait, because
	 * that event might be a wanted interrupt.
	 */
	atomic_store_int(state, STATE_RUNNING);
}
/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	uint64_t tsc1, tsc2;
	uint64_t acnt, mcnt, perf;
	register_t reg;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9) based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

#ifdef SMP
	if (smp_cpus > 1) {
		/* Schedule ourselves on the indicated cpu. */
		thread_lock(curthread);
		sched_bind(curthread, cpu_id);
		thread_unlock(curthread);
	}
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		tsc1 = rdtsc();
		DELAY(1000);
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		tsc2 = rdtsc();
		intr_restore(reg);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
	} else {
		tsc1 = rdtsc();
		DELAY(1000);
		tsc2 = rdtsc();
		intr_restore(reg);
		*rate = (tsc2 - tsc1) * 1000;
	}
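
	/*
	 * Roughly: DELAY(1000) spins for one millisecond, so the TSC
	 * delta times 1000 yields counts per second.  With an invariant
	 * TSC the APERF/MPERF ratio rescales that nominal rate to the
	 * frequency the core actually ran at during the window; the
	 * factor of 1000 is folded into 'perf' above.
	 */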
#ifdef SMP
	if (smp_cpus > 1) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
#endif

	return (0);
}
/*
 * Shut down the CPU as much as possible.
 */
void
cpu_halt(void)
{

	for (;;)
		halt();
}
static void
cpu_reset_real(void)
{
	struct region_descriptor null_idt;
	int b;

	disable_intr();
	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);
		outl(0xcfc, 0xf);
	}
#if !defined(BROKEN_KEYBOARD_RESET)
	/*
	 * Attempt to do a CPU reset via the keyboard controller.
	 * Do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
#endif
	/*
	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * a reset.
	 */
	outb(0xcf9, 0x2);
	outb(0xcf9, 0x6);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	/*
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	 */
	b = inb(0x92);
	if (b != 0xff) {
		if ((b & 0x1) != 0)
			outb(0x92, b & 0xfe);
		outb(0x92, b | 0x1);
		DELAY(500000);	/* wait 0.5 sec to see if that did it */
	}
	printf("No known reset method worked, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */

	/* Wipe the IDT. */
	null_idt.rd_limit = 0;
	null_idt.rd_base = 0;
	lidt(&null_idt);

	/* "good night, sweet prince .... <THUNK!>" */
	breakpoint();

	/* NOTREACHED */
	while (1)
		;
}
static void
cpu_reset_proxy(void)
{

	cpu_reset_proxy_active = 1;
	while (cpu_reset_proxy_active == 1)
		ia32_pause(); /* Wait for other cpu to see that we've started */

	printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
	DELAY(1000000);
	cpu_reset_real();
}
void
cpu_reset(void)
{
#ifdef SMP
	struct monitorbuf *mb;
	cpuset_t map;
	u_int cnt;

	if (smp_started) {
		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);
		CPU_ANDNOT(&map, &map, &stopped_cpus);
		if (!CPU_EMPTY(&map)) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);
		}

		if (PCPU_GET(cpuid) != 0) {
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");

			/* Restart CPU #0. */
			CPU_SETOF(0, &started_cpus);
			mb = &pcpu_find(0)->pc_monitorbuf;
			atomic_store_int(&mb->stop_state,
			    MONITOR_STOPSTATE_RUNNING);

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
				ia32_pause();
				cnt++;	/* Wait for BSP to announce restart */
			}
			if (cpu_reset_proxy_active == 0) {
				printf("cpu_reset: Failed to restart BSP\n");
			} else {
				cpu_reset_proxy_active = 2;
				while (1)
					ia32_pause();
				/* NOTREACHED */
			}
		}

		DELAY(1000000);
	}
#endif
	cpu_reset_real();
	/* NOTREACHED */
}
bool
cpu_mwait_usable(void)
{

	return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));
}
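
/*
 * Note: both the MWAIT extensions and the interrupt-break capability
 * are required because the idle paths execute MWAIT with interrupts
 * disabled and rely on MWAIT_INTRBREAK treating a pending interrupt as
 * a wakeup event.
 */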
void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */

int cpu_amdc1e_bug = 0;			/* AMD C1E APIC workaround required. */

static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");
static int
cpu_idle_enter(int *statep, int newstate)
{
	KASSERT(atomic_load_int(statep) == STATE_RUNNING,
	    ("%s: state %d", __func__, atomic_load_int(statep)));

	/*
	 * A fence is needed to prevent reordering of the load in
	 * sched_runnable() with this store to the idle state word.  Without
	 * it, cpu_idle_wakeup() can observe the state as STATE_RUNNING after
	 * having added load to the queue, and elide an IPI.  Then,
	 * sched_runnable() can observe tdq_load == 0, so the CPU ends up
	 * idling with pending work.  tdq_notify() similarly ensures that a
	 * prior update to tdq_load is visible before calling
	 * cpu_idle_wakeup().
	 */
	atomic_store_int(statep, newstate);
#if defined(SCHED_ULE) && defined(SMP)
	atomic_thread_fence_seq_cst();
#endif
	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, please note
	 * that on x86 this works fine because interrupts are enabled only
	 * after the instruction following sti executes, while IF is set
	 * to 1 immediately, allowing hlt to acknowledge the interrupt.
	 */
	disable_intr();
	if (sched_runnable()) {
		enable_intr();
		atomic_store_int(statep, STATE_RUNNING);
		return (0);
	} else {
		return (1);
	}
}
static void
cpu_idle_exit(int *statep)
{

	atomic_store_int(statep, STATE_RUNNING);
}
static void
cpu_idle_acpi(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	if (cpu_idle_enter(state, STATE_SLEEPING)) {
		if (cpu_idle_hook != NULL)
			cpu_idle_hook(sbt);
		else
			acpi_cpu_c1();
		cpu_idle_exit(state);
	}
}
static void
cpu_idle_hlt(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	if (cpu_idle_enter(state, STATE_SLEEPING)) {
		acpi_cpu_c1();
		atomic_store_int(state, STATE_RUNNING);
	}
}
static void
cpu_idle_mwait(sbintime_t sbt)
{
	int *state;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	if (cpu_idle_enter(state, STATE_MWAIT)) {
		cpu_monitor(state, 0, 0);
		if (atomic_load_int(state) == STATE_MWAIT)
			__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
		else
			enable_intr();
		cpu_idle_exit(state);
	}
}
static void
cpu_idle_spin(sbintime_t sbt)
{
	int *state;
	int i;

	state = &PCPU_PTR(monitorbuf)->idle_state;
	atomic_store_int(state, STATE_RUNNING);

	/*
	 * The sched_runnable() call is racy, but since we retry in a
	 * loop, missing the check once has little impact, if any
	 * (and it is much better than not checking at all).
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;
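
/*
 * The active idle method defaults to "acpi" and can be changed at
 * runtime via the machdep.idle sysctl, or at boot via the loader
 * tunable of the same name, e.g. in loader.conf:
 *
 *	machdep.idle="hlt"
 *
 * Both paths go through cpu_idle_selector() below, which only accepts
 * methods the CPU actually supports (see idle_tbl).
 */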
void
cpu_idle(int busy)
{
	uint64_t msr;
	sbintime_t sbt = -1;

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#ifdef MP_WATCHDOG
	ap_watchdog(PCPU_GET(cpuid));
#endif

	/* If we are busy - try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}

	/* If we have time - switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		sbt = cpu_idleclock();
	}

	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_amdc1e_bug && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if ((msr & (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)) != 0)
			wrmsr(MSR_AMDK8_IPM, msr & ~(AMDK8_SMIONCMPHALT |
			    AMDK8_C1EONCMPHALT));
	}

	/* Call main idle method. */
	cpu_idle_fn(sbt);

	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
out:
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}
static int cpu_idle_apl31_workaround;
SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RW,
    &cpu_idle_apl31_workaround, 0,
    "Apollo Lake APL31 MWAIT bug workaround");
int
cpu_idle_wakeup(int cpu)
{
	struct monitorbuf *mb;
	int *state;

	mb = &pcpu_find(cpu)->pc_monitorbuf;
	state = &mb->idle_state;
	switch (atomic_load_int(state)) {
	case STATE_SLEEPING:
		return (0);
	case STATE_MWAIT:
		atomic_store_int(state, STATE_RUNNING);
		return (cpu_idle_apl31_workaround ? 0 : 1);
	case STATE_RUNNING:
		return (1);
	default:
		panic("bad monitor state");
		return (1);
	}
}
/*
 * Ordered by speed/power consumption.
 */
static struct {
	void	(*id_fn)(sbintime_t);
	const char *id_name;
	int	id_cpuid2_flag;
} idle_tbl[] = {
	{ .id_fn = cpu_idle_spin, .id_name = "spin" },
	{ .id_fn = cpu_idle_mwait, .id_name = "mwait",
	    .id_cpuid2_flag = CPUID2_MON },
	{ .id_fn = cpu_idle_hlt, .id_name = "hlt" },
	{ .id_fn = cpu_idle_acpi, .id_name = "acpi" },
};
static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, idle_available,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, idle_sysctl_available, "A",
    "list of available idle functions");
static bool
cpu_idle_selector(const char *new_idle_name)
{
	int i;

	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		if (strcmp(idle_tbl[i].id_name, new_idle_name))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		if (bootverbose)
			printf("CPU idle set to %s\n", idle_tbl[i].id_name);
		return (true);
	}
	return (false);
}
static int
cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	const char *p;
	int error, i;

	p = "unknown";
	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (cpu_idle_selector(buf) ? 0 : EINVAL);
}
SYSCTL_PROC(_machdep, OID_AUTO, idle,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, cpu_idle_sysctl, "A",
    "currently selected idle function");
static void
cpu_idle_tun(void *unused __unused)
{
	char tunvar[16];

	if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
		cpu_idle_selector(tunvar);
	else if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x17 && CPUID_TO_MODEL(cpu_id) == 0x1) {
		/* Ryzen errata 1057, 1109. */
		cpu_idle_selector("hlt");
		idle_mwait = 0;
		mwait_cpustop_broken = true;
	}

	if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_id == 0x506c9) {
		/*
		 * Apollo Lake errata APL31 (public errata APL30).
		 * Stores to the armed address range may not trigger
		 * MWAIT to resume execution.  The OS needs to use
		 * interrupts to wake processors from MWAIT-induced
		 * sleep states.
		 */
		cpu_idle_apl31_workaround = 1;
		mwait_cpustop_broken = true;
	}
	TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);
}
SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);
static int panic_on_nmi = 0xff;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
    &panic_on_nmi, 0,
    "Panic on NMI: 1 = H/W failure; 2 = unknown; 0xff = all");
int nmi_is_broadcast = 1;
SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
    &nmi_is_broadcast, 0,
    "Chipset NMI is broadcast");
int (*apei_nmi)(void);
void
nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)
{
	bool claimed = false;

	/* machine/parity/power fail/"kitchen sink" faults */
	if (isa_nmi(frame->tf_err)) {
		claimed = true;
		if ((panic_on_nmi & 1) != 0)
			panic("NMI indicates hardware failure");
	}

	/* ACPI Platform Error Interfaces callback. */
	if (apei_nmi != NULL && (*apei_nmi)())
		claimed = true;

	/*
	 * NMIs can be useful for debugging.  They can be hooked up to a
	 * pushbutton, usually on an ISA, PCI, or PCIe card.  They can also be
	 * generated by an IPMI BMC, either manually or in response to a
	 * watchdog timeout.  For example, see the "power diag" command in
	 * ports/sysutils/ipmitool.  They can also be generated by a
	 * hypervisor; see "bhyvectl --inject-nmi".
	 */
	if (!claimed && (panic_on_nmi & 2) != 0) {
		if (debugger_on_panic) {
			printf("NMI/cpu%d ... going to debugger\n", cpu);
			claimed = kdb_trap(type, 0, frame);
		}
	}

	if (!claimed && panic_on_nmi != 0)
		panic("NMI");
}
void
nmi_handle_intr(u_int type, struct trapframe *frame)
{

#ifdef SMP
	if (nmi_is_broadcast) {
		nmi_call_kdb_smp(type, frame);
		return;
	}
#endif
	nmi_call_kdb(PCPU_GET(cpuid), type, frame);
}
static int hw_ibrs_active;
int hw_ibrs_ibpb_active;
int hw_ibrs_disable = 1;

SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
    "Indirect Branch Restricted Speculation active");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ibrs,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Indirect Branch Restricted Speculation active");

SYSCTL_INT(_machdep_mitigations_ibrs, OID_AUTO, active, CTLFLAG_RD,
    &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active");
void
hw_ibrs_recalculate(bool for_all_cpus)
{

	if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
		x86_msr_op(MSR_IA32_SPEC_CTRL, (for_all_cpus ?
		    MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL) |
		    (hw_ibrs_disable != 0 ? MSR_OP_ANDNOT : MSR_OP_OR),
		    IA32_SPEC_CTRL_IBRS, NULL);
		hw_ibrs_active = hw_ibrs_disable == 0;
		hw_ibrs_ibpb_active = 0;
	} else {
		hw_ibrs_active = hw_ibrs_ibpb_active = (cpu_stdext_feature3 &
		    CPUID_STDEXT3_IBPB) != 0 && !hw_ibrs_disable;
	}
}
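
/*
 * With IA32_ARCH_CAP_IBRS_ALL ("enhanced IBRS") the SPEC_CTRL bit can
 * be set once on every CPU and left alone.  Without it, the kernel
 * instead relies on issuing IBPB barriers (hw_ibrs_ibpb_active) when
 * CPUID reports IBPB support, since plain IBRS would otherwise need to
 * be rewritten on every kernel entry.
 */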
static int
hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_ibrs_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	hw_ibrs_disable = val != 0;
	hw_ibrs_recalculate(true);
	return (error);
}
SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");

SYSCTL_PROC(_machdep_mitigations_ibrs, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");
int hw_ssb_active;
int hw_ssb_disable;

SYSCTL_INT(_hw, OID_AUTO, spec_store_bypass_disable_active, CTLFLAG_RD,
    &hw_ssb_active, 0,
    "Speculative Store Bypass Disable active");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, ssb,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Speculative Store Bypass Disable active");

SYSCTL_INT(_machdep_mitigations_ssb, OID_AUTO, active, CTLFLAG_RD,
    &hw_ssb_active, 0, "Speculative Store Bypass Disable active");
static void
hw_ssb_set(bool enable, bool for_all_cpus)
{

	if ((cpu_stdext_feature3 & CPUID_STDEXT3_SSBD) == 0) {
		hw_ssb_active = 0;
		return;
	}
	hw_ssb_active = enable;
	x86_msr_op(MSR_IA32_SPEC_CTRL,
	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (for_all_cpus ? MSR_OP_SCHED_ALL : MSR_OP_LOCAL),
	    IA32_SPEC_CTRL_SSBD, NULL);
}
void
hw_ssb_recalculate(bool all_cpus)
{

	switch (hw_ssb_disable) {
	default:
		hw_ssb_disable = 0;
		/* FALLTHROUGH */
	case 0: /* off */
		hw_ssb_set(false, all_cpus);
		break;
	case 1: /* on */
		hw_ssb_set(true, all_cpus);
		break;
	case 2: /* auto */
		hw_ssb_set((cpu_ia32_arch_caps & IA32_ARCH_CAP_SSB_NO) != 0 ?
		    false : true, all_cpus);
		break;
	}
}
static int
hw_ssb_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_ssb_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	hw_ssb_disable = val;
	hw_ssb_recalculate(true);
	return (error);
}
SYSCTL_PROC(_hw, OID_AUTO, spec_store_bypass_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");

SYSCTL_PROC(_machdep_mitigations_ssb, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    hw_ssb_disable_handler, "I",
    "Speculative Store Bypass Disable (0 - off, 1 - on, 2 - auto)");
/*
 * Handler for Microarchitectural Data Sampling issues.  Really not a
 * pointer to a C function: on amd64 the code must not change any CPU
 * architectural state except possibly %rflags.  Also, it is always
 * called with interrupts disabled.
 */
void mds_handler_void(void);
void mds_handler_verw(void);
void mds_handler_ivb(void);
void mds_handler_bdw(void);
void mds_handler_skl_sse(void);
void mds_handler_skl_avx(void);
void mds_handler_skl_avx512(void);
void mds_handler_silvermont(void);
void (*mds_handler)(void) = mds_handler_void;
static int
sysctl_hw_mds_disable_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	if (mds_handler == mds_handler_void)
		state = "inactive";
	else if (mds_handler == mds_handler_verw)
		state = "VERW";
	else if (mds_handler == mds_handler_ivb)
		state = "software IvyBridge";
	else if (mds_handler == mds_handler_bdw)
		state = "software Broadwell";
	else if (mds_handler == mds_handler_skl_sse)
		state = "software Skylake SSE";
	else if (mds_handler == mds_handler_skl_avx)
		state = "software Skylake AVX";
	else if (mds_handler == mds_handler_skl_avx512)
		state = "software Skylake AVX512";
	else if (mds_handler == mds_handler_silvermont)
		state = "software Silvermont";
	else
		state = "unknown";
	return (SYSCTL_OUT(req, state, strlen(state)));
}
SYSCTL_PROC(_hw, OID_AUTO, mds_disable_state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_mds_disable_state_handler, "A",
    "Microarchitectural Data Sampling Mitigation state");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, mds,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Microarchitectural Data Sampling Mitigation state");

SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_hw_mds_disable_state_handler, "A",
    "Microarchitectural Data Sampling Mitigation state");
_Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");
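
/*
 * The AVX512 variant of the MDS handler stores a ZMM register to
 * pc_mds_tmp with an aligned move, hence the 64-byte alignment
 * requirement asserted above.
 */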
void
hw_mds_recalculate(void)
{
	struct pcpu *pc;
	vm_offset_t b64;
	u_long xcr0;
	int i;

	/*
	 * Allow the user to force the VERW variant even if MD_CLEAR is
	 * not reported.  For instance, a hypervisor might unknowingly
	 * filter the cap out.
	 * For similar reasons, and for testing, allow enabling the
	 * mitigation even when the MDS_NO cap is set.
	 */
	if (cpu_vendor_id != CPU_VENDOR_INTEL || hw_mds_disable == 0 ||
	    ((cpu_ia32_arch_caps & IA32_ARCH_CAP_MDS_NO) != 0 &&
	    hw_mds_disable == 3)) {
		mds_handler = mds_handler_void;
	} else if (((cpu_stdext_feature3 & CPUID_STDEXT3_MD_CLEAR) != 0 &&
	    hw_mds_disable == 3) || hw_mds_disable == 1) {
		mds_handler = mds_handler_verw;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x2e || CPUID_TO_MODEL(cpu_id) == 0x1e ||
	    CPUID_TO_MODEL(cpu_id) == 0x1f || CPUID_TO_MODEL(cpu_id) == 0x1a ||
	    CPUID_TO_MODEL(cpu_id) == 0x2f || CPUID_TO_MODEL(cpu_id) == 0x25 ||
	    CPUID_TO_MODEL(cpu_id) == 0x2c || CPUID_TO_MODEL(cpu_id) == 0x2d ||
	    CPUID_TO_MODEL(cpu_id) == 0x2a || CPUID_TO_MODEL(cpu_id) == 0x3e ||
	    CPUID_TO_MODEL(cpu_id) == 0x3a) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Nehalem, SandyBridge, IvyBridge
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(672, M_TEMP,
				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
				bzero(pc->pc_mds_buf, 16);
			}
		}
		mds_handler = mds_handler_ivb;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3f || CPUID_TO_MODEL(cpu_id) == 0x3c ||
	    CPUID_TO_MODEL(cpu_id) == 0x45 || CPUID_TO_MODEL(cpu_id) == 0x46 ||
	    CPUID_TO_MODEL(cpu_id) == 0x56 || CPUID_TO_MODEL(cpu_id) == 0x4f ||
	    CPUID_TO_MODEL(cpu_id) == 0x47 || CPUID_TO_MODEL(cpu_id) == 0x3d) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Haswell, Broadwell
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(1536, M_TEMP,
				    DOMAINSET_PREF(pc->pc_domain), M_WAITOK);
				bzero(pc->pc_mds_buf, 16);
			}
		}
		mds_handler = mds_handler_bdw;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    ((CPUID_TO_MODEL(cpu_id) == 0x55 && (cpu_id &
	    CPUID_STEPPING) <= 5) ||
	    CPUID_TO_MODEL(cpu_id) == 0x4e || CPUID_TO_MODEL(cpu_id) == 0x5e ||
	    (CPUID_TO_MODEL(cpu_id) == 0x8e && (cpu_id &
	    CPUID_STEPPING) <= 0xb) ||
	    (CPUID_TO_MODEL(cpu_id) == 0x9e && (cpu_id &
	    CPUID_STEPPING) <= 0xc)) &&
	    (hw_mds_disable == 2 || hw_mds_disable == 3)) {
		/*
		 * Skylake, KabyLake, CoffeeLake, WhiskeyLake
		 */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL) {
				pc->pc_mds_buf = malloc_domainset(6 * 1024,
				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
				    M_WAITOK);
				b64 = (vm_offset_t)malloc_domainset(64 + 63,
				    M_TEMP, DOMAINSET_PREF(pc->pc_domain),
				    M_WAITOK);
				pc->pc_mds_buf64 = (void *)roundup2(b64, 64);
				bzero(pc->pc_mds_buf64, 64);
			}
		}

		xcr0 = rxcr(0);
		if ((xcr0 & XFEATURE_ENABLED_ZMM_HI256) != 0 &&
		    (cpu_stdext_feature & CPUID_STDEXT_AVX512DQ) != 0)
			mds_handler = mds_handler_skl_avx512;
		else if ((xcr0 & XFEATURE_ENABLED_AVX) != 0 &&
		    (cpu_feature2 & CPUID2_AVX) != 0)
			mds_handler = mds_handler_skl_avx;
		else
			mds_handler = mds_handler_skl_sse;
	} else if (CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    ((CPUID_TO_MODEL(cpu_id) == 0x37 ||
	    CPUID_TO_MODEL(cpu_id) == 0x4a ||
	    CPUID_TO_MODEL(cpu_id) == 0x4c ||
	    CPUID_TO_MODEL(cpu_id) == 0x4d ||
	    CPUID_TO_MODEL(cpu_id) == 0x5a ||
	    CPUID_TO_MODEL(cpu_id) == 0x5d ||
	    CPUID_TO_MODEL(cpu_id) == 0x6e ||
	    CPUID_TO_MODEL(cpu_id) == 0x65 ||
	    CPUID_TO_MODEL(cpu_id) == 0x75 ||
	    CPUID_TO_MODEL(cpu_id) == 0x1c ||
	    CPUID_TO_MODEL(cpu_id) == 0x26 ||
	    CPUID_TO_MODEL(cpu_id) == 0x27 ||
	    CPUID_TO_MODEL(cpu_id) == 0x35 ||
	    CPUID_TO_MODEL(cpu_id) == 0x36 ||
	    CPUID_TO_MODEL(cpu_id) == 0x7a))) {
		/* Silvermont, Airmont */
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			if (pc->pc_mds_buf == NULL)
				pc->pc_mds_buf = malloc(256, M_TEMP, M_WAITOK);
		}
		mds_handler = mds_handler_silvermont;
	} else {
		hw_mds_disable = 0;
		mds_handler = mds_handler_void;
	}
}
static void
hw_mds_recalculate_boot(void *arg __unused)
{

	hw_mds_recalculate();
}
SYSINIT(mds_recalc, SI_SUB_SMP, SI_ORDER_ANY, hw_mds_recalculate_boot, NULL);
static int
sysctl_mds_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_mds_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < 0 || val > 3)
		return (EINVAL);
	hw_mds_disable = val;
	hw_mds_recalculate();
	return (0);
}

SYSCTL_PROC(_hw, OID_AUTO, mds_disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_mds_disable_handler, "I",
    "Microarchitectural Data Sampling Mitigation "
    "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");

SYSCTL_PROC(_machdep_mitigations_mds, OID_AUTO, disable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_mds_disable_handler, "I",
    "Microarchitectural Data Sampling Mitigation "
    "(0 - off, 1 - on VERW, 2 - on SW, 3 - on AUTO)");
/*
 * Intel Transactional Memory Asynchronous Abort Mitigation
 * CVE-2019-11135
 */
int x86_taa_enable;
int x86_taa_state;
enum {
	TAA_NONE	= 0,	/* No mitigation enabled */
	TAA_TSX_DISABLE	= 1,	/* Disable TSX via MSR */
	TAA_VERW	= 2,	/* Use VERW mitigation */
	TAA_AUTO	= 3,	/* Automatically select the mitigation */

	/* The states below are not selectable by the operator */
	TAA_TAA_UC	= 4,	/* Mitigation present in microcode */
	TAA_NOT_PRESENT	= 5	/* TSX is not present */
};
static void
taa_set(bool enable, bool all)
{

	x86_msr_op(MSR_IA32_TSX_CTRL,
	    (enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (all ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
	    IA32_TSX_CTRL_RTM_DISABLE | IA32_TSX_CTRL_TSX_CPUID_CLEAR,
	    NULL);
}
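
/*
 * IA32_TSX_CTRL_RTM_DISABLE forces RTM transactions to abort, and
 * IA32_TSX_CTRL_TSX_CPUID_CLEAR additionally hides the HLE/RTM CPUID
 * feature bits so that newly started applications do not attempt to
 * use TSX at all.
 */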
static void
x86_taa_recalculate(void)
{
	static int taa_saved_mds_disable = 0;
	int taa_need = 0, taa_state = 0;
	int mds_disable = 0, need_mds_recalc = 0;

	/* Check CPUID.07h.EBX.HLE and RTM for the presence of TSX */
	if ((cpu_stdext_feature & CPUID_STDEXT_HLE) == 0 ||
	    (cpu_stdext_feature & CPUID_STDEXT_RTM) == 0) {
		/* TSX is not present */
		x86_taa_state = TAA_NOT_PRESENT;
		return;
	}

	/* Check to see what mitigation options the CPU gives us */
	if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TAA_NO) {
		/* CPU is not susceptible to TAA */
		taa_need = TAA_TAA_UC;
	} else if (cpu_ia32_arch_caps & IA32_ARCH_CAP_TSX_CTRL) {
		/*
		 * CPU can turn off TSX.  This is the next best option
		 * if the TAA_NO hardware mitigation isn't present.
		 */
		taa_need = TAA_TSX_DISABLE;
	} else {
		/* No TSX/TAA specific remedies are available. */
		if (x86_taa_enable == TAA_TSX_DISABLE) {
			if (bootverbose)
				printf("TSX control not available\n");
			return;
		} else
			taa_need = TAA_VERW;
	}
	/* Can we automatically take action, or are we being forced? */
	if (x86_taa_enable == TAA_AUTO)
		taa_state = taa_need;
	else
		taa_state = x86_taa_enable;

	/* No state change, nothing to do */
	if (taa_state == x86_taa_state) {
		if (bootverbose)
			printf("No TSX change made\n");
		return;
	}

	/* Does the MSR need to be turned on or off? */
	if (taa_state == TAA_TSX_DISABLE)
		taa_set(true, true);
	else if (x86_taa_state == TAA_TSX_DISABLE)
		taa_set(false, true);

	/* Does MDS need to be set to turn on VERW? */
	if (taa_state == TAA_VERW) {
		taa_saved_mds_disable = hw_mds_disable;
		mds_disable = hw_mds_disable = 1;
		need_mds_recalc = 1;
	} else if (x86_taa_state == TAA_VERW) {
		mds_disable = hw_mds_disable = taa_saved_mds_disable;
		need_mds_recalc = 1;
	}
	if (need_mds_recalc) {
		hw_mds_recalculate();
		if (mds_disable != hw_mds_disable) {
			if (bootverbose)
				printf("Cannot change MDS state for TAA\n");
			/* Don't update our state */
			return;
		}
	}

	x86_taa_state = taa_state;
}
static void
taa_recalculate_boot(void * arg __unused)
{

	x86_taa_recalculate();
}
SYSINIT(taa_recalc, SI_SUB_SMP, SI_ORDER_ANY, taa_recalculate_boot, NULL);

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, taa,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TSX Asynchronous Abort Mitigation");
static int
sysctl_taa_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = x86_taa_enable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val < TAA_NONE || val > TAA_AUTO)
		return (EINVAL);
	x86_taa_enable = val;
	x86_taa_recalculate();
	return (error);
}

SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, enable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_taa_handler, "I",
    "TAA Mitigation enablement control "
    "(0 - off, 1 - disable TSX, 2 - VERW, 3 - on AUTO)");
static int
sysctl_taa_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	switch (x86_taa_state) {
	case TAA_NONE:
		state = "inactive";
		break;
	case TAA_TSX_DISABLE:
		state = "TSX disabled";
		break;
	case TAA_VERW:
		state = "VERW";
		break;
	case TAA_TAA_UC:
		state = "Mitigated in microcode";
		break;
	case TAA_NOT_PRESENT:
		state = "TSX not present";
		break;
	default:
		state = "unknown";
		break;
	}
	return (SYSCTL_OUT(req, state, strlen(state)));
}

SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_taa_state_handler, "A",
    "TAA Mitigation state");
int __read_frequently cpu_flush_rsb_ctxsw;
SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
    CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
    "Flush Return Stack Buffer on context switch");

SYSCTL_NODE(_machdep_mitigations, OID_AUTO, rngds,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "MCU Optimization, disable RDSEED mitigation");

int x86_rngds_mitg_enable = 1;
void
x86_rngds_mitg_recalculate(bool all_cpus)
{

	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0)
		return;
	x86_msr_op(MSR_IA32_MCU_OPT_CTRL,
	    (x86_rngds_mitg_enable ? MSR_OP_OR : MSR_OP_ANDNOT) |
	    (all_cpus ? MSR_OP_RENDEZVOUS_ALL : MSR_OP_LOCAL),
	    IA32_RNGDS_MITG_DIS, NULL);
}
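
/*
 * The SRBDS ("Special Register Buffer Data Sampling") microcode fix
 * serializes RDSEED/RDRAND execution across cores, which is costly.
 * Setting IA32_RNGDS_MITG_DIS disables that serialization outside of
 * SGX enclaves, trading the mitigation for RDSEED/RDRAND throughput.
 */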
static int
sysctl_rngds_mitg_enable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = x86_rngds_mitg_enable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	x86_rngds_mitg_enable = val;
	x86_rngds_mitg_recalculate(true);
	return (error);
}
SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, enable, CTLTYPE_INT |
    CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_rngds_mitg_enable_handler, "I",
    "MCU Optimization, disabling RDSEED mitigation control "
    "(0 - mitigation disabled (RDSEED optimized), 1 - mitigation enabled)");
static int
sysctl_rngds_state_handler(SYSCTL_HANDLER_ARGS)
{
	const char *state;

	if ((cpu_stdext_feature3 & CPUID_STDEXT3_MCUOPT) == 0) {
		state = "Not applicable";
	} else if (x86_rngds_mitg_enable == 0) {
		state = "RDSEED not serialized";
	} else {
		state = "Mitigated";
	}
	return (SYSCTL_OUT(req, state, strlen(state)));
}
SYSCTL_PROC(_machdep_mitigations_rngds, OID_AUTO, state,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_rngds_state_handler, "A",
    "MCU Optimization state");
/*
 * Enable and restore kernel text write permissions.
 * Callers must ensure that disable_wp()/restore_wp() are executed
 * on the same core, without rescheduling in between.
 */
bool
disable_wp(void)
{
	u_int cr0;

	cr0 = rcr0();
	if ((cr0 & CR0_WP) == 0)
		return (false);
	load_cr0(cr0 & ~CR0_WP);
	return (true);
}

void
restore_wp(bool old_wp)
{

	if (old_wp)
		load_cr0(rcr0() | CR0_WP);
}
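
/*
 * A minimal usage sketch: a caller patching kernel text would pin
 * itself to a CPU, drop CR0.WP around the write, and restore the
 * previous state:
 *
 *	bool wp;
 *
 *	wp = disable_wp();
 *	... patch the text ...
 *	restore_wp(wp);
 */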
int
acpi_get_fadt_bootflags(uint16_t *flagsp)
{
#ifdef DEV_ACPI
	ACPI_TABLE_FADT *fadt;
	vm_paddr_t physaddr;

	physaddr = acpi_find_table(ACPI_SIG_FADT);
	if (physaddr == 0)
		return (0);
	fadt = acpi_map_table(physaddr, ACPI_SIG_FADT);
	if (fadt == NULL)
		return (0);
	*flagsp = fadt->BootFlags;
	acpi_unmap_table(fadt);
	return (1);
#else
	return (0);
#endif
}
DEFINE_IFUNC(, uint64_t, rdtsc_ordered, (void))
{
	bool cpu_is_amd = cpu_vendor_id == CPU_VENDOR_AMD ||
	    cpu_vendor_id == CPU_VENDOR_HYGON;

	if ((amd_feature & AMDID_RDTSCP) != 0)
		return (rdtscp);
	else if ((cpu_feature & CPUID_SSE2) != 0)
		return (cpu_is_amd ? rdtsc_ordered_mfence :
		    rdtsc_ordered_lfence);
	else
		return (rdtsc);
}
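
/*
 * The resolver above picks the cheapest available way to read the TSC
 * without letting the read execute speculatively early: RDTSCP when
 * the CPU has it, otherwise LFENCE;RDTSC on Intel or MFENCE;RDTSC on
 * AMD and Hygon, where LFENCE is not guaranteed to be
 * dispatch-serializing.  The choice is made once at boot via the
 * ifunc mechanism.
 */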