 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * This code is derived from software contributed to Berkeley by
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atpic.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/tss.h>
#include <machine/smp.h>
#include <machine/elan_mmcr.h>

#include <x86/acpica_machdep.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#include <isa/isareg.h>
#define STATE_RUNNING	0x0
#define STATE_MWAIT	0x1
#define STATE_SLEEPING	0x2
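/*
 * Descriptive note (added; not in the original source): these states
 * live in the per-CPU monitorbuf word.  An idle CPU publishes
 * STATE_MWAIT or STATE_SLEEPING before blocking.  For STATE_MWAIT,
 * cpu_idle_wakeup() stores STATE_RUNNING into the monitored word,
 * which breaks the target CPU out of MWAIT without an IPI; a CPU in
 * STATE_SLEEPING can only be woken by an interrupt.
 */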
static u_int	cpu_reset_proxyid;
static volatile u_int	cpu_reset_proxy_active;

 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()

 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
cpu_flush_dcache(void *ptr, size_t len)

	__asm __volatile("sti; hlt");

 * Use mwait to pause execution while waiting for an interrupt or
 * another thread to signal that there is more work.
 *
 * NOTE: Interrupts will cause a wakeup; however, this function does
 * not enable interrupt handling.  The caller is responsible for
 * enabling interrupt handling.
acpi_cpu_idle_mwait(uint32_t mwait_hint)
	 * A comment in a Linux patch claims that 'CPUs run faster with
	 * speculation protection disabled.  All CPU threads in a core
	 * must disable speculation protection for it to be
	 * disabled.  Disable it while we are idle so the other
	 * hyperthread can run fast.'
	 * XXXKIB.  Software coordination mode should be supported,
	 * but all Intel CPUs provide hardware coordination.

	state = (int *)PCPU_PTR(monitorbuf);
	KASSERT(atomic_load_int(state) == STATE_SLEEPING,
	    ("cpu_mwait_cx: wrong monitorbuf state"));
	atomic_store_int(state, STATE_MWAIT);
	cpu_monitor(state, 0, 0);
	if (atomic_load_int(state) == STATE_MWAIT)
		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);

	 * We should exit on any event that interrupts mwait, because
	 * that event might be a wanted interrupt.
	atomic_store_int(state, STATE_RUNNING);
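/*
 * Illustrative sketch (hypothetical, not part of this file): roughly
 * how an ACPI C-state driver would invoke acpi_cpu_idle_mwait().  In
 * this file it is reached through cpu_idle_hook from cpu_idle_acpi(),
 * which has already published STATE_SLEEPING (see the KASSERT above)
 * with interrupts disabled; interrupts are re-enabled afterwards so
 * the pending wakeup interrupt can be serviced.
 */
static void
example_acpi_cx_enter(uint32_t mwait_hint)	/* hypothetical helper */
{

	disable_intr();
	acpi_cpu_idle_mwait(mwait_hint);
	enable_intr();	/* handle the interrupt that woke us */
}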
/* Get current clock frequency for the given cpu id. */
cpu_est_clockrate(int cpu_id, uint64_t *rate)
	uint64_t acnt, mcnt, perf;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
	if ((cpu_feature & CPUID_TSC) == 0)

	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9) based logic fails.
	if (tsc_is_invariant && !tsc_perf_stat)

	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
		*rate = (tsc2 - tsc1) * 1000;

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
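/*
 * Illustrative sketch (hypothetical, not part of this file): query the
 * estimated clock rate of the current CPU and report it in MHz.
 */
static void
example_report_clockrate(void)
{
	uint64_t rate;

	if (cpu_est_clockrate(PCPU_GET(cpuid), &rate) == 0)
		printf("cpu%u: ~%ju MHz\n", PCPU_GET(cpuid),
		    (uintmax_t)(rate / 1000000));
}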
 * Shutdown the CPU as much as possible

	struct region_descriptor null_idt;

	if (elan_mmcr != NULL)
		elan_mmcr->RESCFG = 1;
	if (cpu == CPU_GEODE1100) {
		/* Attempt Geode's own reset */
		outl(0xcf8, 0x80009044ul);

#if !defined(BROKEN_KEYBOARD_RESET)
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */

	 * Attempt to force a reset via the Reset Control register at
	 * I/O port 0xcf9.  Bit 2 forces a system reset when it
	 * transitions from 0 to 1.  Bit 1 selects the type of reset
	 * to attempt: 0 selects a "soft" reset, and 1 selects a
	 * "hard" reset.  We try a "hard" reset.  The first write sets
	 * bit 1 to select a "hard" reset and clears bit 2.  The
	 * second write forces a 0 -> 1 transition in bit 2 to trigger
	 * a reset.
	outb(0xcf9, 0x2);
	outb(0xcf9, 0x6);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	 * Attempt to force a reset via the Fast A20 and Init register
	 * at I/O port 0x92.  Bit 1 serves as an alternate A20 gate.
	 * Bit 0 asserts INIT# when set to 1.  We are careful to only
	 * preserve bit 1 while setting bit 0.  We also must clear bit
	 * 0 before setting it if it isn't already clear.
	b = inb(0x92);
	if ((b & 0x1) != 0)
		outb(0x92, b & 0xfe);
	outb(0x92, b | 0x1);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
312 printf("No known reset method worked, attempting CPU shutdown\n");
313 DELAY(1000000); /* wait 1 sec for printf to complete */
316 null_idt.rd_limit = 0;
317 null_idt.rd_base = 0;
320 /* "good night, sweet prince .... <THUNK!>" */
329 cpu_reset_proxy(void)
332 cpu_reset_proxy_active = 1;
333 while (cpu_reset_proxy_active == 1)
334 ia32_pause(); /* Wait for other cpu to see that we've started */
336 printf("cpu_reset_proxy: Stopped CPU %d\n", cpu_reset_proxyid);
351 CPU_CLR(PCPU_GET(cpuid), &map);
352 CPU_NAND(&map, &stopped_cpus);
353 if (!CPU_EMPTY(&map)) {
354 printf("cpu_reset: Stopping other CPUs\n");
358 if (PCPU_GET(cpuid) != 0) {
359 cpu_reset_proxyid = PCPU_GET(cpuid);
360 cpustop_restartfunc = cpu_reset_proxy;
361 cpu_reset_proxy_active = 0;
362 printf("cpu_reset: Restarting BSP\n");
364 /* Restart CPU #0. */
365 CPU_SETOF(0, &started_cpus);
369 while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
371 cnt++; /* Wait for BSP to announce restart */
373 if (cpu_reset_proxy_active == 0) {
374 printf("cpu_reset: Failed to restart BSP\n");
376 cpu_reset_proxy_active = 2;
cpu_mwait_usable(void)

	return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));

void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */
static int	cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");
cpu_idle_acpi(sbintime_t sbt)

	state = (int *)PCPU_PTR(monitorbuf);
	atomic_store_int(state, STATE_SLEEPING);

	/* See comments in cpu_idle_hlt(). */
	if (sched_runnable())
	else if (cpu_idle_hook)

	atomic_store_int(state, STATE_RUNNING);
cpu_idle_hlt(sbintime_t sbt)

	state = (int *)PCPU_PTR(monitorbuf);
	atomic_store_int(state, STATE_SLEEPING);

	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, please note
	 * that on x86 this works fine because interrupts are enabled
	 * only after the instruction following sti executes, while IF
	 * is set immediately, allowing hlt to acknowledge the
	 * interrupt.
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		acpi_cpu_c1();
	atomic_store_int(state, STATE_RUNNING);
cpu_idle_mwait(sbintime_t sbt)

	state = (int *)PCPU_PTR(monitorbuf);
	atomic_store_int(state, STATE_MWAIT);

	/* See comments in cpu_idle_hlt(). */
	if (sched_runnable()) {
		atomic_store_int(state, STATE_RUNNING);

	cpu_monitor(state, 0, 0);
	if (atomic_load_int(state) == STATE_MWAIT)
		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));

	atomic_store_int(state, STATE_RUNNING);

cpu_idle_spin(sbintime_t sbt)

	state = (int *)PCPU_PTR(monitorbuf);
	atomic_store_int(state, STATE_RUNNING);
	 * The sched_runnable() call is racy, but since we are in a
	 * loop, missing it once has little impact, if any (and it is
	 * much better than not checking at all).
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 * #32559 revision 3.00+

#define MSR_AMDK8_IPM		0xc0010055
#define AMDK8_SMIONCMPHALT	(1ULL << 27)
#define AMDK8_C1EONCMPHALT	(1ULL << 28)
#define AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)

cpu_probe_amdc1e(void)
	 * Detect the presence of C1E capability, mostly found on the
	 * latest dual-core (or newer) K8-family CPUs.
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	ap_watchdog(PCPU_GET(cpuid));

	/* If we are busy - try to use fast methods. */
	if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
		cpu_idle_mwait(busy);

	/* If we have time - switch timers into idle mode. */
		sbt = cpu_idleclock();

	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);

	/* Call main idle method. */

	/* Switch timers back into active mode. */

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
static int cpu_idle_apl31_workaround;
SYSCTL_INT(_machdep, OID_AUTO, idle_apl31, CTLFLAG_RW,
    &cpu_idle_apl31_workaround, 0,
    "Apollo Lake APL31 MWAIT bug workaround");
cpu_idle_wakeup(int cpu)

	state = (int *)pcpu_find(cpu)->pc_monitorbuf;
	switch (atomic_load_int(state)) {
		atomic_store_int(state, STATE_RUNNING);
		return (cpu_idle_apl31_workaround ? 0 : 1);
		panic("bad monitor state");

 * Ordered by speed/power consumption.
	{ .id_fn = cpu_idle_spin, .id_name = "spin" },
	{ .id_fn = cpu_idle_mwait, .id_name = "mwait",
	    .id_cpuid2_flag = CPUID2_MON },
	{ .id_fn = cpu_idle_hlt, .id_name = "hlt" },
	{ .id_fn = cpu_idle_acpi, .id_name = "acpi" },
idle_sysctl_available(SYSCTL_HANDLER_ARGS)

	avail = malloc(256, M_TEMP, M_WAITOK);
	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	error = sysctl_handle_string(oidp, avail, 0, req);

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");
cpu_idle_selector(const char *new_idle_name)

	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_cpuid2_flag != 0 &&
		    (cpu_feature2 & idle_tbl[i].id_cpuid2_flag) == 0)
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
		if (strcmp(idle_tbl[i].id_name, new_idle_name))
		cpu_idle_fn = idle_tbl[i].id_fn;
			printf("CPU idle set to %s\n", idle_tbl[i].id_name);
cpu_idle_sysctl(SYSCTL_HANDLER_ARGS)

	for (i = 0; i < nitems(idle_tbl); i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;

	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
	return (cpu_idle_selector(buf) ? 0 : EINVAL);

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    cpu_idle_sysctl, "A", "currently selected idle function");
cpu_idle_tun(void *unused __unused)

	if (TUNABLE_STR_FETCH("machdep.idle", tunvar, sizeof(tunvar)))
		cpu_idle_selector(tunvar);
	if (cpu_vendor_id == CPU_VENDOR_INTEL && cpu_id == 0x506c9) {
		 * Apollo Lake errata APL31 (public errata APL30).
		 * Stores to the armed address range may not trigger
		 * MWAIT to resume execution.  OS needs to use
		 * interrupts to wake processors from MWAIT-induced
		 * sleep states.
		cpu_idle_apl31_workaround = 1;
	TUNABLE_INT_FETCH("machdep.idle_apl31", &cpu_idle_apl31_workaround);

SYSINIT(cpu_idle_tun, SI_SUB_CPU, SI_ORDER_MIDDLE, cpu_idle_tun, NULL);
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,

int nmi_is_broadcast = 1;
SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
    &nmi_is_broadcast, 0,
    "Chipset NMI is broadcast");

SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RWTUN,
nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)

	/* machine/parity/power fail/"kitchen sink" faults */
	if (isa_nmi(frame->tf_err) == 0) {
		 * NMI can be hooked up to a pushbutton for debugging.
			printf("NMI/cpu%d ... going to debugger\n", cpu);
			kdb_trap(type, 0, frame);
	} else if (panic_on_nmi) {
		panic("NMI indicates hardware failure");
nmi_handle_intr(u_int type, struct trapframe *frame)

	if (nmi_is_broadcast) {
		nmi_call_kdb_smp(type, frame);
	nmi_call_kdb(PCPU_GET(cpuid), type, frame);
int hw_ibrs_disable = 1;

SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
    "Indirect Branch Restricted Speculation active");

hw_ibrs_recalculate(void)

	if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
		if (hw_ibrs_disable) {
			v = rdmsr(MSR_IA32_SPEC_CTRL);
			v &= ~(uint64_t)IA32_SPEC_CTRL_IBRS;
			wrmsr(MSR_IA32_SPEC_CTRL, v);
			v = rdmsr(MSR_IA32_SPEC_CTRL);
			v |= IA32_SPEC_CTRL_IBRS;
			wrmsr(MSR_IA32_SPEC_CTRL, v);
	hw_ibrs_active = (cpu_stdext_feature3 & CPUID_STDEXT3_IBPB) != 0 &&
	    !hw_ibrs_disable;
hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)

	val = hw_ibrs_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
	hw_ibrs_disable = val != 0;
	hw_ibrs_recalculate();

SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");
 * Enable and restore kernel text write permissions.
 * Callers must ensure that disable_wp()/restore_wp() are executed
 * without rescheduling on the same core.
	if ((cr0 & CR0_WP) == 0)
		return (false);
	load_cr0(cr0 & ~CR0_WP);
	return (true);
restore_wp(bool old_wp)

	if (old_wp)
		load_cr0(rcr0() | CR0_WP);
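/*
 * Illustrative sketch (hypothetical, not part of this file): patching
 * kernel text with write protection temporarily cleared.  The critical
 * section prevents rescheduling between disable_wp() and restore_wp(),
 * as the comment above requires.
 */
static void
example_patch_text(uint8_t *insn, uint8_t val)	/* hypothetical helper */
{
	bool wp;

	critical_enter();
	wp = disable_wp();
	*insn = val;
	restore_wp(wp);
	critical_exit();
}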