/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/tss.h>
#include <machine/smp.h>

#include <x86/acpica_machdep.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>
#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2
/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* Not applicable */
}

void
acpi_cpu_c1(void)
{

	__asm __volatile("sti; hlt");
}
/*
 * Use mwait to pause execution while waiting for an interrupt or
 * another thread to signal that there is more work.
 *
 * NOTE: Interrupts will cause a wakeup; however, this function does
 * not enable interrupt handling.  The caller is responsible for
 * enabling interrupts.
 */
void
acpi_cpu_idle_mwait(uint32_t mwait_hint)
{
	int *state;

	/*
	 * A comment in a Linux patch claims that 'CPUs run faster with
	 * speculation protection disabled.  All CPU threads in a core
	 * must disable speculation protection for it to be
	 * disabled.  Disable it while we are idle so the other
	 * hyperthread can run fast.'
	 *
	 * XXXKIB.  Software coordination mode should be supported,
	 * but all Intel CPUs provide hardware coordination.
	 */

	state = (int *)PCPU_PTR(monitorbuf);
	KASSERT(*state == STATE_SLEEPING,
	    ("cpu_mwait_cx: wrong monitorbuf state"));
	*state = STATE_MWAIT;

	cpu_monitor(state, 0, 0);
	if (*state == STATE_MWAIT)
		cpu_mwait(MWAIT_INTRBREAK, mwait_hint);

	/*
	 * We should exit on any event that interrupts mwait, because
	 * that event might be a wanted interrupt.
	 */
	*state = STATE_RUNNING;
}
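
/*
 * Illustration (comment only, grounded in the protocol above): because
 * mwait monitors the line armed by cpu_monitor(), another CPU can wake
 * this one with a plain store to its monitorbuf; no IPI is needed while
 * the CPU sits in mwait.  In essence, a waker does:
 *
 *	state = (int *)remote_pcpu->pc_monitorbuf;
 *	if (*state == STATE_MWAIT)
 *		*state = STATE_RUNNING;	// the store ends the remote mwait
 *
 * cpu_idle_wakeup() below implements exactly this pattern.
 */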
/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	uint64_t tsc1, tsc2;
	uint64_t acnt, mcnt, perf;
	register_t reg;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9)-based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

#ifdef SMP
	if (smp_cpus > 1) {
		/* Schedule ourselves on the indicated cpu. */
		thread_lock(curthread);
		sched_bind(curthread, cpu_id);
		thread_unlock(curthread);
	}
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		tsc1 = rdtsc();
		DELAY(1000);
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		tsc2 = rdtsc();
		intr_restore(reg);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
	} else {
		tsc1 = rdtsc();
		DELAY(1000);
		tsc2 = rdtsc();
		intr_restore(reg);
		*rate = (tsc2 - tsc1) * 1000;
	}

#ifdef SMP
	if (smp_cpus > 1) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}
#endif

	return (0);
}
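
/*
 * Worked example of the math above (illustrative numbers): over the
 * 1 ms DELAY window, a TSC delta of 2,000,000 scales to a nominal
 * 2,000,000 * 1000 = 2.0 GHz.  If APERF/MPERF shows the core actually
 * ran at half the reference clock (acnt / mcnt == 1 / 2), then
 * perf = 1000 * acnt / mcnt = 500 and the estimate becomes
 * (tsc2 - tsc1) * perf = 2,000,000 * 500 = 1.0 GHz, the effective rate.
 */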
/*
 * Shut down the CPU as much as possible.
 */
void
cpu_halt(void)
{

	for (;;)
		halt();
}

bool
cpu_mwait_usable(void)
{

	return ((cpu_feature2 & CPUID2_MON) != 0 && ((cpu_mon_mwait_flags &
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)) ==
	    (CPUID5_MON_MWAIT_EXT | CPUID5_MWAIT_INTRBREAK)));
}
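
/*
 * Note on the check above: CPUID leaf 5 must advertise both the MWAIT
 * extensions (CPUID5_MON_MWAIT_EXT) and the ability to treat interrupts
 * as break events even when masked (CPUID5_MWAIT_INTRBREAK).  The
 * latter is what lets the idle loops below enter mwait with interrupts
 * disabled and still be woken by a pending interrupt.
 */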
void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */
static int	cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int	idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");
static void
cpu_idle_acpi(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook(sbt);
	else
		acpi_cpu_c1();
	*state = STATE_RUNNING;
}
static void
cpu_idle_hlt(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, please note
	 * that on x86 this works fine because interrupts are enabled
	 * only after the instruction following sti takes effect, while
	 * IF is set to 1 immediately, allowing the hlt instruction to
	 * acknowledge the interrupt.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		acpi_cpu_c1();
	*state = STATE_RUNNING;
}
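
/*
 * Sketch of the race the comment above guards against (illustrative
 * pseudo-code, not part of the kernel).  A naive idle sequence:
 *
 *	disable_intr();
 *	if (!sched_runnable()) {
 *		enable_intr();	// an interrupt may fire here...
 *		halt();		// ...and we halt anyway, missing it
 *	}
 *
 * Because acpi_cpu_c1() executes "sti; hlt" back to back, an interrupt
 * arriving after disable_intr() is held pending until hlt begins and
 * then terminates the halt, so the wakeup cannot be lost.
 */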
static void
cpu_idle_mwait(sbintime_t sbt)
{
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_MWAIT;

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable()) {
		enable_intr();
		*state = STATE_RUNNING;
		return;
	}
	cpu_monitor(state, 0, 0);
	if (*state == STATE_MWAIT)
		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
	else
		enable_intr();
	*state = STATE_RUNNING;
}
static void
cpu_idle_spin(sbintime_t sbt)
{
	int *state;
	int i;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_RUNNING;

	/*
	 * The sched_runnable() check is racy, but since it is retried
	 * in a loop, missing it once has little impact, if any, and is
	 * much better than not checking at all.
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}
}
/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)
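
/*
 * Derived value, for reference: AMDK8_CMPHALT combines bits 27 and 28,
 * i.e. 0x18000000ULL, so the workaround in cpu_idle() below can clear
 * both bits of the IPM MSR in a single read-modify-write.
 */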
void
cpu_probe_amdc1e(void)
{

	/*
	 * Detect the presence of C1E capability, mostly on the latest
	 * dual-core (and future) K8 family processors.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
	}
}
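
/*
 * Decoding note (illustrative): cpu_id holds the CPUID leaf 1 EAX
 * value.  Bits 11:8 are the base family, so
 * (cpu_id & 0x00000f00) == 0x00000f00 selects family 0Fh (K8).  Bits
 * 27:16 hold the extended family and extended model fields, so
 * requiring (cpu_id & 0x0fff0000) >= 0x00040000 limits the match to
 * later K8 revisions.  For example, a hypothetical cpu_id of
 * 0x00040f12 matches, while 0x00020f10 does not.
 */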
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;

void
cpu_idle(int busy)
{
	uint64_t msr;
	sbintime_t sbt = -1;

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#ifdef MP_WATCHDOG
	ap_watchdog(PCPU_GET(cpuid));
#endif

	/* If we are busy, try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}

	/* If we have time, switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		sbt = cpu_idleclock();
	}

	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
	}

	/* Call main idle method. */
	cpu_idle_fn(sbt);

	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
out:
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);
}
int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *state;

	pcpu = pcpu_find(cpu);
	state = (int *)pcpu->pc_monitorbuf;

	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (*state == STATE_SLEEPING)
		return (0);
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;
	return (1);
}
/*
 * Ordered by speed/power consumption.
 */
static struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },
	{ NULL, NULL }
};
static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	int error;
	char *avail, *p;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");
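
/*
 * Usage sketch (output is illustrative; the actual list depends on CPU
 * features and on whether an ACPI idle hook is registered):
 *
 *	# sysctl machdep.idle_available
 *	machdep.idle_available: spin, mwait, hlt, acpi
 */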
static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
			continue;
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");
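
/*
 * Usage sketch: switching the idle method at runtime (the old value
 * shown is illustrative):
 *
 *	# sysctl machdep.idle=hlt
 *	machdep.idle: acpi -> hlt
 */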
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RWTUN,
    &panic_on_nmi, 0,
    "Panic on NMI");
int nmi_is_broadcast = 1;
SYSCTL_INT(_machdep, OID_AUTO, nmi_is_broadcast, CTLFLAG_RWTUN,
    &nmi_is_broadcast, 0,
    "Chipset NMI is broadcast");
#ifdef KDB
int kdb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, kdb_on_nmi, CTLFLAG_RWTUN,
    &kdb_on_nmi, 0,
    "Go to KDB on NMI");
#endif
void
nmi_call_kdb(u_int cpu, u_int type, struct trapframe *frame)
{

	/* machine/parity/power fail/"kitchen sink" faults */
	if (isa_nmi(frame->tf_err) == 0) {
#ifdef KDB
		/*
		 * NMI can be hooked up to a pushbutton for debugging.
		 */
		if (kdb_on_nmi) {
			printf("NMI/cpu%d ... going to debugger\n", cpu);
			kdb_trap(type, 0, frame);
		}
#endif /* KDB */
	} else if (panic_on_nmi) {
		panic("NMI indicates hardware failure");
	}
}
void
nmi_handle_intr(u_int type, struct trapframe *frame)
{

#ifdef SMP
	if (nmi_is_broadcast) {
		nmi_call_kdb_smp(type, frame);
		return;
	}
#endif
	nmi_call_kdb(PCPU_GET(cpuid), type, frame);
}
int hw_ibrs_active;
int hw_ibrs_disable = 1;

SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
    "Indirect Branch Restricted Speculation active");

void
hw_ibrs_recalculate(void)
{
	uint64_t v;

	if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
		if (hw_ibrs_disable) {
			v = rdmsr(MSR_IA32_SPEC_CTRL);
			v &= ~(uint64_t)IA32_SPEC_CTRL_IBRS;
			wrmsr(MSR_IA32_SPEC_CTRL, v);
		} else {
			v = rdmsr(MSR_IA32_SPEC_CTRL);
			v |= IA32_SPEC_CTRL_IBRS;
			wrmsr(MSR_IA32_SPEC_CTRL, v);
		}
		return;
	}
	hw_ibrs_active = (cpu_stdext_feature3 & CPUID_STDEXT3_IBPB) != 0 &&
	    !hw_ibrs_disable;
}
static int
hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = hw_ibrs_disable;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	hw_ibrs_disable = val != 0;
	hw_ibrs_recalculate();
	return (0);
}
SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |
    CTLFLAG_NOFETCH | CTLFLAG_MPSAFE, NULL, 0, hw_ibrs_disable_handler, "I",
    "Disable Indirect Branch Restricted Speculation");