 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 *
 * This code is derived from software contributed to Berkeley by
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atalk.h"
#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_platform.h"
#include "opt_sched.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#error KDB must be enabled in order for DDB to work!
#include <ddb/db_sym.h>

#include <net/netisr.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/perfmon.h>
#include <machine/tss.h>
#include <machine/smp.h>

#include <x86/isa/icu.h>
#include <machine/apicvar.h>
#include <isa/isareg.h>

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern u_int64_t hammer_time(u_int64_t, u_int64_t);

extern void printcpuinfo(void);	/* XXX header file */
extern void identify_cpu(void);
extern void panicifcpuunsupported(void);

#define CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
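/*
 * Editor's note (not in the original): EFL_SECURE() accepts a new rflags
 * value only if every bit that differs from the old value lies within
 * PSL_USERCHANGE, i.e. a user may toggle arithmetic flags, but an attempt
 * to change privileged bits such as IOPL fails the check.
 */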
static void cpu_startup(void *);
static void get_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpusave, size_t xfpusave_len);
static int set_fpcontext(struct thread *td, const mcontext_t *mcp,
    char *xfpustate, size_t xfpustate_len);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

 * The file "conf/ldscript.amd64" defines the symbol "kernphys".  Its value is
 * the physical address at which the kernel is loaded.
extern char kernphys[];

extern vm_offset_t ksym_start, ksym_end;

struct msgbuf *msgbufp;

/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	ICH_PMBASE + 0x30

int _udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel;

 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
#define PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
/* must be 2 less so that a 0/0 pair can signal the end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
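/*
 * Editor's sketch (illustrative, not original code): both arrays hold
 * {start, end} pairs terminated by a 0/0 sentinel, so consumers walk
 * them like this:
 *
 *	vm_paddr_t total = 0;
 *	for (i = 0; phys_avail[i + 1] != 0; i += 2)
 *		total += phys_avail[i + 1] - phys_avail[i];
 */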
struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct region_descriptor r_gdt, r_idt;

struct pcpu __pcpu[MAXCPU];

struct mem_range_softc mem_range_softc;

struct mtx dt_lock;	/* lock for GDT and LDT */

void (*vmm_resume_p)(void);
	 * On MacBooks, we need to prevent the legacy USB circuit from
	 * generating an SMI#, because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	sysenv = getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			printf("Disabling LEGACY_USB_EN bit on "
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);

	 * Good {morning,afternoon,evening,night}.
	panicifcpuunsupported();

	 * Display physical memory if SMBIOS reports reasonable amount.
	sysenv = getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
	if (memsize < ptoa((uintmax_t)cnt.v_free_count))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
	realmem = atop(memsize);

	 * Display any holes after the first chunk of extended memory.
		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			size = phys_avail[indx + 1] - phys_avail[indx];
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	 * Set up buffers, so they can be used to read disk labels.
	vm_pager_bufferinit();

 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by call
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
	struct sigframe sf, *sfp;
	struct trapframe *regs;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	oonstack = sigonstack(regs->tf_rsp);
	if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) {
		xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu);
		xfpusave = __builtin_alloca(xfpusave_len);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
	sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase;
	sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase;
	bzero(sf.sf_uc.uc_mcontext.mc_spare,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
		sp = (char *)regs->tf_rsp - 128;
	if (xfpusave != NULL) {
		sp = (char *)((unsigned long)sp & ~0x3Ful);
		sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
	sp -= sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	regs->tf_rdi = sig;			/* arg 1 in %rdi */
	regs->tf_rdx = (register_t)&sfp->sf_uc;	/* arg 3 in %rdx */
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		regs->tf_rsi = (register_t)&sfp->sf_si;	/* arg 2 in %rsi */
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
		/* Old FreeBSD-style arguments. */
		regs->tf_rsi = ksi->ksi_code;	/* arg 2 in %rsi */
		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
		sf.sf_ahu.sf_handler = catcher;
	mtx_unlock(&psp->ps_mtx);

	 * Copy the sigframe out to the user's stack.
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    (xfpusave != NULL && copyout(xfpusave,
	    (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
		printf("process %ld has trashed its stack\n", (long)p->p_pid);

	regs->tf_rsp = (long)sfp;
	regs->tf_rip = p->p_sysent->sv_sigcode_base;
	regs->tf_rflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _ufssel;
	regs->tf_gs = _ugssel;
	regs->tf_flags = TF_HASSEGS;
	set_pcb_flags(pcb, PCB_FULL_IRET);
	mtx_lock(&psp->ps_mtx);

 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
sys_sigreturn(td, uap)
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	struct trapframe *regs;
	size_t xfpustate_len;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
		uprintf("pid %d (%s): sigreturn copyin failed\n",
		    p->p_pid, td->td_name);
	if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
		uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
		    td->td_name, ucp->uc_mcontext.mc_flags);
	rflags = ucp->uc_mcontext.mc_rflags;

	 * Don't allow users to change privileged or reserved flags.
	if (!EFL_SECURE(rflags, regs->tf_rflags)) {
		uprintf("pid %d (%s): sigreturn rflags = 0x%lx\n", p->p_pid,
		    td->td_name, rflags);
	 * Don't allow users to load a valid privileged %cs.  Let the
	 * hardware check for invalid selectors, excess privilege in
	 * other selectors, invalid %rip's and invalid %rsp's.
	cs = ucp->uc_mcontext.mc_cs;
	if (!CS_SECURE(cs)) {
		uprintf("pid %d (%s): sigreturn cs = 0x%x\n", p->p_pid,
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = BUS_OBJERR;
		ksi.ksi_trapno = T_PROTFLT;
		ksi.ksi_addr = (void *)regs->tf_rip;
		trapsignal(td, &ksi);

	if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
		xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
		if (xfpustate_len > cpu_max_ext_state_size -
		    sizeof(struct savefpu)) {
			uprintf("pid %d (%s): sigreturn xfpustate_len = 0x%zx\n",
			    p->p_pid, td->td_name, xfpustate_len);
		xfpustate = __builtin_alloca(xfpustate_len);
		error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
		    xfpustate, xfpustate_len);
	"pid %d (%s): sigreturn copying xfpustate failed\n",
			    p->p_pid, td->td_name);

	ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate, xfpustate_len);
		uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n",
		    p->p_pid, td->td_name, ret);
	bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
	pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase;
	pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase;

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	set_pcb_flags(pcb, PCB_FULL_IRET);
	return (EJUSTRETURN);

#ifdef COMPAT_FREEBSD4
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
	return sys_sigreturn(td, (struct sigreturn_args *)uap);

 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()

 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
cpu_flush_dcache(void *ptr, size_t len)

/* Get current clock frequency for the given cpu id. */
cpu_est_clockrate(int cpu_id, uint64_t *rate)
	uint64_t acnt, mcnt, perf;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)

	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9) based logic fails.
	if (tsc_is_invariant && !tsc_perf_stat)

	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
		*rate = (tsc2 - tsc1) * 1000;

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
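/*
 * Editor's note on the arithmetic above, assuming the elided calibration
 * delay is DELAY(1000) (1 ms) as in stock FreeBSD: tsc2 - tsc1 is then
 * TSC ticks per millisecond, so scaling by 1000 yields Hz, and on
 * APERF/MPERF-capable CPUs the acnt/mcnt ratio (times 1000) rescales the
 * invariant TSC rate to the actual core clock.
 */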
 * Shutdown the CPU as much as possible

void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */
static int cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int idle_mwait = 1;		/* Use MONITOR/MWAIT for short idle. */
TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

#define STATE_RUNNING	0x0
#define STATE_MWAIT	0x1
#define STATE_SLEEPING	0x2

cpu_idle_acpi(sbintime_t sbt)
	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/* See comments in cpu_idle_hlt(). */
	if (sched_runnable())
	else if (cpu_idle_hook)
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;

cpu_idle_hlt(sbintime_t sbt)
	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, please note
	 * that on x86 this works because interrupts are enabled only
	 * after the instruction following sti completes, while IF is set
	 * to 1 immediately, allowing the hlt instruction to acknowledge the
	 * interrupt.
	if (sched_runnable())
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;

 * MWAIT cpu power states.  Lower 4 bits are sub-states.
#define MWAIT_C0	0xf0
#define MWAIT_C1	0x00
#define MWAIT_C2	0x10
#define MWAIT_C3	0x20
#define MWAIT_C4	0x30
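/*
 * Editor's note: the MWAIT hint passed in %eax encodes the target C-state
 * minus one in bits 7:4 and the sub-state in bits 3:0, so MWAIT_C2 (0x10)
 * requests C2, while the special value 0xf in bits 7:4 (MWAIT_C0) means
 * "stay in C0".
 */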
cpu_idle_mwait(sbintime_t sbt)
	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_MWAIT;

	/* See comments in cpu_idle_hlt(). */
	if (sched_runnable()) {
		*state = STATE_RUNNING;
	cpu_monitor(state, 0, 0);
	if (*state == STATE_MWAIT)
		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
	*state = STATE_RUNNING;

cpu_idle_spin(sbintime_t sbt)
	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_RUNNING;
	 * The sched_runnable() call is racy, but since this is
	 * a loop, missing it once has little impact, if any
	 * (and it is much better than not checking at all).
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())

 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 * #32559 revision 3.00+
#define MSR_AMDK8_IPM		0xc0010055
#define AMDK8_SMIONCMPHALT	(1ULL << 27)
#define AMDK8_C1EONCMPHALT	(1ULL << 28)
#define AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)

cpu_probe_amdc1e(void)
	 * Detect the presence of the C1E capability, found mostly on
	 * recent dual-core (and later) K8-family CPUs.
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;

void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	ap_watchdog(PCPU_GET(cpuid));

	/* If we are busy - try to use fast methods. */
	if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
		cpu_idle_mwait(busy);

	/* If we have time - switch timers into idle mode. */
		sbt = cpu_idleclock();

	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e && cpu_disable_deep_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);

	/* Call main idle method. */
	/* Switch timers back into active mode. */
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",

cpu_idle_wakeup(int cpu)
	pcpu = pcpu_find(cpu);
	state = (int *)pcpu->pc_monitorbuf;

	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	if (*state == STATE_SLEEPING)
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;

 * Ordered by speed/power consumption.
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },

idle_sysctl_available(SYSCTL_HANDLER_ARGS)
	avail = malloc(256, M_TEMP, M_WAITOK);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	error = sysctl_handle_string(oidp, avail, 0, req);

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");
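/*
 * Usage sketch (editor's note): the available list is read-only, while
 * machdep.idle selects the active method at runtime, e.g.:
 *
 *	# sysctl machdep.idle_available
 *	# sysctl machdep.idle=mwait
 */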
idle_sysctl(SYSCTL_HANDLER_ARGS)
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
		if (strcmp(idle_tbl[i].id_name, buf))
		cpu_idle_fn = idle_tbl[i].id_fn;

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");

 * Reset registers to default values on exec.
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	if (td->td_proc->p_md.md_ldt != NULL)
		mtx_unlock(&dt_lock);

	clear_pcb_flags(pcb, PCB_32BIT);
	pcb->pcb_initial_fpucw = __INITIAL_FPUCW__;
	set_pcb_flags(pcb, PCB_FULL_IRET);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_rip = imgp->entry_addr;
	regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
	regs->tf_rdi = stack;		/* argv */
	regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _ufssel;
	regs->tf_gs = _ugssel;
	regs->tf_flags = TF_HASSEGS;
	td->td_retval[1] = 0;

	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	if (pcb->pcb_flags & PCB_DBREGS) {
		 * Clear the debug registers on the running
		 * CPU, otherwise they will end up affecting
		 * the next process we switch to.
		clear_pcb_flags(pcb, PCB_DBREGS);

	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.

	 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
	 * BSP.  See the comments there about why we set them.
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;

 * Initialize amd64 and configure to run kernel

 * Initialize segments & interrupt table
struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */

static char dblfault_stack[PAGE_SIZE] __aligned(16);

static char nmi0_stack[PAGE_SIZE] __aligned(16);
CTASSERT(sizeof(struct nmi_pcpu) == 16);

struct amd64tss common_tss[MAXCPU];

 * Software prototypes -- in more palatable form.
 *
 * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same
 * slots as corresponding segments for i386 kernel.
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
/* GNULL2_SEL	1 Null Descriptor */
/* GUFS32_SEL	2 32 bit %gs Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
/* GUGS32_SEL	3 32 bit %fs Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
/* GCODE_SEL	4 Code Descriptor for kernel */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
/* GDATA_SEL	5 Data Descriptor for kernel */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
/* GUCODE32_SEL	6 32 bit Code Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
/* GUDATA_SEL	7 32/64 bit Data Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
/* GUCODE_SEL	8 64 bit Code Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
	.ssd_limit = sizeof(struct amd64tss) + IOPAGES * PAGE_SIZE - 1,
	.ssd_type = SDT_SYSTSS,
/* Actually, the TSS is a system descriptor which is double size */
/* GUSERLDT_SEL	11 LDT Descriptor */
/* GUSERLDT_SEL	12 LDT Descriptor, double size */

setidt(idx, func, typ, dpl, ist)
	struct gate_descriptor *ip;

	ip->gd_looffset = (uintptr_t)func;
	ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	ip->gd_hioffset = ((uintptr_t)func)>>16 ;

	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(dblfault),
#ifdef KDTRACE_HOOKS
	IDTVEC(xen_intr_upcall),
	IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
DB_SHOW_COMMAND(idt, db_show_idt)
	struct gate_descriptor *ip;

	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);

/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
	} __packed idtr, gdtr;

	__asm __volatile("sidt %0" : "=m" (idtr));
	db_printf("idtr\t0x%016lx/%04x\n",
	    (u_long)idtr.base, (u_int)idtr.limit);
	__asm __volatile("sgdt %0" : "=m" (gdtr));
	db_printf("gdtr\t0x%016lx/%04x\n",
	    (u_long)gdtr.base, (u_int)gdtr.limit);
	__asm __volatile("sldt %0" : "=r" (ldt));
	db_printf("ldtr\t0x%04x\n", ldt);
	__asm __volatile("str %0" : "=r" (tr));
	db_printf("tr\t0x%04x\n", tr);
	db_printf("cr0\t0x%016lx\n", rcr0());
	db_printf("cr2\t0x%016lx\n", rcr2());
	db_printf("cr3\t0x%016lx\n", rcr3());
	db_printf("cr4\t0x%016lx\n", rcr4());
	db_printf("EFER\t%016lx\n", rdmsr(MSR_EFER));
	db_printf("FEATURES_CTL\t%016lx\n", rdmsr(MSR_IA32_FEATURE_CONTROL));
	db_printf("DEBUG_CTL\t%016lx\n", rdmsr(MSR_DEBUGCTLMSR));
	db_printf("PAT\t%016lx\n", rdmsr(MSR_PAT));
	db_printf("GSBASE\t%016lx\n", rdmsr(MSR_GSBASE));

	struct user_segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;

	ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type = sd->sd_type;
	ssd->ssd_dpl = sd->sd_dpl;
	ssd->ssd_p = sd->sd_p;
	ssd->ssd_long = sd->sd_long;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran = sd->sd_gran;

	struct soft_segment_descriptor *ssd;
	struct user_segment_descriptor *sd;

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type = ssd->ssd_type;
	sd->sd_dpl = ssd->ssd_dpl;
	sd->sd_p = ssd->ssd_p;
	sd->sd_long = ssd->ssd_long;
	sd->sd_def32 = ssd->ssd_def32;
	sd->sd_gran = ssd->ssd_gran;

	struct soft_segment_descriptor *ssd;
	struct system_segment_descriptor *sd;

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type = ssd->ssd_type;
	sd->sd_dpl = ssd->ssd_dpl;
	sd->sd_p = ssd->ssd_p;
	sd->sd_gran = ssd->ssd_gran;

#if !defined(DEV_ATPIC) && defined(DEV_ISA)
#include <isa/isavar.h>
#include <isa/isareg.h>
 * Return a bitmap of the current interrupt requests.  This is 8259-specific
 * and is only suitable for use at probe time.
 * This is only here to pacify sio.  It is NOT FATAL if this doesn't work.
 * It shouldn't be here.  There should probably be an APIC centric
 * implementation in the apic driver code, if at all.
isa_irq_pending(void)
	irr1 = inb(IO_ICU1);
	irr2 = inb(IO_ICU2);
	return ((irr2 << 8) | irr1);

add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	insert_idx = physmap_idx + 2;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
			if (boothowto & RB_VERBOSE)
	    "Overlapping memory regions, ignoring second region\n");

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;

	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYSMAP_SIZE) {
	    "Too many segments in the physical address map, giving up\n");

	 * Move the last 'N' entries down to make room for the new
	for (i = physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;

add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap,
	struct bios_smap *smap, *smapend;

	 * Memory map from INT 15:E820.
	 *
	 * subr_module.c says:
	 * "Consumer may safely assume that size value precedes data."
	 * ie: an int32_t immediately precedes smap.
	smapsize = *((u_int32_t *)smapbase - 1);
	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

	for (smap = smapbase; smap < smapend; smap++) {
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%016lx len=%016lx\n",
			    smap->type, smap->base, smap->length);

		if (smap->type != SMAP_TYPE_MEMORY)

		if (!add_physmap_entry(smap->base, smap->length, physmap,

#define efi_next_descriptor(ptr, size) \
	((struct efi_md *)(((uint8_t *) ptr) + size))
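/*
 * Editor's note: the map must be walked using the firmware-reported
 * descriptor_size rather than sizeof(struct efi_md), because a UEFI
 * implementation is free to use a larger per-descriptor stride than the
 * structure this kernel defines.
 */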
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
	struct efi_md *map, *p;
	static const char *types[] = {
		"RuntimeServicesCode",
		"RuntimeServicesData",
		"ConventionalMemory",
		"ACPIReclaimMemory",
		"MemoryMappedIOPortSpace",

	 * Memory map data provided by UEFI via the GetMemoryMap
	 * Boot Services API.
	efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
	map = (struct efi_md *)((uint8_t *)efihdr + efisz);
	if (efihdr->descriptor_size == 0)
	ndesc = efihdr->memory_size / efihdr->descriptor_size;

	if (boothowto & RB_VERBOSE)
		printf("%23s %12s %12s %8s %4s\n",
		    "Type", "Physical", "Virtual", "#Pages", "Attr");

	for (i = 0, p = map; i < ndesc; i++,
	    p = efi_next_descriptor(p, efihdr->descriptor_size)) {
		if (boothowto & RB_VERBOSE) {
			if (p->md_type <= EFI_MD_TYPE_PALCODE)
				type = types[p->md_type];
			printf("%23s %012lx %12p %08lx ", type, p->md_phys,
			    p->md_virt, p->md_pages);
			if (p->md_attr & EFI_MD_ATTR_UC)
			if (p->md_attr & EFI_MD_ATTR_WC)
			if (p->md_attr & EFI_MD_ATTR_WT)
			if (p->md_attr & EFI_MD_ATTR_WB)
			if (p->md_attr & EFI_MD_ATTR_UCE)
			if (p->md_attr & EFI_MD_ATTR_WP)
			if (p->md_attr & EFI_MD_ATTR_RP)
			if (p->md_attr & EFI_MD_ATTR_XP)
			if (p->md_attr & EFI_MD_ATTR_RT)

		switch (p->md_type) {
		case EFI_MD_TYPE_CODE:
		case EFI_MD_TYPE_DATA:
		case EFI_MD_TYPE_BS_CODE:
		case EFI_MD_TYPE_BS_DATA:
		case EFI_MD_TYPE_FREE:
			 * We're allowed to use any entry with these types.

		if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
		    physmap, physmap_idx))

static char bootmethod[16] = "";
SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
    "System firmware boot method");

 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
getmemsize(caddr_t kmdp, u_int64_t first)
	int i, physmap_idx, pa_indx, da_indx;
	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
	u_long physmem_start, physmem_tunable, memtest;
	struct bios_smap *smapbase;
	struct efi_map_header *efihdr;
	quad_t dcons_addr, dcons_size;

	bzero(physmap, sizeof(physmap));

	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	smapbase = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);

	if (efihdr != NULL) {
		add_efi_map_entries(efihdr, physmap, &physmap_idx);
		strlcpy(bootmethod, "UEFI", sizeof(bootmethod));
	} else if (smapbase != NULL) {
		add_smap_entries(smapbase, physmap, &physmap_idx);
		strlcpy(bootmethod, "BIOS", sizeof(bootmethod));
		panic("No BIOS smap or EFI map info from loader!");

	 * Find the 'base memory' segment for SMP
	for (i = 0; i <= physmap_idx; i += 2) {
		if (physmap[i] == 0x00000000) {
			basemem = physmap[i + 1] / 1024;
		panic("BIOS smap did not include a basemem segment!");

	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);

	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	Maxmem = atop(physmap[physmap_idx + 1]);
	Maxmem = MAXMEM / 4;

	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	 * By default enable the memory test on real hardware, and disable
	 * it if we appear to be running in a VM.  This avoids touching all
	 * pages unnecessarily, which doesn't matter on real hardware but is
	 * bad for shared VM hosts.  Use a general name so that
	 * one could eventually do more with the code than just disable it.
	memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1;
	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);

	 * Don't allow MAXMEM or hw.physmem to extend the amount of memory
	if (Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(&first);
	 * Size up each available chunk of physical memory.
	 *
	 * XXX Some BIOSes corrupt low 64KB between suspend and resume.
	 * By default, mask off the first 16 pages unless we appear to be
	 * running in a VM.
	physmem_start = (vm_guest > VM_GUEST_NO ? 1 : 16) << PAGE_SHIFT;
	TUNABLE_ULONG_FETCH("hw.physmem.start", &physmem_start);
	if (physmem_start < PAGE_SIZE)
		physmap[0] = PAGE_SIZE;
	else if (physmem_start >= physmap[1])
		physmap[0] = round_page(physmap[1] - PAGE_SIZE);
		physmap[0] = round_page(physmem_start);

	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];

	 * Get dcons buffer address
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)

	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	for (i = 0; i <= physmap_idx; i += 2) {
		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR1;

			 * block out kernel memory as not available.
			if (pa >= (vm_paddr_t)kernphys && pa < first)

			 * block out dcons buffer
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)

			 * map page into kernel: valid, read/write, non-cacheable
			*pte = pa | PG_V | PG_RW | PG_NC_PWT | PG_NC_PCD;

			 * Test for alternating 1's and 0's
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)

			 * Test for alternating 0's and 1's
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)

			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)

			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)

			 * Restore original value.

			 * Adjust array of valid/good pages.
			if (page_bad == TRUE)

			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one past the last valid
			 * byte, making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we're at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
	    "Too many holes in the physical address space, giving up\n");
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */

	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(msgbufsize) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(msgbufsize);

	/* Map the message buffer. */
	msgbufp = (struct msgbuf *)PHYS_TO_DMAP(phys_avail[pa_indx]);

hammer_time(u_int64_t modulep, u_int64_t physfree)
	struct nmi_pcpu *np;
	struct xstate_hdr *xhdr;

	thread0.td_kstack = physfree + KERNBASE;
	thread0.td_kstack_pages = KSTACK_PAGES;
	kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
	bzero((void *)thread0.td_kstack, kstack0_sz);
	physfree += kstack0_sz;

	 * This may be done better later if it gets more high level
	 * components in it.  If so just link td->td_proc here.
	proc_linkup0(&proc0, &thread0);

	preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
	preload_bootstrap_relocate(KERNBASE);
	kmdp = preload_search_by_type("elf kernel");
		kmdp = preload_search_by_type("elf64 kernel");
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE;
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);

	/* Init basic tunables, hz etc */

	 * make gdt memory segments
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
		    x != GUSERLDT_SEL && x != (GUSERLDT_SEL) + 1)
			ssdtosd(&gdt_segs[x], &gdt[x]);
	gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) gdt;

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, 0);		/* User value while in the kernel */

	pcpu_init(pc, 0, sizeof(struct pcpu));
	dpcpu_init((void *)(physfree + KERNBASE), 0);
	physfree += DPCPU_SIZE;
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(tssp, &common_tss[0]);
	PCPU_SET(commontssp, &common_tss[0]);
	PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
	PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]);
	PCPU_SET(fs32p, &gdt[GUFS32_SEL]);
	PCPU_SET(gs32p, &gdt[GUGS32_SEL]);

	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF);

	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 2);
	setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0);
	setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
	setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0);
#ifdef KDTRACE_HOOKS
	setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYSIGT, SEL_UPL, 0);
	setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), SDT_SYSIGT, SEL_UPL, 0);

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (long) idt;

	 * Initialize the i8254 before the console so that console
	 * initialization can use DELAY().

	 * Use vt(4) by default for UEFI boot (during the sc(4)/vt(4)
	if (kmdp != NULL && preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP) != NULL)
		vty_set_preferred(VTY_VT);

	 * Initialize the console before we print anything out.

	/* Reset and mask the atpics and leave them shut down. */

	 * Point the ICU spurious interrupt vectors at the APIC spurious
	 * interrupt handler.
	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
#error "have you forgotten the isa device?";

	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");

	identify_cpu();		/* Final stage of CPU initialization */
	initializecpu();	/* Initialize CPU registers */
	initializecpucache();

	/* doublefault stack space, runs on ist1 */
	common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];

	 * NMI stack, runs on ist2.  The pcpu pointer is stored just
	 * above the start of the ist2 stack.
	np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1;
	np->np_pcpu = (register_t) pc;
	common_tss[0].tss_ist2 = (long) np;

	/* Set the IO permission bitmap (empty due to tss seg limit) */
	common_tss[0].tss_iobase = sizeof(struct amd64tss) +
	    IOPAGES * PAGE_SIZE;

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	getmemsize(kmdp, physfree);
	init_param2(physmem);
	/* now running on new page tables, configured, and u/iom is accessible */
	msgbufinit(msgbufp, msgbufsize);

	 * Set up thread0 pcb after fpuinit calculated pcb + fpu save
	 * area size.  Zero out the extended state header in fpu save
	thread0.td_pcb = get_pcb_td(&thread0);
	bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
	xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
	xhdr->xstate_bv = xsave_mask;

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	common_tss[0].tss_rsp0 = (vm_offset_t)thread0.td_pcb;
	/* Ensure the stack is aligned to 16 bytes */
	common_tss[0].tss_rsp0 &= ~0xFul;
	PCPU_SET(rsp0, common_tss[0].tss_rsp0);
	PCPU_SET(curpcb, thread0.td_pcb);

	/* transfer to user mode */
	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);
	_ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
	_ufssel = GSEL(GUFS32_SEL, SEL_UPL);
	_ugssel = GSEL(GUGS32_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_cr3 = KPML4phys; /* PCID 0 is reserved for kernel */
	thread0.td_frame = &proc0_tf;

	env = getenv("kernelname");
		strlcpy(kernelname, env, sizeof(kernelname));

	/* Location of kernel stack for locore */
	return ((u_int64_t)thread0.td_pcb);

cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
	pcpu->pc_acpi_id = 0xffffffff;

smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
	struct bios_smap *smapbase;
	struct bios_smap_xattr smap;
	int count, error, i;

	/* Retrieve the system memory map from the loader. */
	kmdp = preload_search_by_type("elf kernel");
		kmdp = preload_search_by_type("elf64 kernel");
	smapbase = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase == NULL)
	smapattr = (uint32_t *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
	count = *((uint32_t *)smapbase - 1) / sizeof(*smapbase);
	for (i = 0; i < count; i++) {
		smap.base = smapbase[i].base;
		smap.length = smapbase[i].length;
		smap.type = smapbase[i].type;
		if (smapattr != NULL)
			smap.xattr = smapattr[i];
		error = SYSCTL_OUT(req, &smap, sizeof(smap));

SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
    smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data");

efi_map_sysctl_handler(SYSCTL_HANDLER_ARGS)
	struct efi_map_header *efihdr;

	kmdp = preload_search_by_type("elf kernel");
		kmdp = preload_search_by_type("elf64 kernel");
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	efisize = *((uint32_t *)efihdr - 1);
	return (SYSCTL_OUT(req, efihdr, efisize));

SYSCTL_PROC(_machdep, OID_AUTO, efi_map, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
    efi_map_sysctl_handler, "S,efi_map_header", "Raw EFI Memory Map");

spinlock_enter(void)
	if (td->td_md.md_spinlock_count == 0) {
		flags = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_flags = flags;
		td->td_md.md_spinlock_count++;

	flags = td->td_md.md_saved_flags;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(flags);
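/*
 * Editor's sketch: the per-thread count makes these calls nestable;
 * interrupts are disabled by the outermost spinlock_enter() and the
 * saved flags are restored only by the matching outermost
 * spinlock_exit():
 *
 *	spinlock_enter();	count 0 -> 1, interrupts disabled
 *	spinlock_enter();	count 1 -> 2, no flag change
 *	spinlock_exit();	count 2 -> 1
 *	spinlock_exit();	count 1 -> 0, saved flags restored
 */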
 * Construct a PCB from a trapframe.  This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger.  We have the context in the trapframe, but base the trace
 * on the PCB.  The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
makectx(struct trapframe *tf, struct pcb *pcb)
	pcb->pcb_r12 = tf->tf_r12;
	pcb->pcb_r13 = tf->tf_r13;
	pcb->pcb_r14 = tf->tf_r14;
	pcb->pcb_r15 = tf->tf_r15;
	pcb->pcb_rbp = tf->tf_rbp;
	pcb->pcb_rbx = tf->tf_rbx;
	pcb->pcb_rip = tf->tf_rip;
	pcb->pcb_rsp = tf->tf_rsp;

ptrace_set_pc(struct thread *td, unsigned long addr)
	td->td_frame->tf_rip = addr;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);

ptrace_single_step(struct thread *td)
	td->td_frame->tf_rflags |= PSL_T;

ptrace_clear_single_step(struct thread *td)
	td->td_frame->tf_rflags &= ~PSL_T;

fill_regs(struct thread *td, struct reg *regs)
	struct trapframe *tp;

	return (fill_frame_regs(tp, regs));

fill_frame_regs(struct trapframe *tp, struct reg *regs)
	regs->r_r15 = tp->tf_r15;
	regs->r_r14 = tp->tf_r14;
	regs->r_r13 = tp->tf_r13;
	regs->r_r12 = tp->tf_r12;
	regs->r_r11 = tp->tf_r11;
	regs->r_r10 = tp->tf_r10;
	regs->r_r9  = tp->tf_r9;
	regs->r_r8  = tp->tf_r8;
	regs->r_rdi = tp->tf_rdi;
	regs->r_rsi = tp->tf_rsi;
	regs->r_rbp = tp->tf_rbp;
	regs->r_rbx = tp->tf_rbx;
	regs->r_rdx = tp->tf_rdx;
	regs->r_rcx = tp->tf_rcx;
	regs->r_rax = tp->tf_rax;
	regs->r_rip = tp->tf_rip;
	regs->r_cs = tp->tf_cs;
	regs->r_rflags = tp->tf_rflags;
	regs->r_rsp = tp->tf_rsp;
	regs->r_ss = tp->tf_ss;
	if (tp->tf_flags & TF_HASSEGS) {
		regs->r_ds = tp->tf_ds;
		regs->r_es = tp->tf_es;
		regs->r_fs = tp->tf_fs;
		regs->r_gs = tp->tf_gs;

set_regs(struct thread *td, struct reg *regs)
	struct trapframe *tp;

	rflags = regs->r_rflags & 0xffffffff;
	if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
	tp->tf_r15 = regs->r_r15;
	tp->tf_r14 = regs->r_r14;
	tp->tf_r13 = regs->r_r13;
	tp->tf_r12 = regs->r_r12;
	tp->tf_r11 = regs->r_r11;
	tp->tf_r10 = regs->r_r10;
	tp->tf_r9  = regs->r_r9;
	tp->tf_r8  = regs->r_r8;
	tp->tf_rdi = regs->r_rdi;
	tp->tf_rsi = regs->r_rsi;
	tp->tf_rbp = regs->r_rbp;
	tp->tf_rbx = regs->r_rbx;
	tp->tf_rdx = regs->r_rdx;
	tp->tf_rcx = regs->r_rcx;
	tp->tf_rax = regs->r_rax;
	tp->tf_rip = regs->r_rip;
	tp->tf_cs = regs->r_cs;
	tp->tf_rflags = rflags;
	tp->tf_rsp = regs->r_rsp;
	tp->tf_ss = regs->r_ss;
	if (0) {	/* XXXKIB */
		tp->tf_ds = regs->r_ds;
		tp->tf_es = regs->r_es;
		tp->tf_fs = regs->r_fs;
		tp->tf_gs = regs->r_gs;
		tp->tf_flags = TF_HASSEGS;
	set_pcb_flags(td->td_pcb, PCB_FULL_IRET);

/* XXX check all this stuff! */
/* externalize from sv_xmm */
fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;

	bzero(fpregs, sizeof(*fpregs));

	/* FPU control/status */
	penv_fpreg->en_cw = penv_xmm->en_cw;
	penv_fpreg->en_sw = penv_xmm->en_sw;
	penv_fpreg->en_tw = penv_xmm->en_tw;
	penv_fpreg->en_opcode = penv_xmm->en_opcode;
	penv_fpreg->en_rip = penv_xmm->en_rip;
	penv_fpreg->en_rdp = penv_xmm->en_rdp;
	penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
	penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;

	for (i = 0; i < 8; ++i)
		bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);
	for (i = 0; i < 16; ++i)
		bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);

/* internalize from fpregs into sv_xmm */
set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;

	/* FPU control/status */
	penv_xmm->en_cw = penv_fpreg->en_cw;
	penv_xmm->en_sw = penv_fpreg->en_sw;
	penv_xmm->en_tw = penv_fpreg->en_tw;
	penv_xmm->en_opcode = penv_fpreg->en_opcode;
	penv_xmm->en_rip = penv_fpreg->en_rip;
	penv_xmm->en_rdp = penv_fpreg->en_rdp;
	penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
	penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;

	for (i = 0; i < 8; ++i)
		bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);
	for (i = 0; i < 16; ++i)
		bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);

/* externalize from td->pcb */
fill_fpregs(struct thread *td, struct fpreg *fpregs)
	KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
	    P_SHOULDSTOP(td->td_proc),
	    ("not suspended thread %p", td));
	fill_fpregs_xmm(get_pcb_user_save_td(td), fpregs);

/* internalize to td->pcb */
set_fpregs(struct thread *td, struct fpreg *fpregs)
	set_fpregs_xmm(fpregs, get_pcb_user_save_td(td));

 * Get machine context.
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
	struct trapframe *tp;

	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_rsp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_r15 = tp->tf_r15;
	mcp->mc_r14 = tp->tf_r14;
	mcp->mc_r13 = tp->tf_r13;
	mcp->mc_r12 = tp->tf_r12;
	mcp->mc_r11 = tp->tf_r11;
	mcp->mc_r10 = tp->tf_r10;
	mcp->mc_r9  = tp->tf_r9;
	mcp->mc_r8  = tp->tf_r8;
	mcp->mc_rdi = tp->tf_rdi;
	mcp->mc_rsi = tp->tf_rsi;
	mcp->mc_rbp = tp->tf_rbp;
	mcp->mc_rbx = tp->tf_rbx;
	mcp->mc_rcx = tp->tf_rcx;
	mcp->mc_rflags = tp->tf_rflags;
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_rflags &= ~PSL_C;
		mcp->mc_rax = tp->tf_rax;
		mcp->mc_rdx = tp->tf_rdx;
	mcp->mc_rip = tp->tf_rip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_rsp = tp->tf_rsp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_es = tp->tf_es;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_gs = tp->tf_gs;
	mcp->mc_flags = tp->tf_flags;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp, NULL, 0);
	mcp->mc_fsbase = pcb->pcb_fsbase;
	mcp->mc_gsbase = pcb->pcb_gsbase;
	mcp->mc_xfpustate = 0;
	mcp->mc_xfpustate_len = 0;
	bzero(mcp->mc_spare, sizeof(mcp->mc_spare));

 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
set_mcontext(struct thread *td, const mcontext_t *mcp)
	struct trapframe *tp;

	if (mcp->mc_len != sizeof(*mcp) ||
	    (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
	rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
	    (tp->tf_rflags & ~PSL_USERCHANGE);
	if (mcp->mc_flags & _MC_HASFPXSTATE) {
		if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
		    sizeof(struct savefpu))
		xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
		ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
		    mcp->mc_xfpustate_len);
	ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
	tp->tf_r15 = mcp->mc_r15;
	tp->tf_r14 = mcp->mc_r14;
	tp->tf_r13 = mcp->mc_r13;
	tp->tf_r12 = mcp->mc_r12;
	tp->tf_r11 = mcp->mc_r11;
	tp->tf_r10 = mcp->mc_r10;
	tp->tf_r9  = mcp->mc_r9;
	tp->tf_r8  = mcp->mc_r8;
	tp->tf_rdi = mcp->mc_rdi;
	tp->tf_rsi = mcp->mc_rsi;
	tp->tf_rbp = mcp->mc_rbp;
	tp->tf_rbx = mcp->mc_rbx;
	tp->tf_rdx = mcp->mc_rdx;
	tp->tf_rcx = mcp->mc_rcx;
	tp->tf_rax = mcp->mc_rax;
	tp->tf_rip = mcp->mc_rip;
	tp->tf_rflags = rflags;
	tp->tf_rsp = mcp->mc_rsp;
	tp->tf_ss = mcp->mc_ss;
	tp->tf_flags = mcp->mc_flags;
	if (tp->tf_flags & TF_HASSEGS) {
		tp->tf_ds = mcp->mc_ds;
		tp->tf_es = mcp->mc_es;
		tp->tf_fs = mcp->mc_fs;
		tp->tf_gs = mcp->mc_gs;
	if (mcp->mc_flags & _MC_HASBASES) {
		pcb->pcb_fsbase = mcp->mc_fsbase;
		pcb->pcb_gsbase = mcp->mc_gsbase;
	set_pcb_flags(pcb, PCB_FULL_IRET);

get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
    size_t xfpusave_len)
	size_t max_len, len;

	mcp->mc_ownedfp = fpugetregs(td);
	bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
	    sizeof(mcp->mc_fpstate));
	mcp->mc_fpformat = fpuformat();
	if (!use_xsave || xfpusave_len == 0)
	max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
	if (len > max_len) {
		bzero(xfpusave + max_len, len - max_len);
	mcp->mc_flags |= _MC_HASFPXSTATE;
	mcp->mc_xfpustate_len = len;
	bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);

set_fpcontext(struct thread *td, const mcontext_t *mcp, char *xfpustate,
    size_t xfpustate_len)
	struct savefpu *fpstate;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
	else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
		/* We don't care what state is left in the FPU or PCB. */
	} else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		fpstate = (struct savefpu *)&mcp->mc_fpstate;
		fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
		error = fpusetregs(td, fpstate, xfpustate, xfpustate_len);

fpstate_drop(struct thread *td)
	KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
	if (PCPU_GET(fpcurthread) == td)

	 * XXX force a full drop of the fpu.  The above only drops it if we
	 * XXX I don't much like fpugetuserregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of fpugetuserregs()... perhaps we just
	 * have too many layers.
	clear_pcb_flags(curthread->td_pcb,
	    PCB_FPUINITDONE | PCB_USERFPUINITDONE);

fill_dbregs(struct thread *td, struct dbreg *dbregs)
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;

set_dbregs(struct thread *td, struct dbreg *dbregs)
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an unexpected
		 * TRCTRAP or a general protection fault right here.
		 * The upper bits of dr6 and dr7 must not be set
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
			if (td->td_frame->tf_cs == _ucode32sel &&
			    DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
		if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
		    (dbregs->dr[7] & 0xffffffff00000000ul) != 0)

		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		set_pcb_flags(pcb, PCB_DBREGS);

	load_dr7(0);	/* Turn off the control bits first */

 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
user_dbreg_trap(void)
	u_int64_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int64_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */

	if ((dr7 & 0x000000ff) == 0) {
		 * all GE and LE bits in the dr7 register are zero,
		 * thus the trap couldn't have been caused by the
		 * hardware debug registers

	bp = dr6 & 0x0000000f;
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers

	 * At least one of the breakpoints was hit; check to see
	 * which ones and whether any of them are user space addresses
		addr[nbp++] = (caddr_t)rdr0();
		addr[nbp++] = (caddr_t)rdr1();
		addr[nbp++] = (caddr_t)rdr2();
		addr[nbp++] = (caddr_t)rdr3();

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			 * addr[i] is in user space

	 * None of the breakpoints are in user space.

 * Provide inb() and outb() as functions.  They are normally only available as
 * inline functions, thus cannot be called from the debugger.

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);
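/*
 * Editor's note: with these wrappers one can poke I/O ports from the
 * kernel debugger, e.g. "call outb_(0x80, 0xff)" at the db> prompt.
 */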
outb_(u_short port, u_char data)