2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
7 * This code is derived from software contributed to Berkeley by
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
44 #include "opt_atalk.h"
45 #include "opt_atpic.h"
46 #include "opt_compat.h"
52 #include "opt_kstack_pages.h"
53 #include "opt_maxmem.h"
54 #include "opt_msgbuf.h"
55 #include "opt_perfmon.h"
56 #include "opt_sched.h"
57 #include "opt_kdtrace.h"
59 #include <sys/param.h>
61 #include <sys/systm.h>
65 #include <sys/callout.h>
68 #include <sys/eventhandler.h>
70 #include <sys/imgact.h>
72 #include <sys/kernel.h>
74 #include <sys/linker.h>
76 #include <sys/malloc.h>
77 #include <sys/memrange.h>
78 #include <sys/msgbuf.h>
79 #include <sys/mutex.h>
81 #include <sys/ptrace.h>
82 #include <sys/reboot.h>
83 #include <sys/sched.h>
84 #include <sys/signalvar.h>
85 #include <sys/sysctl.h>
86 #include <sys/sysent.h>
87 #include <sys/sysproto.h>
88 #include <sys/ucontext.h>
89 #include <sys/vmmeter.h>
92 #include <vm/vm_extern.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_pager.h>
98 #include <vm/vm_param.h>
102 #error KDB must be enabled in order for DDB to work!
105 #include <ddb/db_sym.h>
108 #include <net/netisr.h>
110 #include <machine/clock.h>
111 #include <machine/cpu.h>
112 #include <machine/cputypes.h>
113 #include <machine/intr_machdep.h>
114 #include <machine/mca.h>
115 #include <machine/md_var.h>
116 #include <machine/metadata.h>
117 #include <machine/pc/bios.h>
118 #include <machine/pcb.h>
119 #include <machine/proc.h>
120 #include <machine/reg.h>
121 #include <machine/sigframe.h>
122 #include <machine/specialreg.h>
124 #include <machine/perfmon.h>
126 #include <machine/tss.h>
128 #include <machine/smp.h>
132 #include <amd64/isa/icu.h>
134 #include <machine/apicvar.h>
137 #include <isa/isareg.h>
140 /* Sanity check for __curthread() */
141 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
143 extern u_int64_t hammer_time(u_int64_t, u_int64_t);
145 extern void printcpuinfo(void); /* XXX header file */
146 extern void identify_cpu(void);
147 extern void panicifcpuunsupported(void);
149 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
150 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
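/*
 * In rough terms: CS_SECURE() accepts a code selector only if its privilege
 * level is the user level (SEL_UPL), and EFL_SECURE() accepts a new rflags
 * value only if it differs from the old one in the PSL_USERCHANGE bits
 * alone.  For example, a handler may return with the carry flag toggled,
 * but a changed IOPL field makes the XOR expression nonzero and is rejected.
 */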
152 static void cpu_startup(void *);
153 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
154 static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
155 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
158 extern vm_offset_t ksym_start, ksym_end;
161 /* Intel ICH registers */
162 #define ICH_PMBASE 0x400
163 #define ICH_SMI_EN (ICH_PMBASE + 0x30)
165 int _udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel;
173 * The number of PHYSMAP entries must be one less than the number of
174 * PHYSSEG entries because the PHYSMAP entry that spans the largest
175 * physical address that is accessible by ISA DMA is split into two PHYSSEG entries.
178 #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
180 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
181 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
183 /* must be 2 less so that a pair of zero entries can signal the end of the chunk list */
184 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
185 #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
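/*
 * Layout sketch: physmap[], phys_avail[] and dump_avail[] all hold pairs of
 * addresses, i.e. entry 2*i is the base of region i and entry 2*i + 1 is the
 * address just past its end.  A pair of zero entries terminates the list,
 * which is why the *_ARRAY_END macros stop two slots short of the array size.
 */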
187 struct kva_md_info kmi;
189 static struct trapframe proc0_tf;
190 struct region_descriptor r_gdt, r_idt;
192 struct pcpu __pcpu[MAXCPU];
196 struct mem_range_softc mem_range_softc;
198 struct mtx dt_lock; /* lock for GDT and LDT */
208 * On MacBooks, we need to prevent the legacy USB circuit from
209 * generating an SMI#, because this can cause several problems,
210 * namely: incorrect CPU frequency detection and failure to
211 * start the APs.
212 * We do this by disabling a bit in the SMI_EN (SMI Control and
213 * Enable register) of the Intel ICH LPC Interface Bridge.
215 sysenv = getenv("smbios.system.product");
216 if (sysenv != NULL) {
217 if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
218 strncmp(sysenv, "MacBook3,1", 10) == 0 ||
219 strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
220 strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
221 strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
222 strncmp(sysenv, "Macmini1,1", 10) == 0) {
224 printf("Disabling LEGACY_USB_EN bit on "
226 outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
232 * Good {morning,afternoon,evening,night}.
236 panicifcpuunsupported();
243 * Display physical memory if SMBIOS reports a reasonable amount.
246 sysenv = getenv("smbios.memory.enabled");
247 if (sysenv != NULL) {
248 memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
251 if (memsize < ptoa((uintmax_t)cnt.v_free_count))
252 memsize = ptoa((uintmax_t)Maxmem);
253 printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
256 * Display any holes after the first chunk of extended memory.
261 printf("Physical memory chunk(s):\n");
262 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
265 size = phys_avail[indx + 1] - phys_avail[indx];
267 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
268 (uintmax_t)phys_avail[indx],
269 (uintmax_t)phys_avail[indx + 1] - 1,
270 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
274 vm_ksubmap_init(&kmi);
276 printf("avail memory = %ju (%ju MB)\n",
277 ptoa((uintmax_t)cnt.v_free_count),
278 ptoa((uintmax_t)cnt.v_free_count) / 1048576);
281 * Set up buffers, so they can be used to read disk labels.
284 vm_pager_bufferinit();
290 * Send an interrupt to the process.
292 * The stack is set up so that the sigcode stored at its top calls the
293 * handler routine, followed by a call to the sigreturn routine below.
294 * After sigreturn resets the signal mask, the stack, and the frame
295 * pointer, it returns to the user-specified pc and psl.
296 */
300 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
302 struct sigframe sf, *sfp;
307 struct trapframe *regs;
313 PROC_LOCK_ASSERT(p, MA_OWNED);
314 sig = ksi->ksi_signo;
316 mtx_assert(&psp->ps_mtx, MA_OWNED);
318 oonstack = sigonstack(regs->tf_rsp);
320 /* Save user context. */
321 bzero(&sf, sizeof(sf));
322 sf.sf_uc.uc_sigmask = *mask;
323 sf.sf_uc.uc_stack = td->td_sigstk;
324 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
325 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
326 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
327 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
328 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
329 get_fpcontext(td, &sf.sf_uc.uc_mcontext);
331 sf.sf_uc.uc_mcontext.mc_fsbase = td->td_pcb->pcb_fsbase;
332 sf.sf_uc.uc_mcontext.mc_gsbase = td->td_pcb->pcb_gsbase;
333 bzero(sf.sf_uc.uc_mcontext.mc_spare,
334 sizeof(sf.sf_uc.uc_mcontext.mc_spare));
335 bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
337 /* Allocate space for the signal handler context. */
338 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
339 SIGISMEMBER(psp->ps_sigonstack, sig)) {
340 sp = td->td_sigstk.ss_sp +
341 td->td_sigstk.ss_size - sizeof(struct sigframe);
342 #if defined(COMPAT_43)
343 td->td_sigstk.ss_flags |= SS_ONSTACK;
346 sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
347 /* Align to 16 bytes. */
348 sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);
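/*
 * A sketch of the arithmetic above: the 128-byte bias skips the amd64 ABI
 * red zone below the interrupted %rsp, and masking with ~0xF then aligns
 * the sigframe down to a 16-byte boundary, as the ABI expects of stack
 * frames.
 */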
350 /* Translate the signal if appropriate. */
351 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
352 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
354 /* Build the argument list for the signal handler. */
355 regs->tf_rdi = sig; /* arg 1 in %rdi */
356 regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */
357 bzero(&sf.sf_si, sizeof(sf.sf_si));
358 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
359 /* Signal handler installed with SA_SIGINFO. */
360 regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */
361 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
363 /* Fill in POSIX parts */
364 sf.sf_si = ksi->ksi_info;
365 sf.sf_si.si_signo = sig; /* maybe a translated signal */
366 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
368 /* Old FreeBSD-style arguments. */
369 regs->tf_rsi = ksi->ksi_code; /* arg 2 in %rsi */
370 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
371 sf.sf_ahu.sf_handler = catcher;
373 mtx_unlock(&psp->ps_mtx);
377 * Copy the sigframe out to the user's stack.
379 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
381 printf("process %ld has trashed its stack\n", (long)p->p_pid);
387 regs->tf_rsp = (long)sfp;
388 regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
389 regs->tf_rflags &= ~(PSL_T | PSL_D);
390 regs->tf_cs = _ucodesel;
391 regs->tf_ds = _udatasel;
392 regs->tf_es = _udatasel;
393 regs->tf_fs = _ufssel;
394 regs->tf_gs = _ugssel;
395 regs->tf_flags = TF_HASSEGS;
396 td->td_pcb->pcb_full_iret = 1;
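/*
 * At this point, roughly speaking: tf_rip points at the signal trampoline
 * the kernel copied out just below PS_STRINGS, tf_rsp points at the
 * sigframe on the user stack, %rdi carries the signal number and, for
 * SA_SIGINFO handlers, %rsi and %rdx carry &sf_si and &sf_uc.  When the
 * handler returns, the trampoline invokes sigreturn(2) on &sf_uc to
 * restore the interrupted context.
 */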
398 mtx_lock(&psp->ps_mtx);
402 * System call to clean up state after a signal
403 * has been taken. Reset the signal mask and
404 * stack state from the context left by sendsig (above).
405 * Return to the previous pc and psl as specified by
406 * the context left by sendsig. Check carefully to
407 * make sure that the user has not modified the
408 * state to gain improper privileges.
415 struct sigreturn_args /* {
416 const struct __ucontext *sigcntxp;
420 struct proc *p = td->td_proc;
421 struct trapframe *regs;
427 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
429 uprintf("pid %d (%s): sigreturn copyin failed\n",
430 p->p_pid, td->td_name);
434 if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
435 uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
436 td->td_name, ucp->uc_mcontext.mc_flags);
440 rflags = ucp->uc_mcontext.mc_rflags;
442 * Don't allow users to change privileged or reserved flags.
445 * XXX do allow users to change the privileged flag PSL_RF.
446 * The cpu sets PSL_RF in tf_rflags for faults. Debuggers
447 * should sometimes set it there too. tf_rflags is kept in
448 * the signal context during signal handling and there is no
449 * other place to remember it, so the PSL_RF bit may be
450 * corrupted by the signal handler without us knowing.
451 * Corruption of the PSL_RF bit at worst causes one more or
452 * one less debugger trap, so allowing it is fairly harmless.
454 if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
455 uprintf("pid %d (%s): sigreturn rflags = 0x%lx\n", p->p_pid,
456 td->td_name, rflags);
461 * Don't allow users to load a valid privileged %cs. Let the
462 * hardware check for invalid selectors, excess privilege in
463 * other selectors, invalid %rip values and invalid %rsp values.
465 cs = ucp->uc_mcontext.mc_cs;
466 if (!CS_SECURE(cs)) {
467 uprintf("pid %d (%s): sigreturn cs = 0x%x\n", p->p_pid,
469 ksiginfo_init_trap(&ksi);
470 ksi.ksi_signo = SIGBUS;
471 ksi.ksi_code = BUS_OBJERR;
472 ksi.ksi_trapno = T_PROTFLT;
473 ksi.ksi_addr = (void *)regs->tf_rip;
474 trapsignal(td, &ksi);
478 ret = set_fpcontext(td, &ucp->uc_mcontext);
480 uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n",
481 p->p_pid, td->td_name, ret);
484 bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
485 td->td_pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase;
486 td->td_pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase;
488 #if defined(COMPAT_43)
489 if (ucp->uc_mcontext.mc_onstack & 1)
490 td->td_sigstk.ss_flags |= SS_ONSTACK;
492 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
495 kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
496 td->td_pcb->pcb_full_iret = 1;
497 return (EJUSTRETURN);
500 #ifdef COMPAT_FREEBSD4
502 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
505 return sigreturn(td, (struct sigreturn_args *)uap);
511 * Machine-dependent boot() routine.
513 * I haven't seen anything to put here yet;
514 * possibly some stuff might be grafted back here from boot().
522 * Flush the D-cache for non-DMA I/O so that the I-cache can
523 * be made coherent later.
526 cpu_flush_dcache(void *ptr, size_t len)
531 /* Get current clock frequency for the given cpu id. */
533 cpu_est_clockrate(int cpu_id, uint64_t *rate)
538 if (pcpu_find(cpu_id) == NULL || rate == NULL)
541 /* If we're booting, trust the rate calibrated moments ago. */
548 /* Schedule ourselves on the indicated cpu. */
549 thread_lock(curthread);
550 sched_bind(curthread, cpu_id);
551 thread_unlock(curthread);
554 /* Calibrate by measuring a short delay. */
555 reg = intr_disable();
562 thread_lock(curthread);
563 sched_unbind(curthread);
564 thread_unlock(curthread);
568 * Calculate the difference in readings, convert it to Hz, and
569 * subtract 0.5% of the total. Empirical testing has shown that
570 * overhead in DELAY() works out to approximately this value.
573 *rate = tsc2 * 1000 - tsc2 * 5;
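/*
 * Worked out, assuming the elided calibration delay is DELAY(1000) (one
 * millisecond): tsc2 holds the TSC delta over 1 ms, so tsc2 * 1000 is
 * cycles per second, and tsc2 * 5 is 0.5% of that total (5/1000 of
 * tsc2 * 1000), giving an estimate in Hz.
 */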
578 * Shut down the CPU as much as possible.
587 void (*cpu_idle_hook)(void) = NULL; /* ACPI idle hook. */
590 cpu_idle_hlt(int busy)
593 * We must absolutely guarantee that hlt is the next instruction
594 * after sti, or we introduce a timing window.
597 if (sched_runnable())
600 __asm __volatile("sti; hlt");
604 cpu_idle_acpi(int busy)
607 if (sched_runnable())
609 else if (cpu_idle_hook)
612 __asm __volatile("sti; hlt");
615 static int cpu_ident_amdc1e = 0;
618 cpu_probe_amdc1e(void)
623 * Forget it if we're not using the local APIC timer.
625 if (resource_disabled("apic", 0) ||
626 (resource_int_value("apic", 0, "clock", &i) == 0 && i == 0))
630 * Detect the presence of the C1E capability, found mostly on the
631 * latest dual-core (and later) K8-family CPUs.
633 if (cpu_vendor_id == CPU_VENDOR_AMD &&
634 (cpu_id & 0x00000f00) == 0x00000f00 &&
635 (cpu_id & 0x0fff0000) >= 0x00040000) {
636 cpu_ident_amdc1e = 1;
644 * C1E renders the local APIC timer dead, so we disable C1E by
645 * reading the Interrupt Pending Message register and clearing
646 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
649 * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
650 * #32559 revision 3.00+
652 #define MSR_AMDK8_IPM 0xc0010055
653 #define AMDK8_SMIONCMPHALT (1ULL << 27)
654 #define AMDK8_C1EONCMPHALT (1ULL << 28)
655 #define AMDK8_CMPHALT (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)
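/*
 * For reference, the masks above work out to SmiOnCmpHalt = 0x08000000 and
 * C1eOnCmpHalt = 0x10000000, so AMDK8_CMPHALT is 0x18000000; the idle
 * handler below simply read-modify-writes the IPM MSR to clear both bits
 * before halting.
 */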
658 cpu_idle_amdc1e(int busy)
662 if (sched_runnable())
667 msr = rdmsr(MSR_AMDK8_IPM);
668 if (msr & AMDK8_CMPHALT)
669 wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
674 __asm __volatile("sti; hlt");
679 cpu_idle_spin(int busy)
684 void (*cpu_idle_fn)(int) = cpu_idle_acpi;
690 if (mp_grab_cpu_hlt())
697 * MWAIT CPU power states. The lower 4 bits are sub-states.
699 #define MWAIT_C0 0xf0
700 #define MWAIT_C1 0x00
701 #define MWAIT_C2 0x10
702 #define MWAIT_C3 0x20
703 #define MWAIT_C4 0x30
705 #define MWAIT_DISABLED 0x0
706 #define MWAIT_WOKEN 0x1
707 #define MWAIT_WAITING 0x2
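/*
 * Rough decoding of the values above: the high nibble of the hint passed to
 * cpu_mwait() selects the target C-state (MWAIT_C1 == 0x00 is C1,
 * MWAIT_C2 == 0x10 is C2, and so on) and the low nibble selects a
 * sub-state.  MWAIT_DISABLED/WOKEN/WAITING, by contrast, are just values
 * stored in the per-CPU monitor buffer so that cpu_idle_wakeup() can tell
 * whether an idle CPU is armed and needs to be poked.
 */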
710 cpu_idle_mwait(int busy)
714 mwait = (int *)PCPU_PTR(monitorbuf);
715 *mwait = MWAIT_WAITING;
716 if (sched_runnable())
718 cpu_monitor(mwait, 0, 0);
719 if (*mwait == MWAIT_WAITING)
720 cpu_mwait(0, MWAIT_C1);
724 cpu_idle_mwait_hlt(int busy)
728 mwait = (int *)PCPU_PTR(monitorbuf);
730 *mwait = MWAIT_DISABLED;
734 *mwait = MWAIT_WAITING;
735 if (sched_runnable())
737 cpu_monitor(mwait, 0, 0);
738 if (*mwait == MWAIT_WAITING)
739 cpu_mwait(0, MWAIT_C1);
743 cpu_idle_wakeup(int cpu)
748 if (cpu_idle_fn == cpu_idle_spin)
750 if (cpu_idle_fn != cpu_idle_mwait && cpu_idle_fn != cpu_idle_mwait_hlt)
752 pcpu = pcpu_find(cpu);
753 mwait = (int *)pcpu->pc_monitorbuf;
755 * This doesn't need to be atomic since missing the race will
756 * simply result in unnecessary IPIs.
758 if (cpu_idle_fn == cpu_idle_mwait_hlt && *mwait == MWAIT_DISABLED)
760 *mwait = MWAIT_WOKEN;
766 * Ordered by speed/power consumption.
772 { cpu_idle_spin, "spin" },
773 { cpu_idle_mwait, "mwait" },
774 { cpu_idle_mwait_hlt, "mwait_hlt" },
775 { cpu_idle_amdc1e, "amdc1e" },
776 { cpu_idle_hlt, "hlt" },
777 { cpu_idle_acpi, "acpi" },
782 idle_sysctl_available(SYSCTL_HANDLER_ARGS)
788 avail = malloc(256, M_TEMP, M_WAITOK);
790 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
791 if (strstr(idle_tbl[i].id_name, "mwait") &&
792 (cpu_feature2 & CPUID2_MON) == 0)
794 if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 &&
795 cpu_ident_amdc1e == 0)
797 p += sprintf(p, "%s, ", idle_tbl[i].id_name);
799 error = sysctl_handle_string(oidp, avail, 0, req);
805 idle_sysctl(SYSCTL_HANDLER_ARGS)
813 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
814 if (idle_tbl[i].id_fn == cpu_idle_fn) {
815 p = idle_tbl[i].id_name;
819 strncpy(buf, p, sizeof(buf));
820 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
821 if (error != 0 || req->newptr == NULL)
823 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
824 if (strstr(idle_tbl[i].id_name, "mwait") &&
825 (cpu_feature2 & CPUID2_MON) == 0)
827 if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 &&
828 cpu_ident_amdc1e == 0)
830 if (strcmp(idle_tbl[i].id_name, buf))
832 cpu_idle_fn = idle_tbl[i].id_fn;
838 SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
839 0, 0, idle_sysctl_available, "A", "list of available idle functions");
841 SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
842 idle_sysctl, "A", "currently selected idle function");
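/*
 * Usage sketch (from userland, using the sysctl names declared above):
 *
 *	sysctl machdep.idle_available	# list the idle methods this CPU supports
 *	sysctl machdep.idle=hlt		# switch the idle loop to sti;hlt
 *
 * idle_sysctl() rejects names that are listed in idle_tbl but unsupported,
 * e.g. "mwait" on CPUs without CPUID2_MON.
 */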
845 * Reset registers to default values on exec.
848 exec_setregs(td, entry, stack, ps_strings)
854 struct trapframe *regs = td->td_frame;
855 struct pcb *pcb = td->td_pcb;
858 if (td->td_proc->p_md.md_ldt != NULL)
861 mtx_unlock(&dt_lock);
865 pcb->pcb_flags &= ~(PCB_32BIT | PCB_GS32BIT);
866 pcb->pcb_initial_fpucw = __INITIAL_FPUCW__;
867 pcb->pcb_full_iret = 1;
869 bzero((char *)regs, sizeof(struct trapframe));
870 regs->tf_rip = entry;
871 regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
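/*
 * Alignment note: ((stack - 8) & ~0xF) + 8 rounds %rsp down to the nearest
 * address that is 8 bytes past a 16-byte boundary; presumably this matches
 * what the userland startup code expects after the kernel has placed the
 * argument block.
 */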
872 regs->tf_rdi = stack; /* argv */
873 regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
874 regs->tf_ss = _udatasel;
875 regs->tf_cs = _ucodesel;
876 regs->tf_ds = _udatasel;
877 regs->tf_es = _udatasel;
878 regs->tf_fs = _ufssel;
879 regs->tf_gs = _ugssel;
880 regs->tf_flags = TF_HASSEGS;
881 td->td_retval[1] = 0;
884 * Reset the hardware debug registers if they were in use.
885 * They won't have any meaning for the newly exec'd process.
887 if (pcb->pcb_flags & PCB_DBREGS) {
894 if (pcb == PCPU_GET(curpcb)) {
896 * Clear the debug registers on the running
897 * CPU, otherwise they will end up affecting
898 * the next process we switch to.
902 pcb->pcb_flags &= ~PCB_DBREGS;
906 * Drop the FP state if we hold it, so that the process gets a
907 * clean FP state if it uses the FPU again.
919 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
920 * BSP. See the comments there about why we set them.
922 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
927 * Initialize amd64 and configure to run kernel
931 * Initialize segments & interrupt table
934 struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */
935 static struct gate_descriptor idt0[NIDT];
936 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
938 static char dblfault_stack[PAGE_SIZE] __aligned(16);
940 static char nmi0_stack[PAGE_SIZE] __aligned(16);
941 CTASSERT(sizeof(struct nmi_pcpu) == 16);
943 struct amd64tss common_tss[MAXCPU];
946 * Software prototypes -- in more palatable form.
948 * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same
949 * slots as the corresponding segments in the i386 kernel.
951 struct soft_segment_descriptor gdt_segs[] = {
952 /* GNULL_SEL 0 Null Descriptor */
961 /* GNULL2_SEL 1 Null Descriptor */
970 /* GUFS32_SEL 2 32 bit %fs Descriptor for user */
972 .ssd_limit = 0xfffff,
973 .ssd_type = SDT_MEMRWA,
979 /* GUGS32_SEL 3 32 bit %gs Descriptor for user */
981 .ssd_limit = 0xfffff,
982 .ssd_type = SDT_MEMRWA,
988 /* GCODE_SEL 4 Code Descriptor for kernel */
990 .ssd_limit = 0xfffff,
991 .ssd_type = SDT_MEMERA,
997 /* GDATA_SEL 5 Data Descriptor for kernel */
999 .ssd_limit = 0xfffff,
1000 .ssd_type = SDT_MEMRWA,
1006 /* GUCODE32_SEL 6 32 bit Code Descriptor for user */
1008 .ssd_limit = 0xfffff,
1009 .ssd_type = SDT_MEMERA,
1015 /* GUDATA_SEL 7 32/64 bit Data Descriptor for user */
1017 .ssd_limit = 0xfffff,
1018 .ssd_type = SDT_MEMRWA,
1024 /* GUCODE_SEL 8 64 bit Code Descriptor for user */
1026 .ssd_limit = 0xfffff,
1027 .ssd_type = SDT_MEMERA,
1033 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */
1035 .ssd_limit = sizeof(struct amd64tss) + IOPAGES * PAGE_SIZE - 1,
1036 .ssd_type = SDT_SYSTSS,
1042 /* Actually, the TSS is a system descriptor which is twice the normal size */
1051 /* GUSERLDT_SEL 11 LDT Descriptor */
1060 /* GUSERLDT_SEL 12 LDT Descriptor, double size */
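/*
 * Background: in long mode, system descriptors such as the TSS and LDT
 * descriptors are 16 bytes wide rather than 8, so each occupies two
 * consecutive 8-byte GDT slots; that is why GPROC0_SEL and GUSERLDT_SEL
 * are each followed by a second, "double size" entry above.
 */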
1072 setidt(idx, func, typ, dpl, ist)
1079 struct gate_descriptor *ip;
1082 ip->gd_looffset = (uintptr_t)func;
1083 ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
1089 ip->gd_hioffset = ((uintptr_t)func)>>16 ;
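/*
 * Note on the split: a long-mode interrupt gate stores its 64-bit handler
 * offset in pieces; gd_looffset holds bits 0-15 and gd_hioffset holds the
 * remaining upper bits, which is why the function pointer is written once
 * as-is and once shifted right by 16.
 */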
1093 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1094 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1095 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1096 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1097 IDTVEC(xmm), IDTVEC(dblfault),
1098 #ifdef KDTRACE_HOOKS
1101 IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
1105 * Display the index and function name of any IDT entries that don't use
1106 * the default 'rsvd' entry point.
1108 DB_SHOW_COMMAND(idt, db_show_idt)
1110 struct gate_descriptor *ip;
1115 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
1116 func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset);
1117 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1118 db_printf("%3d\t", idx);
1119 db_printsym(func, DB_STGY_PROC);
1129 struct user_segment_descriptor *sd;
1130 struct soft_segment_descriptor *ssd;
1133 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1134 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1135 ssd->ssd_type = sd->sd_type;
1136 ssd->ssd_dpl = sd->sd_dpl;
1137 ssd->ssd_p = sd->sd_p;
1138 ssd->ssd_long = sd->sd_long;
1139 ssd->ssd_def32 = sd->sd_def32;
1140 ssd->ssd_gran = sd->sd_gran;
1145 struct soft_segment_descriptor *ssd;
1146 struct user_segment_descriptor *sd;
1149 sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
1150 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
1151 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
1152 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
1153 sd->sd_type = ssd->ssd_type;
1154 sd->sd_dpl = ssd->ssd_dpl;
1155 sd->sd_p = ssd->ssd_p;
1156 sd->sd_long = ssd->ssd_long;
1157 sd->sd_def32 = ssd->ssd_def32;
1158 sd->sd_gran = ssd->ssd_gran;
1163 struct soft_segment_descriptor *ssd;
1164 struct system_segment_descriptor *sd;
1167 sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
1168 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
1169 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
1170 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
1171 sd->sd_type = ssd->ssd_type;
1172 sd->sd_dpl = ssd->ssd_dpl;
1173 sd->sd_p = ssd->ssd_p;
1174 sd->sd_gran = ssd->ssd_gran;
1177 #if !defined(DEV_ATPIC) && defined(DEV_ISA)
1178 #include <isa/isavar.h>
1179 #include <isa/isareg.h>
1181 * Return a bitmap of the current interrupt requests. This is 8259-specific
1182 * and is only suitable for use at probe time.
1183 * This is only here to pacify sio. It is NOT FATAL if this doesn't work.
1184 * It shouldn't be here. There should probably be an APIC-centric
1185 * implementation in the apic driver code, if at all.
1188 isa_irq_pending(void)
1193 irr1 = inb(IO_ICU1);
1194 irr2 = inb(IO_ICU2);
1195 return ((irr2 << 8) | irr1);
1202 add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
1204 int i, insert_idx, physmap_idx;
1206 physmap_idx = *physmap_idxp;
1208 if (boothowto & RB_VERBOSE)
1209 printf("SMAP type=%02x base=%016lx len=%016lx\n",
1210 smap->type, smap->base, smap->length);
1212 if (smap->type != SMAP_TYPE_MEMORY)
1215 if (smap->length == 0)
1219 * Find insertion point while checking for overlap. Start off by
1220 * assuming the new entry will be added to the end.
1222 insert_idx = physmap_idx + 2;
1223 for (i = 0; i <= physmap_idx; i += 2) {
1224 if (smap->base < physmap[i + 1]) {
1225 if (smap->base + smap->length <= physmap[i]) {
1229 if (boothowto & RB_VERBOSE)
1231 "Overlapping memory regions, ignoring second region\n");
1236 /* See if we can prepend to the next entry. */
1237 if (insert_idx <= physmap_idx &&
1238 smap->base + smap->length == physmap[insert_idx]) {
1239 physmap[insert_idx] = smap->base;
1243 /* See if we can append to the previous entry. */
1244 if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) {
1245 physmap[insert_idx - 1] += smap->length;
1250 *physmap_idxp = physmap_idx;
1251 if (physmap_idx == PHYSMAP_SIZE) {
1253 "Too many segments in the physical address map, giving up\n");
1258 * Move the last 'N' entries down to make room for the new entry.
1261 for (i = physmap_idx; i > insert_idx; i -= 2) {
1262 physmap[i] = physmap[i - 2];
1263 physmap[i + 1] = physmap[i - 1];
1266 /* Insert the new entry. */
1267 physmap[insert_idx] = smap->base;
1268 physmap[insert_idx + 1] = smap->base + smap->length;
1273 * Populate the (physmap) array with base/bound pairs describing the
1274 * available physical memory in the system, then test this memory and
1275 * build the phys_avail array describing the actually-available memory.
1277 * If we cannot accurately determine the physical memory map (the
1278 * loader must supply an SMAP), we panic rather than guess; see below.
1280 * Total memory size may be set by the kernel environment variable
1281 * hw.physmem or the compile-time define MAXMEM.
1283 * XXX first should be vm_paddr_t.
1286 getmemsize(caddr_t kmdp, u_int64_t first)
1288 int i, off, physmap_idx, pa_indx, da_indx;
1289 vm_paddr_t pa, physmap[PHYSMAP_SIZE];
1290 u_long physmem_tunable;
1292 struct bios_smap *smapbase, *smap, *smapend;
1294 quad_t dcons_addr, dcons_size;
1296 bzero(physmap, sizeof(physmap));
1301 * Get the memory map from INT 15:E820, kindly supplied by the loader.
1303 * subr_module.c says:
1304 * "Consumer may safely assume that size value precedes data."
1305 * i.e., an int32_t immediately precedes smap.
1307 smapbase = (struct bios_smap *)preload_search_info(kmdp,
1308 MODINFO_METADATA | MODINFOMD_SMAP);
1309 if (smapbase == NULL)
1310 panic("No BIOS smap info from loader!");
1312 smapsize = *((u_int32_t *)smapbase - 1);
1313 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
1315 for (smap = smapbase; smap < smapend; smap++)
1316 if (!add_smap_entry(smap, physmap, &physmap_idx))
1320 * Find the 'base memory' segment for SMP
1323 for (i = 0; i <= physmap_idx; i += 2) {
1324 if (physmap[i] == 0x00000000) {
1325 basemem = physmap[i + 1] / 1024;
1330 panic("BIOS smap did not include a basemem segment!");
1333 /* make hole for AP bootstrap code */
1334 physmap[1] = mp_bootaddress(physmap[1] / 1024);
1338 * Maxmem isn't the "maximum memory", it's one larger than the
1339 * highest page of the physical address space. It should be
1340 * called something like "Maxphyspage". We may adjust this
1341 * based on ``hw.physmem'' and the results of the memory test.
1343 Maxmem = atop(physmap[physmap_idx + 1]);
1346 Maxmem = MAXMEM / 4;
1349 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1350 Maxmem = atop(physmem_tunable);
1353 * Don't allow MAXMEM or hw.physmem to extend the amount of memory in the system.
1356 if (Maxmem > atop(physmap[physmap_idx + 1]))
1357 Maxmem = atop(physmap[physmap_idx + 1]);
1359 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1360 (boothowto & RB_VERBOSE))
1361 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1363 /* call pmap initialization to make new kernel address space */
1364 pmap_bootstrap(&first);
1367 * Size up each available chunk of physical memory.
1369 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1372 phys_avail[pa_indx++] = physmap[0];
1373 phys_avail[pa_indx] = physmap[0];
1374 dump_avail[da_indx] = physmap[0];
1378 * Get dcons buffer address
1380 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1381 getenv_quad("dcons.size", &dcons_size) == 0)
1385 * physmap is in bytes, so when converting to page boundaries,
1386 * round up the start address and round down the end address.
1388 for (i = 0; i <= physmap_idx; i += 2) {
1391 end = ptoa((vm_paddr_t)Maxmem);
1392 if (physmap[i + 1] < end)
1393 end = trunc_page(physmap[i + 1]);
1394 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1395 int tmp, page_bad, full;
1396 int *ptr = (int *)CADDR1;
1400 * block out kernel memory as not available.
1402 if (pa >= 0x100000 && pa < first)
1406 * block out dcons buffer
1409 && pa >= trunc_page(dcons_addr)
1410 && pa < dcons_addr + dcons_size)
1416 * map page into kernel: valid, read/write, non-cacheable
1418 *pte = pa | PG_V | PG_RW | PG_N;
1423 * Test for alternating 1's and 0's
1425 *(volatile int *)ptr = 0xaaaaaaaa;
1426 if (*(volatile int *)ptr != 0xaaaaaaaa)
1429 * Test for alternating 0's and 1's
1431 *(volatile int *)ptr = 0x55555555;
1432 if (*(volatile int *)ptr != 0x55555555)
1437 *(volatile int *)ptr = 0xffffffff;
1438 if (*(volatile int *)ptr != 0xffffffff)
1443 *(volatile int *)ptr = 0x0;
1444 if (*(volatile int *)ptr != 0x0)
1447 * Restore original value.
1452 * Adjust array of valid/good pages.
1454 if (page_bad == TRUE)
1457 * If this good page is a continuation of the
1458 * previous set of good pages, then just increase
1459 * the end pointer. Otherwise, start a new chunk.
1460 * Note that the stored "end" points one past the last page,
1461 * making the range >= start and < end.
1462 * If we're also doing a speculative memory
1463 * test and we're at or past the end, bump up Maxmem
1464 * so that we keep going. The first bad page
1465 * will terminate the loop.
1467 if (phys_avail[pa_indx] == pa) {
1468 phys_avail[pa_indx] += PAGE_SIZE;
1471 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1473 "Too many holes in the physical address space, giving up\n");
1478 phys_avail[pa_indx++] = pa; /* start */
1479 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1483 if (dump_avail[da_indx] == pa) {
1484 dump_avail[da_indx] += PAGE_SIZE;
1487 if (da_indx == DUMP_AVAIL_ARRAY_END) {
1491 dump_avail[da_indx++] = pa; /* start */
1492 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
1504 * The last chunk must contain at least one page plus the message
1505 * buffer to avoid complicating other code (message buffer address
1506 * calculation, etc.).
1508 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1509 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
1510 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1511 phys_avail[pa_indx--] = 0;
1512 phys_avail[pa_indx--] = 0;
1515 Maxmem = atop(phys_avail[pa_indx]);
1517 /* Trim off space for the message buffer. */
1518 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
1520 /* Map the message buffer. */
1521 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
1522 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
1527 hammer_time(u_int64_t modulep, u_int64_t physfree)
1532 struct nmi_pcpu *np;
1536 thread0.td_kstack = physfree + KERNBASE;
1537 bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE);
1538 physfree += KSTACK_PAGES * PAGE_SIZE;
1539 thread0.td_pcb = (struct pcb *)
1540 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
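/*
 * In other words, the pcb for thread0 is carved out of the top of its
 * kernel stack: the expression points sizeof(struct pcb) bytes below the
 * end of the KSTACK_PAGES * PAGE_SIZE region that was just zeroed.
 */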
1543 * This may be done better later if it gets more high-level
1544 * components in it. If so, just link td->td_proc here.
1546 proc_linkup0(&proc0, &thread0);
1548 preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
1549 preload_bootstrap_relocate(KERNBASE);
1550 kmdp = preload_search_by_type("elf kernel");
1552 kmdp = preload_search_by_type("elf64 kernel");
1553 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
1554 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE;
1556 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
1557 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
1560 /* Init basic tunables, hz, etc. */
1564 * make gdt memory segments
1566 for (x = 0; x < NGDT; x++) {
1567 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
1568 x != GUSERLDT_SEL && x != (GUSERLDT_SEL) + 1)
1569 ssdtosd(&gdt_segs[x], &gdt[x]);
1571 gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];
1572 ssdtosyssd(&gdt_segs[GPROC0_SEL],
1573 (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
1575 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1576 r_gdt.rd_base = (long) gdt;
1580 wrmsr(MSR_FSBASE, 0); /* User value */
1581 wrmsr(MSR_GSBASE, (u_int64_t)pc);
1582 wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */
1584 pcpu_init(pc, 0, sizeof(struct pcpu));
1585 dpcpu_init((void *)(physfree + KERNBASE), 0);
1586 physfree += DPCPU_SIZE;
1587 PCPU_SET(prvspace, pc);
1588 PCPU_SET(curthread, &thread0);
1589 PCPU_SET(curpcb, thread0.td_pcb);
1590 PCPU_SET(tssp, &common_tss[0]);
1591 PCPU_SET(commontssp, &common_tss[0]);
1592 PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
1593 PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]);
1594 PCPU_SET(fs32p, &gdt[GUFS32_SEL]);
1595 PCPU_SET(gs32p, &gdt[GUGS32_SEL]);
1598 * Initialize mutexes.
1600 * icu_lock: in order to allow an interrupt to occur in a critical
1601 * section, to set pcpu->ipending (etc...) properly, we
1602 * must be able to get the icu lock, so it can't be under witness.
1603 */
1606 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
1607 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF);
1610 for (x = 0; x < NIDT; x++)
1611 setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
1612 setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0);
1613 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0);
1614 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 2);
1615 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0);
1616 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0);
1617 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0);
1618 setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0);
1619 setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0);
1620 setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
1621 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0);
1622 setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0);
1623 setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0);
1624 setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0);
1625 setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0);
1626 setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0);
1627 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0);
1628 setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0);
1629 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0);
1630 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0);
1631 #ifdef KDTRACE_HOOKS
1632 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYSIGT, SEL_UPL, 0);
1635 r_idt.rd_limit = sizeof(idt0) - 1;
1636 r_idt.rd_base = (long) idt;
1640 * Initialize the i8254 before the console so that console
1641 * initialization can use DELAY().
1646 * Initialize the console before we print anything out.
1655 /* Reset and mask the atpics and leave them shut down. */
1659 * Point the ICU spurious interrupt vectors at the APIC spurious
1660 * interrupt handler.
1662 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
1663 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
1666 #error "have you forgotten the isa device?";
1672 if (boothowto & RB_KDB)
1673 kdb_enter(KDB_WHY_BOOTFLAGS,
1674 "Boot flags requested debugger");
1677 identify_cpu(); /* Final stage of CPU initialization */
1678 initializecpu(); /* Initialize CPU registers */
1679 initializecpucache();
1681 /* make an initial tss so cpu can get interrupt stack on syscall! */
1682 common_tss[0].tss_rsp0 = thread0.td_kstack + \
1683 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb);
1684 /* Ensure the stack is aligned to 16 bytes */
1685 common_tss[0].tss_rsp0 &= ~0xFul;
1686 PCPU_SET(rsp0, common_tss[0].tss_rsp0);
1688 /* doublefault stack space, runs on ist1 */
1689 common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];
1692 * NMI stack, runs on ist2. The pcpu pointer is stored just
1693 * above the start of the ist2 stack.
1695 np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1;
1696 np->np_pcpu = (register_t) pc;
1697 common_tss[0].tss_ist2 = (long) np;
1699 /* Set the IO permission bitmap (empty due to tss seg limit) */
1700 common_tss[0].tss_iobase = sizeof(struct amd64tss) +
1701 IOPAGES * PAGE_SIZE;
1703 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
1706 /* Set up the fast syscall stuff */
1707 msr = rdmsr(MSR_EFER) | EFER_SCE;
1708 wrmsr(MSR_EFER, msr);
1709 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
1710 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
1711 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
1712 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
1713 wrmsr(MSR_STAR, msr);
1714 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
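/*
 * Summary of the SYSCALL MSR setup above: MSR_LSTAR and MSR_CSTAR hold the
 * 64-bit and 32-bit-compat entry points, MSR_STAR packs the kernel and
 * user segment selector bases used on syscall entry and sysret, and
 * MSR_SF_MASK lists the rflags bits (trap, direction, interrupts, ...)
 * that the CPU clears automatically when SYSCALL is executed.
 */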
1716 getmemsize(kmdp, physfree);
1717 init_param2(physmem);
1719 /* now running on new page tables, configured, and u/iom is accessible */
1721 msgbufinit(msgbufp, MSGBUF_SIZE);
1724 /* transfer to user mode */
1726 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
1727 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
1728 _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
1729 _ufssel = GSEL(GUFS32_SEL, SEL_UPL);
1730 _ugssel = GSEL(GUGS32_SEL, SEL_UPL);
1736 /* set up proc 0's pcb */
1737 thread0.td_pcb->pcb_flags = 0;
1738 thread0.td_pcb->pcb_cr3 = KPML4phys;
1739 thread0.td_frame = &proc0_tf;
1741 env = getenv("kernelname");
1743 strlcpy(kernelname, env, sizeof(kernelname));
1746 if (inw(0x10) == 0x49d2) {
1748 printf("Xen detected: disabling emulated block and network devices\n");
1753 if (cpu_probe_amdc1e())
1754 cpu_idle_fn = cpu_idle_amdc1e;
1756 /* Location of kernel stack for locore */
1757 return ((u_int64_t)thread0.td_pcb);
1761 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
1764 pcpu->pc_acpi_id = 0xffffffff;
1768 spinlock_enter(void)
1774 if (td->td_md.md_spinlock_count == 0) {
1775 flags = intr_disable();
1776 td->td_md.md_spinlock_count = 1;
1777 td->td_md.md_saved_flags = flags;
1779 td->td_md.md_spinlock_count++;
1791 flags = td->td_md.md_saved_flags;
1792 td->td_md.md_spinlock_count--;
1793 if (td->td_md.md_spinlock_count == 0)
1794 intr_restore(flags);
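/*
 * Taken together with spinlock_enter() above, this exit path implements a
 * simple per-thread nesting count: interrupts are disabled and the original
 * rflags saved only on the outermost enter, and restored only when the
 * count drops back to zero here.
 */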
1798 * Construct a PCB from a trapframe. This is called from kdb_trap() where
1799 * we want to start a backtrace from the function that caused us to enter
1800 * the debugger. We have the context in the trapframe, but base the trace
1801 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
1802 * enough for a backtrace.
1805 makectx(struct trapframe *tf, struct pcb *pcb)
1808 pcb->pcb_r12 = tf->tf_r12;
1809 pcb->pcb_r13 = tf->tf_r13;
1810 pcb->pcb_r14 = tf->tf_r14;
1811 pcb->pcb_r15 = tf->tf_r15;
1812 pcb->pcb_rbp = tf->tf_rbp;
1813 pcb->pcb_rbx = tf->tf_rbx;
1814 pcb->pcb_rip = tf->tf_rip;
1815 pcb->pcb_rsp = tf->tf_rsp;
1819 ptrace_set_pc(struct thread *td, unsigned long addr)
1821 td->td_frame->tf_rip = addr;
1826 ptrace_single_step(struct thread *td)
1828 td->td_frame->tf_rflags |= PSL_T;
1833 ptrace_clear_single_step(struct thread *td)
1835 td->td_frame->tf_rflags &= ~PSL_T;
1840 fill_regs(struct thread *td, struct reg *regs)
1842 struct trapframe *tp;
1845 regs->r_r15 = tp->tf_r15;
1846 regs->r_r14 = tp->tf_r14;
1847 regs->r_r13 = tp->tf_r13;
1848 regs->r_r12 = tp->tf_r12;
1849 regs->r_r11 = tp->tf_r11;
1850 regs->r_r10 = tp->tf_r10;
1851 regs->r_r9 = tp->tf_r9;
1852 regs->r_r8 = tp->tf_r8;
1853 regs->r_rdi = tp->tf_rdi;
1854 regs->r_rsi = tp->tf_rsi;
1855 regs->r_rbp = tp->tf_rbp;
1856 regs->r_rbx = tp->tf_rbx;
1857 regs->r_rdx = tp->tf_rdx;
1858 regs->r_rcx = tp->tf_rcx;
1859 regs->r_rax = tp->tf_rax;
1860 regs->r_rip = tp->tf_rip;
1861 regs->r_cs = tp->tf_cs;
1862 regs->r_rflags = tp->tf_rflags;
1863 regs->r_rsp = tp->tf_rsp;
1864 regs->r_ss = tp->tf_ss;
1865 if (tp->tf_flags & TF_HASSEGS) {
1866 regs->r_ds = tp->tf_ds;
1867 regs->r_es = tp->tf_es;
1868 regs->r_fs = tp->tf_fs;
1869 regs->r_gs = tp->tf_gs;
1880 set_regs(struct thread *td, struct reg *regs)
1882 struct trapframe *tp;
1886 rflags = regs->r_rflags & 0xffffffff;
1887 if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
1889 tp->tf_r15 = regs->r_r15;
1890 tp->tf_r14 = regs->r_r14;
1891 tp->tf_r13 = regs->r_r13;
1892 tp->tf_r12 = regs->r_r12;
1893 tp->tf_r11 = regs->r_r11;
1894 tp->tf_r10 = regs->r_r10;
1895 tp->tf_r9 = regs->r_r9;
1896 tp->tf_r8 = regs->r_r8;
1897 tp->tf_rdi = regs->r_rdi;
1898 tp->tf_rsi = regs->r_rsi;
1899 tp->tf_rbp = regs->r_rbp;
1900 tp->tf_rbx = regs->r_rbx;
1901 tp->tf_rdx = regs->r_rdx;
1902 tp->tf_rcx = regs->r_rcx;
1903 tp->tf_rax = regs->r_rax;
1904 tp->tf_rip = regs->r_rip;
1905 tp->tf_cs = regs->r_cs;
1906 tp->tf_rflags = rflags;
1907 tp->tf_rsp = regs->r_rsp;
1908 tp->tf_ss = regs->r_ss;
1909 if (0) { /* XXXKIB */
1910 tp->tf_ds = regs->r_ds;
1911 tp->tf_es = regs->r_es;
1912 tp->tf_fs = regs->r_fs;
1913 tp->tf_gs = regs->r_gs;
1914 tp->tf_flags = TF_HASSEGS;
1915 td->td_pcb->pcb_full_iret = 1;
1920 /* XXX check all this stuff! */
1921 /* externalize from sv_xmm */
1923 fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
1925 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
1926 struct envxmm *penv_xmm = &sv_xmm->sv_env;
1930 bzero(fpregs, sizeof(*fpregs));
1932 /* FPU control/status */
1933 penv_fpreg->en_cw = penv_xmm->en_cw;
1934 penv_fpreg->en_sw = penv_xmm->en_sw;
1935 penv_fpreg->en_tw = penv_xmm->en_tw;
1936 penv_fpreg->en_opcode = penv_xmm->en_opcode;
1937 penv_fpreg->en_rip = penv_xmm->en_rip;
1938 penv_fpreg->en_rdp = penv_xmm->en_rdp;
1939 penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
1940 penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;
1943 for (i = 0; i < 8; ++i)
1944 bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);
1947 for (i = 0; i < 16; ++i)
1948 bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
1951 /* internalize from fpregs into sv_xmm */
1953 set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
1955 struct envxmm *penv_xmm = &sv_xmm->sv_env;
1956 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
1960 /* FPU control/status */
1961 penv_xmm->en_cw = penv_fpreg->en_cw;
1962 penv_xmm->en_sw = penv_fpreg->en_sw;
1963 penv_xmm->en_tw = penv_fpreg->en_tw;
1964 penv_xmm->en_opcode = penv_fpreg->en_opcode;
1965 penv_xmm->en_rip = penv_fpreg->en_rip;
1966 penv_xmm->en_rdp = penv_fpreg->en_rdp;
1967 penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
1968 penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;
1971 for (i = 0; i < 8; ++i)
1972 bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);
1975 for (i = 0; i < 16; ++i)
1976 bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
1979 /* externalize from td->pcb */
1981 fill_fpregs(struct thread *td, struct fpreg *fpregs)
1984 KASSERT(td == curthread || TD_IS_SUSPENDED(td),
1985 ("not suspended thread %p", td));
1987 fill_fpregs_xmm(&td->td_pcb->pcb_user_save, fpregs);
1991 /* internalize to td->pcb */
1993 set_fpregs(struct thread *td, struct fpreg *fpregs)
1996 set_fpregs_xmm(fpregs, &td->td_pcb->pcb_user_save);
2002 * Get machine context.
2005 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2007 struct trapframe *tp;
2010 PROC_LOCK(curthread->td_proc);
2011 mcp->mc_onstack = sigonstack(tp->tf_rsp);
2012 PROC_UNLOCK(curthread->td_proc);
2013 mcp->mc_r15 = tp->tf_r15;
2014 mcp->mc_r14 = tp->tf_r14;
2015 mcp->mc_r13 = tp->tf_r13;
2016 mcp->mc_r12 = tp->tf_r12;
2017 mcp->mc_r11 = tp->tf_r11;
2018 mcp->mc_r10 = tp->tf_r10;
2019 mcp->mc_r9 = tp->tf_r9;
2020 mcp->mc_r8 = tp->tf_r8;
2021 mcp->mc_rdi = tp->tf_rdi;
2022 mcp->mc_rsi = tp->tf_rsi;
2023 mcp->mc_rbp = tp->tf_rbp;
2024 mcp->mc_rbx = tp->tf_rbx;
2025 mcp->mc_rcx = tp->tf_rcx;
2026 mcp->mc_rflags = tp->tf_rflags;
2027 if (flags & GET_MC_CLEAR_RET) {
2030 mcp->mc_rflags &= ~PSL_C;
2032 mcp->mc_rax = tp->tf_rax;
2033 mcp->mc_rdx = tp->tf_rdx;
2035 mcp->mc_rip = tp->tf_rip;
2036 mcp->mc_cs = tp->tf_cs;
2037 mcp->mc_rsp = tp->tf_rsp;
2038 mcp->mc_ss = tp->tf_ss;
2039 mcp->mc_ds = tp->tf_ds;
2040 mcp->mc_es = tp->tf_es;
2041 mcp->mc_fs = tp->tf_fs;
2042 mcp->mc_gs = tp->tf_gs;
2043 mcp->mc_flags = tp->tf_flags;
2044 mcp->mc_len = sizeof(*mcp);
2045 get_fpcontext(td, mcp);
2046 mcp->mc_fsbase = td->td_pcb->pcb_fsbase;
2047 mcp->mc_gsbase = td->td_pcb->pcb_gsbase;
2048 bzero(mcp->mc_spare, sizeof(mcp->mc_spare));
2053 * Set machine context.
2055 * However, we don't set any but the user-modifiable flags, and we won't
2056 * touch the cs selector.
2059 set_mcontext(struct thread *td, const mcontext_t *mcp)
2061 struct trapframe *tp;
2066 if (mcp->mc_len != sizeof(*mcp) ||
2067 (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
2069 rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
2070 (tp->tf_rflags & ~PSL_USERCHANGE);
2071 ret = set_fpcontext(td, mcp);
2074 tp->tf_r15 = mcp->mc_r15;
2075 tp->tf_r14 = mcp->mc_r14;
2076 tp->tf_r13 = mcp->mc_r13;
2077 tp->tf_r12 = mcp->mc_r12;
2078 tp->tf_r11 = mcp->mc_r11;
2079 tp->tf_r10 = mcp->mc_r10;
2080 tp->tf_r9 = mcp->mc_r9;
2081 tp->tf_r8 = mcp->mc_r8;
2082 tp->tf_rdi = mcp->mc_rdi;
2083 tp->tf_rsi = mcp->mc_rsi;
2084 tp->tf_rbp = mcp->mc_rbp;
2085 tp->tf_rbx = mcp->mc_rbx;
2086 tp->tf_rdx = mcp->mc_rdx;
2087 tp->tf_rcx = mcp->mc_rcx;
2088 tp->tf_rax = mcp->mc_rax;
2089 tp->tf_rip = mcp->mc_rip;
2090 tp->tf_rflags = rflags;
2091 tp->tf_rsp = mcp->mc_rsp;
2092 tp->tf_ss = mcp->mc_ss;
2093 tp->tf_flags = mcp->mc_flags;
2094 if (tp->tf_flags & TF_HASSEGS) {
2095 tp->tf_ds = mcp->mc_ds;
2096 tp->tf_es = mcp->mc_es;
2097 tp->tf_fs = mcp->mc_fs;
2098 tp->tf_gs = mcp->mc_gs;
2100 if (mcp->mc_flags & _MC_HASBASES) {
2101 td->td_pcb->pcb_fsbase = mcp->mc_fsbase;
2102 td->td_pcb->pcb_gsbase = mcp->mc_gsbase;
2104 td->td_pcb->pcb_full_iret = 1;
2109 get_fpcontext(struct thread *td, mcontext_t *mcp)
2112 mcp->mc_ownedfp = fpugetregs(td);
2113 bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate,
2114 sizeof(mcp->mc_fpstate));
2115 mcp->mc_fpformat = fpuformat();
2119 set_fpcontext(struct thread *td, const mcontext_t *mcp)
2121 struct savefpu *fpstate;
2123 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2125 else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
2127 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
2128 /* We don't care what state is left in the FPU or PCB. */
2130 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2131 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2132 fpstate = (struct savefpu *)&mcp->mc_fpstate;
2133 fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
2134 fpusetregs(td, fpstate);
2141 fpstate_drop(struct thread *td)
2144 KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
2146 if (PCPU_GET(fpcurthread) == td)
2149 * XXX force a full drop of the fpu. The above only drops it if we
2152 * XXX I don't much like fpugetuserregs()'s semantics of doing a full
2153 * drop. Dropping only to the pcb matches fnsave's behaviour.
2154 * We only need to drop to !PCB_INITDONE in sendsig(). But
2155 * sendsig() is the only caller of fpugetuserregs()... perhaps we just
2156 * have too many layers.
2158 curthread->td_pcb->pcb_flags &= ~(PCB_FPUINITDONE |
2159 PCB_USERFPUINITDONE);
2164 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2169 dbregs->dr[0] = rdr0();
2170 dbregs->dr[1] = rdr1();
2171 dbregs->dr[2] = rdr2();
2172 dbregs->dr[3] = rdr3();
2173 dbregs->dr[6] = rdr6();
2174 dbregs->dr[7] = rdr7();
2177 dbregs->dr[0] = pcb->pcb_dr0;
2178 dbregs->dr[1] = pcb->pcb_dr1;
2179 dbregs->dr[2] = pcb->pcb_dr2;
2180 dbregs->dr[3] = pcb->pcb_dr3;
2181 dbregs->dr[6] = pcb->pcb_dr6;
2182 dbregs->dr[7] = pcb->pcb_dr7;
2198 set_dbregs(struct thread *td, struct dbreg *dbregs)
2204 load_dr0(dbregs->dr[0]);
2205 load_dr1(dbregs->dr[1]);
2206 load_dr2(dbregs->dr[2]);
2207 load_dr3(dbregs->dr[3]);
2208 load_dr6(dbregs->dr[6]);
2209 load_dr7(dbregs->dr[7]);
2212 * Don't let an illegal value for dr7 get set. Specifically,
2213 * check for undefined settings. Setting these bit patterns
2214 * results in undefined behaviour and can lead to an unexpected
2215 * TRCTRAP or a general protection fault right here.
2216 * The upper bits of dr6 and dr7 must not be set.
2218 for (i = 0; i < 4; i++) {
2219 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
2221 if (td->td_frame->tf_cs == _ucode32sel &&
2222 DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
2225 if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
2226 (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
2232 * Don't let a process set a breakpoint that is not within the
2233 * process's address space. If a process could do this, it
2234 * could halt the system by setting a breakpoint in the kernel
2235 * (if ddb was enabled). Thus, we need to check to make sure
2236 * that no breakpoints are being enabled for addresses outside
2237 * the process's address space.
2239 * XXX - what about when the watched area of the user's
2240 * address space is written into from within the kernel
2241 * ... wouldn't that still cause a breakpoint to be generated
2242 * from within kernel mode?
2245 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
2246 /* dr0 is enabled */
2247 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2250 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
2251 /* dr1 is enabled */
2252 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2255 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
2256 /* dr2 is enabled */
2257 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2260 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
2261 /* dr3 is enabled */
2262 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2266 pcb->pcb_dr0 = dbregs->dr[0];
2267 pcb->pcb_dr1 = dbregs->dr[1];
2268 pcb->pcb_dr2 = dbregs->dr[2];
2269 pcb->pcb_dr3 = dbregs->dr[3];
2270 pcb->pcb_dr6 = dbregs->dr[6];
2271 pcb->pcb_dr7 = dbregs->dr[7];
2273 pcb->pcb_flags |= PCB_DBREGS;
2283 load_dr7(0); /* Turn off the control bits first */
2292 * Return > 0 if a hardware breakpoint has been hit, and the
2293 * breakpoint was in user space. Return 0, otherwise.
2296 user_dbreg_trap(void)
2298 u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */
2299 u_int64_t bp; /* breakpoint bits extracted from dr6 */
2300 int nbp; /* number of breakpoints that triggered */
2301 caddr_t addr[4]; /* breakpoint addresses */
2305 if ((dr7 & 0x000000ff) == 0) {
2307 * all of the local and global breakpoint enable bits in dr7 are zero,
2308 * thus the trap couldn't have been caused by the
2309 * hardware debug registers
2316 bp = dr6 & 0x0000000f;
2320 * None of the breakpoint bits are set, meaning this
2321 * trap was not caused by any of the debug registers
2327 * at least one of the breakpoints was hit; check to see
2328 * which ones, and whether any of them are user-space addresses
2332 addr[nbp++] = (caddr_t)rdr0();
2335 addr[nbp++] = (caddr_t)rdr1();
2338 addr[nbp++] = (caddr_t)rdr2();
2341 addr[nbp++] = (caddr_t)rdr3();
2344 for (i = 0; i < nbp; i++) {
2345 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
2347 * addr[i] is in user space
2354 * None of the breakpoints are in user space.
2362 * Provide inb() and outb() as functions. They are normally only available as
2363 * inline functions and thus cannot be called from the debugger.
2366 /* silence compiler warnings */
2367 u_char inb_(u_short);
2368 void outb_(u_short, u_char);
2377 outb_(u_short port, u_char data)