2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
7 * This code is derived from software contributed to Berkeley by
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
44 #include "opt_atalk.h"
45 #include "opt_atpic.h"
46 #include "opt_compat.h"
52 #include "opt_kstack_pages.h"
53 #include "opt_maxmem.h"
54 #include "opt_perfmon.h"
55 #include "opt_sched.h"
56 #include "opt_kdtrace.h"
58 #include <sys/param.h>
60 #include <sys/systm.h>
64 #include <sys/callout.h>
67 #include <sys/eventhandler.h>
69 #include <sys/imgact.h>
71 #include <sys/kernel.h>
73 #include <sys/linker.h>
75 #include <sys/malloc.h>
76 #include <sys/msgbuf.h>
77 #include <sys/mutex.h>
79 #include <sys/ptrace.h>
80 #include <sys/reboot.h>
81 #include <sys/sched.h>
82 #include <sys/signalvar.h>
83 #include <sys/syscallsubr.h>
84 #include <sys/sysctl.h>
85 #include <sys/sysent.h>
86 #include <sys/sysproto.h>
87 #include <sys/ucontext.h>
88 #include <sys/vmmeter.h>
91 #include <vm/vm_extern.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_object.h>
96 #include <vm/vm_pager.h>
97 #include <vm/vm_param.h>
101 #error KDB must be enabled in order for DDB to work!
104 #include <ddb/db_sym.h>
107 #include <net/netisr.h>
109 #include <machine/clock.h>
110 #include <machine/cpu.h>
111 #include <machine/cputypes.h>
112 #include <machine/intr_machdep.h>
113 #include <machine/mca.h>
114 #include <machine/md_var.h>
115 #include <machine/metadata.h>
116 #include <machine/pc/bios.h>
117 #include <machine/pcb.h>
118 #include <machine/proc.h>
119 #include <machine/reg.h>
120 #include <machine/sigframe.h>
121 #include <machine/specialreg.h>
123 #include <machine/perfmon.h>
125 #include <machine/tss.h>
127 #include <machine/smp.h>
131 #include <amd64/isa/icu.h>
133 #include <machine/apicvar.h>
136 #include <isa/isareg.h>
139 /* Sanity check for __curthread() */
140 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
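/*
 * __curthread() (see machine/pcpu.h) is effectively a single %gs-relative
 * load from offset 0, so pc_curthread must remain the first member of
 * struct pcpu; the assertion above catches any layout change at compile
 * time.
 */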
142 extern u_int64_t hammer_time(u_int64_t, u_int64_t);
144 extern void printcpuinfo(void); /* XXX header file */
145 extern void identify_cpu(void);
146 extern void panicifcpuunsupported(void);
148 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
149 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
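/*
 * In short: CS_SECURE() accepts a code selector only if its privilege
 * level is user (SEL_UPL), and EFL_SECURE() accepts a new rflags value
 * only if every bit outside PSL_USERCHANGE matches the old value.
 * sigreturn() and set_regs() below rely on both checks to keep a
 * user-supplied context from granting itself extra privilege.
 */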
151 static void cpu_startup(void *);
152 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
153 static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
154 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
157 extern vm_offset_t ksym_start, ksym_end;
160 struct msgbuf *msgbufp;
162 /* Intel ICH registers */
#define	ICH_PMBASE	0x400
#define	ICH_SMI_EN	(ICH_PMBASE + 0x30)
166 int _udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel;
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
179 #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
181 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
182 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
184 /* must be 2 less so 0 0 can signal end of chunks */
185 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
186 #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
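/*
 * Both arrays hold { base, end } pairs (end exclusive) terminated by a
 * pair of zeroes.  A hypothetical two-chunk layout (illustrative values
 * only) would look like:
 *
 *	phys_avail[] = { 0x1000, 0x9f000, 0x100000, 0x7fe00000, 0, 0 };
 */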
188 struct kva_md_info kmi;
190 static struct trapframe proc0_tf;
191 struct region_descriptor r_gdt, r_idt;
193 struct pcpu __pcpu[MAXCPU];
197 struct mtx dt_lock; /* lock for GDT and LDT */
 * On MacBooks, we need to prevent the legacy USB circuit from
 * generating an SMI# because this can cause several problems,
 * namely: incorrect CPU frequency detection and failure to
 * start the APs.
 * We do this by disabling a bit in the SMI_EN (SMI Control and
 * Enable register) of the Intel ICH LPC Interface Bridge.
214 sysenv = getenv("smbios.system.product");
215 if (sysenv != NULL) {
216 if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
217 strncmp(sysenv, "MacBook3,1", 10) == 0 ||
218 strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
219 strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
220 strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
221 strncmp(sysenv, "Macmini1,1", 10) == 0) {
223 printf("Disabling LEGACY_USB_EN bit on "
225 outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
231 * Good {morning,afternoon,evening,night}.
235 panicifcpuunsupported();
 * Display physical memory if SMBIOS reports a reasonable amount.
245 sysenv = getenv("smbios.memory.enabled");
246 if (sysenv != NULL) {
247 memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
250 if (memsize < ptoa((uintmax_t)cnt.v_free_count))
251 memsize = ptoa((uintmax_t)Maxmem);
252 printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
255 * Display any holes after the first chunk of extended memory.
260 printf("Physical memory chunk(s):\n");
261 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
264 size = phys_avail[indx + 1] - phys_avail[indx];
266 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
267 (uintmax_t)phys_avail[indx],
268 (uintmax_t)phys_avail[indx + 1] - 1,
269 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
273 vm_ksubmap_init(&kmi);
275 printf("avail memory = %ju (%ju MB)\n",
276 ptoa((uintmax_t)cnt.v_free_count),
277 ptoa((uintmax_t)cnt.v_free_count) / 1048576);
280 * Set up buffers, so they can be used to read disk labels.
283 vm_pager_bufferinit();
 * Send an interrupt to a process.
 *
 * The stack is set up so that the sigcode stored at its top calls the
 * handler routine, followed by a call to the sigreturn routine below.
 * After sigreturn resets the signal mask, the stack, and the frame
 * pointer, it returns to the user-specified pc and psl.
299 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
301 struct sigframe sf, *sfp;
307 struct trapframe *regs;
314 PROC_LOCK_ASSERT(p, MA_OWNED);
315 sig = ksi->ksi_signo;
317 mtx_assert(&psp->ps_mtx, MA_OWNED);
319 oonstack = sigonstack(regs->tf_rsp);
321 /* Save user context. */
322 bzero(&sf, sizeof(sf));
323 sf.sf_uc.uc_sigmask = *mask;
324 sf.sf_uc.uc_stack = td->td_sigstk;
325 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
326 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
327 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
328 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
329 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
330 get_fpcontext(td, &sf.sf_uc.uc_mcontext);
332 sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase;
333 sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase;
334 bzero(sf.sf_uc.uc_mcontext.mc_spare,
335 sizeof(sf.sf_uc.uc_mcontext.mc_spare));
336 bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
338 /* Allocate space for the signal handler context. */
339 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
340 SIGISMEMBER(psp->ps_sigonstack, sig)) {
341 sp = td->td_sigstk.ss_sp +
342 td->td_sigstk.ss_size - sizeof(struct sigframe);
343 #if defined(COMPAT_43)
344 td->td_sigstk.ss_flags |= SS_ONSTACK;
347 sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
348 /* Align to 16 bytes. */
349 sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);
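	/*
	 * The 128 bytes skipped above correspond to the amd64 ABI red zone
	 * below the interrupted %rsp, and the mask keeps the sigframe on a
	 * 16-byte boundary as the ABI expects.
	 */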
351 /* Translate the signal if appropriate. */
352 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
353 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
355 /* Build the argument list for the signal handler. */
356 regs->tf_rdi = sig; /* arg 1 in %rdi */
357 regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */
358 bzero(&sf.sf_si, sizeof(sf.sf_si));
359 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
360 /* Signal handler installed with SA_SIGINFO. */
361 regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */
362 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
364 /* Fill in POSIX parts */
365 sf.sf_si = ksi->ksi_info;
366 sf.sf_si.si_signo = sig; /* maybe a translated signal */
367 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
369 /* Old FreeBSD-style arguments. */
370 regs->tf_rsi = ksi->ksi_code; /* arg 2 in %rsi */
371 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
372 sf.sf_ahu.sf_handler = catcher;
374 mtx_unlock(&psp->ps_mtx);
378 * Copy the sigframe out to the user's stack.
380 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
382 printf("process %ld has trashed its stack\n", (long)p->p_pid);
388 regs->tf_rsp = (long)sfp;
389 regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
390 regs->tf_rflags &= ~(PSL_T | PSL_D);
391 regs->tf_cs = _ucodesel;
392 regs->tf_ds = _udatasel;
393 regs->tf_es = _udatasel;
394 regs->tf_fs = _ufssel;
395 regs->tf_gs = _ugssel;
396 regs->tf_flags = TF_HASSEGS;
397 set_pcb_flags(pcb, PCB_FULL_IRET);
399 mtx_lock(&psp->ps_mtx);
 * System call to clean up state after a signal
 * has been taken.  Reset the signal mask and
 * stack state from the context left by sendsig (above).
 * Return to the previous pc and psl as specified by
 * the context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
416 struct sigreturn_args /* {
417 const struct __ucontext *sigcntxp;
423 struct trapframe *regs;
432 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
434 uprintf("pid %d (%s): sigreturn copyin failed\n",
435 p->p_pid, td->td_name);
439 if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
440 uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
441 td->td_name, ucp->uc_mcontext.mc_flags);
445 rflags = ucp->uc_mcontext.mc_rflags;
447 * Don't allow users to change privileged or reserved flags.
450 * XXX do allow users to change the privileged flag PSL_RF.
451 * The cpu sets PSL_RF in tf_rflags for faults. Debuggers
452 * should sometimes set it there too. tf_rflags is kept in
453 * the signal context during signal handling and there is no
454 * other place to remember it, so the PSL_RF bit may be
455 * corrupted by the signal handler without us knowing.
456 * Corruption of the PSL_RF bit at worst causes one more or
457 * one less debugger trap, so allowing it is fairly harmless.
459 if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
460 uprintf("pid %d (%s): sigreturn rflags = 0x%lx\n", p->p_pid,
461 td->td_name, rflags);
 * Don't allow users to load a valid privileged %cs.  Let the
 * hardware check for invalid selectors, excess privilege in
 * other selectors, invalid %rip's and invalid %rsp's.
470 cs = ucp->uc_mcontext.mc_cs;
471 if (!CS_SECURE(cs)) {
472 uprintf("pid %d (%s): sigreturn cs = 0x%x\n", p->p_pid,
474 ksiginfo_init_trap(&ksi);
475 ksi.ksi_signo = SIGBUS;
476 ksi.ksi_code = BUS_OBJERR;
477 ksi.ksi_trapno = T_PROTFLT;
478 ksi.ksi_addr = (void *)regs->tf_rip;
479 trapsignal(td, &ksi);
483 ret = set_fpcontext(td, &ucp->uc_mcontext);
485 uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n",
486 p->p_pid, td->td_name, ret);
489 bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
490 pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase;
491 pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase;
493 #if defined(COMPAT_43)
494 if (ucp->uc_mcontext.mc_onstack & 1)
495 td->td_sigstk.ss_flags |= SS_ONSTACK;
497 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
500 kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
501 set_pcb_flags(pcb, PCB_FULL_IRET);
502 return (EJUSTRETURN);
505 #ifdef COMPAT_FREEBSD4
507 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
510 return sigreturn(td, (struct sigreturn_args *)uap);
 * Machine-dependent boot() routine.
 *
 * I haven't seen anything to put here yet;
 * possibly some stuff might be grafted back here from boot().
527 * Flush the D-cache for non-DMA I/O so that the I-cache can
528 * be made coherent later.
531 cpu_flush_dcache(void *ptr, size_t len)
536 /* Get current clock frequency for the given cpu id. */
538 cpu_est_clockrate(int cpu_id, uint64_t *rate)
543 if (pcpu_find(cpu_id) == NULL || rate == NULL)
546 /* If we're booting, trust the rate calibrated moments ago. */
553 /* Schedule ourselves on the indicated cpu. */
554 thread_lock(curthread);
555 sched_bind(curthread, cpu_id);
556 thread_unlock(curthread);
559 /* Calibrate by measuring a short delay. */
560 reg = intr_disable();
567 thread_lock(curthread);
568 sched_unbind(curthread);
569 thread_unlock(curthread);
 * Calculate the difference in readings, convert to hertz, and
 * subtract 0.5% of the total.  Empirical testing has shown that
 * overhead in DELAY() works out to approximately this value.
578 *rate = tsc2 * 1000 - tsc2 * 5;
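	/*
	 * For example (illustrative numbers only): a CPU that ticks
	 * 2,000,000 times during DELAY(1000) yields
	 * 2,000,000 * 1000 - 2,000,000 * 5 = 1,990,000,000, i.e. a 2 GHz
	 * part reported as roughly 1.99 GHz after the 0.5% adjustment.
	 */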
 * Shut down the CPU as much as possible.
592 void (*cpu_idle_hook)(void) = NULL; /* ACPI idle hook. */
595 cpu_idle_hlt(int busy)
 * We must absolutely guarantee that hlt is the next instruction
 * after sti, or we introduce a timing window.
602 if (sched_runnable())
605 __asm __volatile("sti; hlt");
609 cpu_idle_acpi(int busy)
612 if (sched_runnable())
614 else if (cpu_idle_hook)
617 __asm __volatile("sti; hlt");
620 static int cpu_ident_amdc1e = 0;
623 cpu_probe_amdc1e(void)
 * Forget it if we're not using the local APIC timer.
630 if (resource_disabled("apic", 0) ||
631 (resource_int_value("apic", 0, "clock", &i) == 0 && i == 0))
 * Detect the presence of the C1E capability, found mostly on the
 * latest dual-core (and later) K8 family processors.
638 if (cpu_vendor_id == CPU_VENDOR_AMD &&
639 (cpu_id & 0x00000f00) == 0x00000f00 &&
640 (cpu_id & 0x0fff0000) >= 0x00040000) {
641 cpu_ident_amdc1e = 1;
649 * C1E renders the local APIC timer dead, so we disable it by
650 * reading the Interrupt Pending Message register and clearing
651 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
654 * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
655 * #32559 revision 3.00+
657 #define MSR_AMDK8_IPM 0xc0010055
658 #define AMDK8_SMIONCMPHALT (1ULL << 27)
659 #define AMDK8_C1EONCMPHALT (1ULL << 28)
660 #define AMDK8_CMPHALT (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)
663 cpu_idle_amdc1e(int busy)
667 if (sched_runnable())
672 msr = rdmsr(MSR_AMDK8_IPM);
673 if (msr & AMDK8_CMPHALT)
674 wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
679 __asm __volatile("sti; hlt");
684 cpu_idle_spin(int busy)
689 void (*cpu_idle_fn)(int) = cpu_idle_acpi;
695 if (mp_grab_cpu_hlt())
 * MWAIT CPU power states.  The lower 4 bits are sub-states.
704 #define MWAIT_C0 0xf0
705 #define MWAIT_C1 0x00
706 #define MWAIT_C2 0x10
707 #define MWAIT_C3 0x20
708 #define MWAIT_C4 0x30
710 #define MWAIT_DISABLED 0x0
711 #define MWAIT_WOKEN 0x1
712 #define MWAIT_WAITING 0x2
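/*
 * The per-CPU monitorbuf word holds one of the states above: an idling
 * CPU stores MWAIT_WAITING, arms the monitor on that address and calls
 * mwait, so cpu_idle_wakeup() can wake it simply by storing MWAIT_WOKEN
 * into the word instead of sending an IPI.
 */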
715 cpu_idle_mwait(int busy)
719 mwait = (int *)PCPU_PTR(monitorbuf);
720 *mwait = MWAIT_WAITING;
721 if (sched_runnable())
723 cpu_monitor(mwait, 0, 0);
724 if (*mwait == MWAIT_WAITING)
725 cpu_mwait(0, MWAIT_C1);
729 cpu_idle_mwait_hlt(int busy)
733 mwait = (int *)PCPU_PTR(monitorbuf);
735 *mwait = MWAIT_DISABLED;
739 *mwait = MWAIT_WAITING;
740 if (sched_runnable())
742 cpu_monitor(mwait, 0, 0);
743 if (*mwait == MWAIT_WAITING)
744 cpu_mwait(0, MWAIT_C1);
748 cpu_idle_wakeup(int cpu)
753 if (cpu_idle_fn == cpu_idle_spin)
755 if (cpu_idle_fn != cpu_idle_mwait && cpu_idle_fn != cpu_idle_mwait_hlt)
757 pcpu = pcpu_find(cpu);
758 mwait = (int *)pcpu->pc_monitorbuf;
760 * This doesn't need to be atomic since missing the race will
761 * simply result in unnecessary IPIs.
763 if (cpu_idle_fn == cpu_idle_mwait_hlt && *mwait == MWAIT_DISABLED)
765 *mwait = MWAIT_WOKEN;
771 * Ordered by speed/power consumption.
777 { cpu_idle_spin, "spin" },
778 { cpu_idle_mwait, "mwait" },
779 { cpu_idle_mwait_hlt, "mwait_hlt" },
780 { cpu_idle_amdc1e, "amdc1e" },
781 { cpu_idle_hlt, "hlt" },
782 { cpu_idle_acpi, "acpi" },
787 idle_sysctl_available(SYSCTL_HANDLER_ARGS)
793 avail = malloc(256, M_TEMP, M_WAITOK);
795 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
796 if (strstr(idle_tbl[i].id_name, "mwait") &&
797 (cpu_feature2 & CPUID2_MON) == 0)
799 if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 &&
800 cpu_ident_amdc1e == 0)
802 p += sprintf(p, "%s, ", idle_tbl[i].id_name);
804 error = sysctl_handle_string(oidp, avail, 0, req);
810 idle_sysctl(SYSCTL_HANDLER_ARGS)
818 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
819 if (idle_tbl[i].id_fn == cpu_idle_fn) {
820 p = idle_tbl[i].id_name;
824 strncpy(buf, p, sizeof(buf));
825 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
826 if (error != 0 || req->newptr == NULL)
828 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
829 if (strstr(idle_tbl[i].id_name, "mwait") &&
830 (cpu_feature2 & CPUID2_MON) == 0)
832 if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 &&
833 cpu_ident_amdc1e == 0)
835 if (strcmp(idle_tbl[i].id_name, buf))
837 cpu_idle_fn = idle_tbl[i].id_fn;
843 SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
844 0, 0, idle_sysctl_available, "A", "list of available idle functions");
846 SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
847 idle_sysctl, "A", "currently selected idle function");
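/*
 * For example, the selected idle method can be inspected and changed at
 * runtime with sysctl(8):
 *
 *	sysctl machdep.idle_available
 *	sysctl machdep.idle=hlt
 */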
850 * Reset registers to default values on exec.
853 exec_setregs(td, entry, stack, ps_strings)
859 struct trapframe *regs = td->td_frame;
860 struct pcb *pcb = td->td_pcb;
863 if (td->td_proc->p_md.md_ldt != NULL)
866 mtx_unlock(&dt_lock);
870 clear_pcb_flags(pcb, PCB_32BIT | PCB_GS32BIT);
871 pcb->pcb_initial_fpucw = __INITIAL_FPUCW__;
872 set_pcb_flags(pcb, PCB_FULL_IRET);
874 bzero((char *)regs, sizeof(struct trapframe));
875 regs->tf_rip = entry;
876 regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
877 regs->tf_rdi = stack; /* argv */
878 regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
879 regs->tf_ss = _udatasel;
880 regs->tf_cs = _ucodesel;
881 regs->tf_ds = _udatasel;
882 regs->tf_es = _udatasel;
883 regs->tf_fs = _ufssel;
884 regs->tf_gs = _ugssel;
885 regs->tf_flags = TF_HASSEGS;
886 td->td_retval[1] = 0;
889 * Reset the hardware debug registers if they were in use.
890 * They won't have any meaning for the newly exec'd process.
892 if (pcb->pcb_flags & PCB_DBREGS) {
899 if (pcb == PCPU_GET(curpcb)) {
901 * Clear the debug registers on the running
902 * CPU, otherwise they will end up affecting
903 * the next process we switch to.
907 clear_pcb_flags(pcb, PCB_DBREGS);
911 * Drop the FP state if we hold it, so that the process gets a
912 * clean FP state if it uses the FPU again.
924 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
925 * BSP. See the comments there about why we set them.
927 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
932 * Initialize amd64 and configure to run kernel
936 * Initialize segments & interrupt table
939 struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */
940 static struct gate_descriptor idt0[NIDT];
941 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
943 static char dblfault_stack[PAGE_SIZE] __aligned(16);
945 static char nmi0_stack[PAGE_SIZE] __aligned(16);
946 CTASSERT(sizeof(struct nmi_pcpu) == 16);
948 struct amd64tss common_tss[MAXCPU];
951 * Software prototypes -- in more palatable form.
953 * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same
954 * slots as corresponding segments for i386 kernel.
956 struct soft_segment_descriptor gdt_segs[] = {
957 /* GNULL_SEL 0 Null Descriptor */
966 /* GNULL2_SEL 1 Null Descriptor */
/* GUFS32_SEL	2 32 bit %fs Descriptor for user */
977 .ssd_limit = 0xfffff,
978 .ssd_type = SDT_MEMRWA,
/* GUGS32_SEL	3 32 bit %gs Descriptor for user */
986 .ssd_limit = 0xfffff,
987 .ssd_type = SDT_MEMRWA,
993 /* GCODE_SEL 4 Code Descriptor for kernel */
995 .ssd_limit = 0xfffff,
996 .ssd_type = SDT_MEMERA,
1002 /* GDATA_SEL 5 Data Descriptor for kernel */
1004 .ssd_limit = 0xfffff,
1005 .ssd_type = SDT_MEMRWA,
1011 /* GUCODE32_SEL 6 32 bit Code Descriptor for user */
1013 .ssd_limit = 0xfffff,
1014 .ssd_type = SDT_MEMERA,
1020 /* GUDATA_SEL 7 32/64 bit Data Descriptor for user */
1022 .ssd_limit = 0xfffff,
1023 .ssd_type = SDT_MEMRWA,
1029 /* GUCODE_SEL 8 64 bit Code Descriptor for user */
1031 .ssd_limit = 0xfffff,
1032 .ssd_type = SDT_MEMERA,
1038 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */
1040 .ssd_limit = sizeof(struct amd64tss) + IOPAGES * PAGE_SIZE - 1,
1041 .ssd_type = SDT_SYSTSS,
1047 /* Actually, the TSS is a system descriptor which is double size */
1056 /* GUSERLDT_SEL 11 LDT Descriptor */
1065 /* GUSERLDT_SEL 12 LDT Descriptor, double size */
1077 setidt(idx, func, typ, dpl, ist)
1084 struct gate_descriptor *ip;
1087 ip->gd_looffset = (uintptr_t)func;
1088 ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
1094 ip->gd_hioffset = ((uintptr_t)func)>>16 ;
1098 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1099 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1100 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1101 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1102 IDTVEC(xmm), IDTVEC(dblfault),
1103 #ifdef KDTRACE_HOOKS
1106 IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
1110 * Display the index and function name of any IDT entries that don't use
1111 * the default 'rsvd' entry point.
1113 DB_SHOW_COMMAND(idt, db_show_idt)
1115 struct gate_descriptor *ip;
1120 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
1121 func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset);
1122 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1123 db_printf("%3d\t", idx);
1124 db_printsym(func, DB_STGY_PROC);
1134 struct user_segment_descriptor *sd;
1135 struct soft_segment_descriptor *ssd;
1138 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1139 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1140 ssd->ssd_type = sd->sd_type;
1141 ssd->ssd_dpl = sd->sd_dpl;
1142 ssd->ssd_p = sd->sd_p;
1143 ssd->ssd_long = sd->sd_long;
1144 ssd->ssd_def32 = sd->sd_def32;
1145 ssd->ssd_gran = sd->sd_gran;
1150 struct soft_segment_descriptor *ssd;
1151 struct user_segment_descriptor *sd;
1154 sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
1155 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
1156 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
1157 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
1158 sd->sd_type = ssd->ssd_type;
1159 sd->sd_dpl = ssd->ssd_dpl;
1160 sd->sd_p = ssd->ssd_p;
1161 sd->sd_long = ssd->ssd_long;
1162 sd->sd_def32 = ssd->ssd_def32;
1163 sd->sd_gran = ssd->ssd_gran;
1168 struct soft_segment_descriptor *ssd;
1169 struct system_segment_descriptor *sd;
1172 sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
1173 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
1174 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
1175 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
1176 sd->sd_type = ssd->ssd_type;
1177 sd->sd_dpl = ssd->ssd_dpl;
1178 sd->sd_p = ssd->ssd_p;
1179 sd->sd_gran = ssd->ssd_gran;
1182 #if !defined(DEV_ATPIC) && defined(DEV_ISA)
1183 #include <isa/isavar.h>
1184 #include <isa/isareg.h>
1186 * Return a bitmap of the current interrupt requests. This is 8259-specific
1187 * and is only suitable for use at probe time.
1188 * This is only here to pacify sio. It is NOT FATAL if this doesn't work.
 * It shouldn't be here.  There should probably be an APIC-centric
 * implementation in the apic driver code, if at all.
1193 isa_irq_pending(void)
1198 irr1 = inb(IO_ICU1);
1199 irr2 = inb(IO_ICU2);
1200 return ((irr2 << 8) | irr1);
1207 add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
1209 int i, insert_idx, physmap_idx;
1211 physmap_idx = *physmap_idxp;
1213 if (boothowto & RB_VERBOSE)
1214 printf("SMAP type=%02x base=%016lx len=%016lx\n",
1215 smap->type, smap->base, smap->length);
1217 if (smap->type != SMAP_TYPE_MEMORY)
1220 if (smap->length == 0)
1224 * Find insertion point while checking for overlap. Start off by
1225 * assuming the new entry will be added to the end.
1227 insert_idx = physmap_idx + 2;
1228 for (i = 0; i <= physmap_idx; i += 2) {
1229 if (smap->base < physmap[i + 1]) {
1230 if (smap->base + smap->length <= physmap[i]) {
1234 if (boothowto & RB_VERBOSE)
1236 "Overlapping memory regions, ignoring second region\n");
1241 /* See if we can prepend to the next entry. */
1242 if (insert_idx <= physmap_idx &&
1243 smap->base + smap->length == physmap[insert_idx]) {
1244 physmap[insert_idx] = smap->base;
1248 /* See if we can append to the previous entry. */
1249 if (insert_idx > 0 && smap->base == physmap[insert_idx - 1]) {
1250 physmap[insert_idx - 1] += smap->length;
1255 *physmap_idxp = physmap_idx;
1256 if (physmap_idx == PHYSMAP_SIZE) {
1258 "Too many segments in the physical address map, giving up\n");
1263 * Move the last 'N' entries down to make room for the new
1266 for (i = physmap_idx; i > insert_idx; i -= 2) {
1267 physmap[i] = physmap[i - 2];
1268 physmap[i + 1] = physmap[i - 1];
1271 /* Insert the new entry. */
1272 physmap[insert_idx] = smap->base;
1273 physmap[insert_idx + 1] = smap->base + smap->length;
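	/*
	 * Example with hypothetical values: if physmap currently holds
	 * { 0x0, 0x9f000, 0x100000, 0x40000000 }, an SMAP entry with
	 * base 0x40000000 and length 0x20000000 takes the "append" path
	 * above and simply extends the second range to 0x60000000.
	 */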
1278 * Populate the (physmap) array with base/bound pairs describing the
1279 * available physical memory in the system, then test this memory and
1280 * build the phys_avail array describing the actually-available memory.
1282 * Total memory size may be set by the kernel environment variable
1283 * hw.physmem or the compile-time define MAXMEM.
1285 * XXX first should be vm_paddr_t.
1288 getmemsize(caddr_t kmdp, u_int64_t first)
1290 int i, physmap_idx, pa_indx, da_indx;
1291 vm_paddr_t pa, physmap[PHYSMAP_SIZE];
1292 u_long physmem_tunable, memtest;
1294 struct bios_smap *smapbase, *smap, *smapend;
1296 quad_t dcons_addr, dcons_size;
1298 bzero(physmap, sizeof(physmap));
1303 * get memory map from INT 15:E820, kindly supplied by the loader.
1305 * subr_module.c says:
1306 * "Consumer may safely assume that size value precedes data."
1307 * ie: an int32_t immediately precedes smap.
1309 smapbase = (struct bios_smap *)preload_search_info(kmdp,
1310 MODINFO_METADATA | MODINFOMD_SMAP);
1311 if (smapbase == NULL)
1312 panic("No BIOS smap info from loader!");
1314 smapsize = *((u_int32_t *)smapbase - 1);
1315 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
1317 for (smap = smapbase; smap < smapend; smap++)
1318 if (!add_smap_entry(smap, physmap, &physmap_idx))
1322 * Find the 'base memory' segment for SMP
1325 for (i = 0; i <= physmap_idx; i += 2) {
1326 if (physmap[i] == 0x00000000) {
1327 basemem = physmap[i + 1] / 1024;
1332 panic("BIOS smap did not include a basemem segment!");
1335 /* make hole for AP bootstrap code */
1336 physmap[1] = mp_bootaddress(physmap[1] / 1024);
1340 * Maxmem isn't the "maximum memory", it's one larger than the
1341 * highest page of the physical address space. It should be
1342 * called something like "Maxphyspage". We may adjust this
1343 * based on ``hw.physmem'' and the results of the memory test.
1345 Maxmem = atop(physmap[physmap_idx + 1]);
1348 Maxmem = MAXMEM / 4;
1351 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1352 Maxmem = atop(physmem_tunable);
1355 * By default enable the memory test on real hardware, and disable
1356 * it if we appear to be running in a VM. This avoids touching all
1357 * pages unnecessarily, which doesn't matter on real hardware but is
1358 * bad for shared VM hosts. Use a general name so that
1359 * one could eventually do more with the code than just disable it.
1361 memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1;
1362 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
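	/*
	 * For example, setting hw.memtest.tests=0 from the loader is
	 * expected to skip the test even on real hardware.
	 */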
1365 * Don't allow MAXMEM or hw.physmem to extend the amount of memory
1368 if (Maxmem > atop(physmap[physmap_idx + 1]))
1369 Maxmem = atop(physmap[physmap_idx + 1]);
1371 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1372 (boothowto & RB_VERBOSE))
1373 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1375 /* call pmap initialization to make new kernel address space */
1376 pmap_bootstrap(&first);
1379 * Size up each available chunk of physical memory.
1381 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1384 phys_avail[pa_indx++] = physmap[0];
1385 phys_avail[pa_indx] = physmap[0];
1386 dump_avail[da_indx] = physmap[0];
1390 * Get dcons buffer address
1392 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1393 getenv_quad("dcons.size", &dcons_size) == 0)
1397 * physmap is in bytes, so when converting to page boundaries,
1398 * round up the start address and round down the end address.
1400 for (i = 0; i <= physmap_idx; i += 2) {
1403 end = ptoa((vm_paddr_t)Maxmem);
1404 if (physmap[i + 1] < end)
1405 end = trunc_page(physmap[i + 1]);
1406 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1407 int tmp, page_bad, full;
1408 int *ptr = (int *)CADDR1;
1412 * block out kernel memory as not available.
1414 if (pa >= 0x100000 && pa < first)
1418 * block out dcons buffer
1421 && pa >= trunc_page(dcons_addr)
1422 && pa < dcons_addr + dcons_size)
	 * map page into kernel: valid, read/write, non-cacheable
1432 *pte = pa | PG_V | PG_RW | PG_N;
1437 * Test for alternating 1's and 0's
1439 *(volatile int *)ptr = 0xaaaaaaaa;
1440 if (*(volatile int *)ptr != 0xaaaaaaaa)
1443 * Test for alternating 0's and 1's
1445 *(volatile int *)ptr = 0x55555555;
1446 if (*(volatile int *)ptr != 0x55555555)
1451 *(volatile int *)ptr = 0xffffffff;
1452 if (*(volatile int *)ptr != 0xffffffff)
1457 *(volatile int *)ptr = 0x0;
1458 if (*(volatile int *)ptr != 0x0)
1461 * Restore original value.
1467 * Adjust array of valid/good pages.
1469 if (page_bad == TRUE)
	 * If this good page is a continuation of the
	 * previous set of good pages, then just increase
	 * the end pointer.  Otherwise start a new chunk.
	 * Note that "end" points one page beyond the last
	 * good page, making the range >= start and < end.
	 * If we're also doing a speculative memory test and
	 * we're at or past the end, bump up Maxmem so that
	 * we keep going.  The first bad page will terminate
	 * the loop.
1482 if (phys_avail[pa_indx] == pa) {
1483 phys_avail[pa_indx] += PAGE_SIZE;
1486 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1488 "Too many holes in the physical address space, giving up\n");
1493 phys_avail[pa_indx++] = pa; /* start */
1494 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1498 if (dump_avail[da_indx] == pa) {
1499 dump_avail[da_indx] += PAGE_SIZE;
1502 if (da_indx == DUMP_AVAIL_ARRAY_END) {
1506 dump_avail[da_indx++] = pa; /* start */
1507 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
1519 * The last chunk must contain at least one page plus the message
1520 * buffer to avoid complicating other code (message buffer address
1521 * calculation, etc.).
1523 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1524 round_page(msgbufsize) >= phys_avail[pa_indx]) {
1525 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1526 phys_avail[pa_indx--] = 0;
1527 phys_avail[pa_indx--] = 0;
1530 Maxmem = atop(phys_avail[pa_indx]);
1532 /* Trim off space for the message buffer. */
1533 phys_avail[pa_indx] -= round_page(msgbufsize);
1535 /* Map the message buffer. */
1536 msgbufp = (struct msgbuf *)PHYS_TO_DMAP(phys_avail[pa_indx]);
1540 hammer_time(u_int64_t modulep, u_int64_t physfree)
1545 struct nmi_pcpu *np;
1550 thread0.td_kstack = physfree + KERNBASE;
1551 thread0.td_kstack_pages = KSTACK_PAGES;
1552 kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
1553 bzero((void *)thread0.td_kstack, kstack0_sz);
1554 physfree += kstack0_sz;
1555 thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;
1558 * This may be done better later if it gets more high level
1559 * components in it. If so just link td->td_proc here.
1561 proc_linkup0(&proc0, &thread0);
1563 preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
1564 preload_bootstrap_relocate(KERNBASE);
1565 kmdp = preload_search_by_type("elf kernel");
1567 kmdp = preload_search_by_type("elf64 kernel");
1568 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
1569 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE;
1571 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
1572 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
1575 /* Init basic tunables, hz etc */
1579 * make gdt memory segments
1581 for (x = 0; x < NGDT; x++) {
1582 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
1583 x != GUSERLDT_SEL && x != (GUSERLDT_SEL) + 1)
1584 ssdtosd(&gdt_segs[x], &gdt[x]);
1586 gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];
1587 ssdtosyssd(&gdt_segs[GPROC0_SEL],
1588 (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
1590 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1591 r_gdt.rd_base = (long) gdt;
1595 wrmsr(MSR_FSBASE, 0); /* User value */
1596 wrmsr(MSR_GSBASE, (u_int64_t)pc);
1597 wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */
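	/*
	 * MSR_GSBASE makes %gs-relative accesses hit the BSP's pcpu area
	 * while in the kernel; MSR_KGSBASE holds the user value that
	 * swapgs installs on return to user mode.
	 */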
1599 pcpu_init(pc, 0, sizeof(struct pcpu));
1600 dpcpu_init((void *)(physfree + KERNBASE), 0);
1601 physfree += DPCPU_SIZE;
1602 PCPU_SET(prvspace, pc);
1603 PCPU_SET(curthread, &thread0);
1604 PCPU_SET(curpcb, thread0.td_pcb);
1605 PCPU_SET(tssp, &common_tss[0]);
1606 PCPU_SET(commontssp, &common_tss[0]);
1607 PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
1608 PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]);
1609 PCPU_SET(fs32p, &gdt[GUFS32_SEL]);
1610 PCPU_SET(gs32p, &gdt[GUGS32_SEL]);
1613 * Initialize mutexes.
1615 * icu_lock: in order to allow an interrupt to occur in a critical
1616 * section, to set pcpu->ipending (etc...) properly, we
 *	     must be able to get the icu lock, so it can't be
 *	     under witness.
1621 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
1622 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF);
1625 for (x = 0; x < NIDT; x++)
1626 setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
1627 setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0);
1628 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0);
1629 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 2);
1630 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0);
1631 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0);
1632 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0);
1633 setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0);
1634 setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0);
1635 setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
1636 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0);
1637 setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0);
1638 setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0);
1639 setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0);
1640 setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0);
1641 setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0);
1642 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0);
1643 setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0);
1644 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0);
1645 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0);
1646 #ifdef KDTRACE_HOOKS
1647 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYSIGT, SEL_UPL, 0);
1650 r_idt.rd_limit = sizeof(idt0) - 1;
1651 r_idt.rd_base = (long) idt;
1655 * Initialize the i8254 before the console so that console
1656 * initialization can use DELAY().
1661 * Initialize the console before we print anything out.
1670 /* Reset and mask the atpics and leave them shut down. */
1674 * Point the ICU spurious interrupt vectors at the APIC spurious
1675 * interrupt handler.
1677 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
1678 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
1681 #error "have you forgotten the isa device?";
1687 if (boothowto & RB_KDB)
1688 kdb_enter(KDB_WHY_BOOTFLAGS,
1689 "Boot flags requested debugger");
1692 identify_cpu(); /* Final stage of CPU initialization */
1693 initializecpu(); /* Initialize CPU registers */
1694 initializecpucache();
1696 /* make an initial tss so cpu can get interrupt stack on syscall! */
1697 common_tss[0].tss_rsp0 = thread0.td_kstack +
1698 kstack0_sz - sizeof(struct pcb);
1699 /* Ensure the stack is aligned to 16 bytes */
1700 common_tss[0].tss_rsp0 &= ~0xFul;
1701 PCPU_SET(rsp0, common_tss[0].tss_rsp0);
1703 /* doublefault stack space, runs on ist1 */
1704 common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];
1707 * NMI stack, runs on ist2. The pcpu pointer is stored just
1708 * above the start of the ist2 stack.
1710 np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1;
1711 np->np_pcpu = (register_t) pc;
1712 common_tss[0].tss_ist2 = (long) np;
1714 /* Set the IO permission bitmap (empty due to tss seg limit) */
1715 common_tss[0].tss_iobase = sizeof(struct amd64tss) +
1716 IOPAGES * PAGE_SIZE;
1718 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
1721 /* Set up the fast syscall stuff */
1722 msr = rdmsr(MSR_EFER) | EFER_SCE;
1723 wrmsr(MSR_EFER, msr);
1724 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
1725 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
1726 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
1727 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
1728 wrmsr(MSR_STAR, msr);
1729 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
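	/*
	 * With EFER.SCE enabled, a 64-bit syscall vectors through
	 * MSR_LSTAR and a 32-bit one through MSR_CSTAR; MSR_STAR supplies
	 * the kernel and user selector bases, and MSR_SF_MASK lists the
	 * rflags bits (notably PSL_I) that the CPU clears on entry.
	 */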
1731 getmemsize(kmdp, physfree);
1732 init_param2(physmem);
	/* now running on new page tables, configured, and u/iom is accessible */
1736 msgbufinit(msgbufp, msgbufsize);
1739 /* transfer to user mode */
1741 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
1742 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
1743 _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
1744 _ufssel = GSEL(GUFS32_SEL, SEL_UPL);
1745 _ugssel = GSEL(GUGS32_SEL, SEL_UPL);
	/* set up proc 0's pcb */
1752 thread0.td_pcb->pcb_flags = 0;
1753 thread0.td_pcb->pcb_cr3 = KPML4phys;
1754 thread0.td_frame = &proc0_tf;
1756 env = getenv("kernelname");
1758 strlcpy(kernelname, env, sizeof(kernelname));
1761 if (inw(0x10) == 0x49d2) {
1763 printf("Xen detected: disabling emulated block and network devices\n");
1768 if (cpu_probe_amdc1e())
1769 cpu_idle_fn = cpu_idle_amdc1e;
1771 /* Location of kernel stack for locore */
1772 return ((u_int64_t)thread0.td_pcb);
1776 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
1779 pcpu->pc_acpi_id = 0xffffffff;
1783 spinlock_enter(void)
1789 if (td->td_md.md_spinlock_count == 0) {
1790 flags = intr_disable();
1791 td->td_md.md_spinlock_count = 1;
1792 td->td_md.md_saved_flags = flags;
1794 td->td_md.md_spinlock_count++;
1806 flags = td->td_md.md_saved_flags;
1807 td->td_md.md_spinlock_count--;
1808 if (td->td_md.md_spinlock_count == 0)
1809 intr_restore(flags);
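/*
 * Interrupts are disabled by the first (outermost) spinlock_enter() and
 * the saved rflags value is only restored once the matching outermost
 * spinlock_exit() brings md_spinlock_count back to zero, so spin lock
 * sections may nest cheaply.
 */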
1813 * Construct a PCB from a trapframe. This is called from kdb_trap() where
1814 * we want to start a backtrace from the function that caused us to enter
1815 * the debugger. We have the context in the trapframe, but base the trace
1816 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
1817 * enough for a backtrace.
1820 makectx(struct trapframe *tf, struct pcb *pcb)
1823 pcb->pcb_r12 = tf->tf_r12;
1824 pcb->pcb_r13 = tf->tf_r13;
1825 pcb->pcb_r14 = tf->tf_r14;
1826 pcb->pcb_r15 = tf->tf_r15;
1827 pcb->pcb_rbp = tf->tf_rbp;
1828 pcb->pcb_rbx = tf->tf_rbx;
1829 pcb->pcb_rip = tf->tf_rip;
1830 pcb->pcb_rsp = tf->tf_rsp;
1834 ptrace_set_pc(struct thread *td, unsigned long addr)
1836 td->td_frame->tf_rip = addr;
1841 ptrace_single_step(struct thread *td)
1843 td->td_frame->tf_rflags |= PSL_T;
1848 ptrace_clear_single_step(struct thread *td)
1850 td->td_frame->tf_rflags &= ~PSL_T;
1855 fill_regs(struct thread *td, struct reg *regs)
1857 struct trapframe *tp;
1860 return (fill_frame_regs(tp, regs));
1864 fill_frame_regs(struct trapframe *tp, struct reg *regs)
1866 regs->r_r15 = tp->tf_r15;
1867 regs->r_r14 = tp->tf_r14;
1868 regs->r_r13 = tp->tf_r13;
1869 regs->r_r12 = tp->tf_r12;
1870 regs->r_r11 = tp->tf_r11;
1871 regs->r_r10 = tp->tf_r10;
1872 regs->r_r9 = tp->tf_r9;
1873 regs->r_r8 = tp->tf_r8;
1874 regs->r_rdi = tp->tf_rdi;
1875 regs->r_rsi = tp->tf_rsi;
1876 regs->r_rbp = tp->tf_rbp;
1877 regs->r_rbx = tp->tf_rbx;
1878 regs->r_rdx = tp->tf_rdx;
1879 regs->r_rcx = tp->tf_rcx;
1880 regs->r_rax = tp->tf_rax;
1881 regs->r_rip = tp->tf_rip;
1882 regs->r_cs = tp->tf_cs;
1883 regs->r_rflags = tp->tf_rflags;
1884 regs->r_rsp = tp->tf_rsp;
1885 regs->r_ss = tp->tf_ss;
1886 if (tp->tf_flags & TF_HASSEGS) {
1887 regs->r_ds = tp->tf_ds;
1888 regs->r_es = tp->tf_es;
1889 regs->r_fs = tp->tf_fs;
1890 regs->r_gs = tp->tf_gs;
1901 set_regs(struct thread *td, struct reg *regs)
1903 struct trapframe *tp;
1907 rflags = regs->r_rflags & 0xffffffff;
1908 if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
1910 tp->tf_r15 = regs->r_r15;
1911 tp->tf_r14 = regs->r_r14;
1912 tp->tf_r13 = regs->r_r13;
1913 tp->tf_r12 = regs->r_r12;
1914 tp->tf_r11 = regs->r_r11;
1915 tp->tf_r10 = regs->r_r10;
1916 tp->tf_r9 = regs->r_r9;
1917 tp->tf_r8 = regs->r_r8;
1918 tp->tf_rdi = regs->r_rdi;
1919 tp->tf_rsi = regs->r_rsi;
1920 tp->tf_rbp = regs->r_rbp;
1921 tp->tf_rbx = regs->r_rbx;
1922 tp->tf_rdx = regs->r_rdx;
1923 tp->tf_rcx = regs->r_rcx;
1924 tp->tf_rax = regs->r_rax;
1925 tp->tf_rip = regs->r_rip;
1926 tp->tf_cs = regs->r_cs;
1927 tp->tf_rflags = rflags;
1928 tp->tf_rsp = regs->r_rsp;
1929 tp->tf_ss = regs->r_ss;
1930 if (0) { /* XXXKIB */
1931 tp->tf_ds = regs->r_ds;
1932 tp->tf_es = regs->r_es;
1933 tp->tf_fs = regs->r_fs;
1934 tp->tf_gs = regs->r_gs;
1935 tp->tf_flags = TF_HASSEGS;
1936 set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
1941 /* XXX check all this stuff! */
1942 /* externalize from sv_xmm */
1944 fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
1946 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
1947 struct envxmm *penv_xmm = &sv_xmm->sv_env;
1951 bzero(fpregs, sizeof(*fpregs));
1953 /* FPU control/status */
1954 penv_fpreg->en_cw = penv_xmm->en_cw;
1955 penv_fpreg->en_sw = penv_xmm->en_sw;
1956 penv_fpreg->en_tw = penv_xmm->en_tw;
1957 penv_fpreg->en_opcode = penv_xmm->en_opcode;
1958 penv_fpreg->en_rip = penv_xmm->en_rip;
1959 penv_fpreg->en_rdp = penv_xmm->en_rdp;
1960 penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
1961 penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;
1964 for (i = 0; i < 8; ++i)
1965 bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);
1968 for (i = 0; i < 16; ++i)
1969 bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
1972 /* internalize from fpregs into sv_xmm */
1974 set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
1976 struct envxmm *penv_xmm = &sv_xmm->sv_env;
1977 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
1981 /* FPU control/status */
1982 penv_xmm->en_cw = penv_fpreg->en_cw;
1983 penv_xmm->en_sw = penv_fpreg->en_sw;
1984 penv_xmm->en_tw = penv_fpreg->en_tw;
1985 penv_xmm->en_opcode = penv_fpreg->en_opcode;
1986 penv_xmm->en_rip = penv_fpreg->en_rip;
1987 penv_xmm->en_rdp = penv_fpreg->en_rdp;
1988 penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
1989 penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;
1992 for (i = 0; i < 8; ++i)
1993 bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);
1996 for (i = 0; i < 16; ++i)
1997 bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
2000 /* externalize from td->pcb */
2002 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2005 KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
2006 P_SHOULDSTOP(td->td_proc),
2007 ("not suspended thread %p", td));
2009 fill_fpregs_xmm(&td->td_pcb->pcb_user_save, fpregs);
2013 /* internalize to td->pcb */
2015 set_fpregs(struct thread *td, struct fpreg *fpregs)
2018 set_fpregs_xmm(fpregs, &td->td_pcb->pcb_user_save);
2024 * Get machine context.
2027 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2030 struct trapframe *tp;
2034 PROC_LOCK(curthread->td_proc);
2035 mcp->mc_onstack = sigonstack(tp->tf_rsp);
2036 PROC_UNLOCK(curthread->td_proc);
2037 mcp->mc_r15 = tp->tf_r15;
2038 mcp->mc_r14 = tp->tf_r14;
2039 mcp->mc_r13 = tp->tf_r13;
2040 mcp->mc_r12 = tp->tf_r12;
2041 mcp->mc_r11 = tp->tf_r11;
2042 mcp->mc_r10 = tp->tf_r10;
2043 mcp->mc_r9 = tp->tf_r9;
2044 mcp->mc_r8 = tp->tf_r8;
2045 mcp->mc_rdi = tp->tf_rdi;
2046 mcp->mc_rsi = tp->tf_rsi;
2047 mcp->mc_rbp = tp->tf_rbp;
2048 mcp->mc_rbx = tp->tf_rbx;
2049 mcp->mc_rcx = tp->tf_rcx;
2050 mcp->mc_rflags = tp->tf_rflags;
2051 if (flags & GET_MC_CLEAR_RET) {
2054 mcp->mc_rflags &= ~PSL_C;
2056 mcp->mc_rax = tp->tf_rax;
2057 mcp->mc_rdx = tp->tf_rdx;
2059 mcp->mc_rip = tp->tf_rip;
2060 mcp->mc_cs = tp->tf_cs;
2061 mcp->mc_rsp = tp->tf_rsp;
2062 mcp->mc_ss = tp->tf_ss;
2063 mcp->mc_ds = tp->tf_ds;
2064 mcp->mc_es = tp->tf_es;
2065 mcp->mc_fs = tp->tf_fs;
2066 mcp->mc_gs = tp->tf_gs;
2067 mcp->mc_flags = tp->tf_flags;
2068 mcp->mc_len = sizeof(*mcp);
2069 get_fpcontext(td, mcp);
2070 mcp->mc_fsbase = pcb->pcb_fsbase;
2071 mcp->mc_gsbase = pcb->pcb_gsbase;
2072 bzero(mcp->mc_spare, sizeof(mcp->mc_spare));
2077 * Set machine context.
 * However, we don't set any but the user-modifiable flags, and we won't
 * touch the %cs selector.
2083 set_mcontext(struct thread *td, const mcontext_t *mcp)
2086 struct trapframe *tp;
2092 if (mcp->mc_len != sizeof(*mcp) ||
2093 (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
2095 rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
2096 (tp->tf_rflags & ~PSL_USERCHANGE);
2097 ret = set_fpcontext(td, mcp);
2100 tp->tf_r15 = mcp->mc_r15;
2101 tp->tf_r14 = mcp->mc_r14;
2102 tp->tf_r13 = mcp->mc_r13;
2103 tp->tf_r12 = mcp->mc_r12;
2104 tp->tf_r11 = mcp->mc_r11;
2105 tp->tf_r10 = mcp->mc_r10;
2106 tp->tf_r9 = mcp->mc_r9;
2107 tp->tf_r8 = mcp->mc_r8;
2108 tp->tf_rdi = mcp->mc_rdi;
2109 tp->tf_rsi = mcp->mc_rsi;
2110 tp->tf_rbp = mcp->mc_rbp;
2111 tp->tf_rbx = mcp->mc_rbx;
2112 tp->tf_rdx = mcp->mc_rdx;
2113 tp->tf_rcx = mcp->mc_rcx;
2114 tp->tf_rax = mcp->mc_rax;
2115 tp->tf_rip = mcp->mc_rip;
2116 tp->tf_rflags = rflags;
2117 tp->tf_rsp = mcp->mc_rsp;
2118 tp->tf_ss = mcp->mc_ss;
2119 tp->tf_flags = mcp->mc_flags;
2120 if (tp->tf_flags & TF_HASSEGS) {
2121 tp->tf_ds = mcp->mc_ds;
2122 tp->tf_es = mcp->mc_es;
2123 tp->tf_fs = mcp->mc_fs;
2124 tp->tf_gs = mcp->mc_gs;
2126 if (mcp->mc_flags & _MC_HASBASES) {
2127 pcb->pcb_fsbase = mcp->mc_fsbase;
2128 pcb->pcb_gsbase = mcp->mc_gsbase;
2130 set_pcb_flags(pcb, PCB_FULL_IRET);
2135 get_fpcontext(struct thread *td, mcontext_t *mcp)
2138 mcp->mc_ownedfp = fpugetregs(td);
2139 bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate,
2140 sizeof(mcp->mc_fpstate));
2141 mcp->mc_fpformat = fpuformat();
2145 set_fpcontext(struct thread *td, const mcontext_t *mcp)
2147 struct savefpu *fpstate;
2149 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2151 else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
2153 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
2154 /* We don't care what state is left in the FPU or PCB. */
2156 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2157 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2158 fpstate = (struct savefpu *)&mcp->mc_fpstate;
2159 fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
2160 fpusetregs(td, fpstate);
2167 fpstate_drop(struct thread *td)
2170 KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
2172 if (PCPU_GET(fpcurthread) == td)
2175 * XXX force a full drop of the fpu. The above only drops it if we
2178 * XXX I don't much like fpugetuserregs()'s semantics of doing a full
2179 * drop. Dropping only to the pcb matches fnsave's behaviour.
2180 * We only need to drop to !PCB_INITDONE in sendsig(). But
2181 * sendsig() is the only caller of fpugetuserregs()... perhaps we just
2182 * have too many layers.
2184 clear_pcb_flags(curthread->td_pcb,
2185 PCB_FPUINITDONE | PCB_USERFPUINITDONE);
2190 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2195 dbregs->dr[0] = rdr0();
2196 dbregs->dr[1] = rdr1();
2197 dbregs->dr[2] = rdr2();
2198 dbregs->dr[3] = rdr3();
2199 dbregs->dr[6] = rdr6();
2200 dbregs->dr[7] = rdr7();
2203 dbregs->dr[0] = pcb->pcb_dr0;
2204 dbregs->dr[1] = pcb->pcb_dr1;
2205 dbregs->dr[2] = pcb->pcb_dr2;
2206 dbregs->dr[3] = pcb->pcb_dr3;
2207 dbregs->dr[6] = pcb->pcb_dr6;
2208 dbregs->dr[7] = pcb->pcb_dr7;
2224 set_dbregs(struct thread *td, struct dbreg *dbregs)
2230 load_dr0(dbregs->dr[0]);
2231 load_dr1(dbregs->dr[1]);
2232 load_dr2(dbregs->dr[2]);
2233 load_dr3(dbregs->dr[3]);
2234 load_dr6(dbregs->dr[6]);
2235 load_dr7(dbregs->dr[7]);
	 * Don't let an illegal value for dr7 get set.  Specifically,
	 * check for undefined settings.  Setting these bit patterns
	 * results in undefined behaviour and can lead to an unexpected
	 * TRCTRAP or a general protection fault right here.
	 * The upper bits of dr6 and dr7 must not be set.
2244 for (i = 0; i < 4; i++) {
2245 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
2247 if (td->td_frame->tf_cs == _ucode32sel &&
2248 DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
2251 if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
2252 (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
2258 * Don't let a process set a breakpoint that is not within the
2259 * process's address space. If a process could do this, it
2260 * could halt the system by setting a breakpoint in the kernel
2261 * (if ddb was enabled). Thus, we need to check to make sure
	 * that no breakpoints are being enabled for addresses outside
	 * the process's address space.
2265 * XXX - what about when the watched area of the user's
2266 * address space is written into from within the kernel
2267 * ... wouldn't that still cause a breakpoint to be generated
2268 * from within kernel mode?
2271 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
2272 /* dr0 is enabled */
2273 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2276 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
2277 /* dr1 is enabled */
2278 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2281 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
2282 /* dr2 is enabled */
2283 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2286 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
2287 /* dr3 is enabled */
2288 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2292 pcb->pcb_dr0 = dbregs->dr[0];
2293 pcb->pcb_dr1 = dbregs->dr[1];
2294 pcb->pcb_dr2 = dbregs->dr[2];
2295 pcb->pcb_dr3 = dbregs->dr[3];
2296 pcb->pcb_dr6 = dbregs->dr[6];
2297 pcb->pcb_dr7 = dbregs->dr[7];
2299 set_pcb_flags(pcb, PCB_DBREGS);
2309 load_dr7(0); /* Turn off the control bits first */
2318 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0 otherwise.
2322 user_dbreg_trap(void)
2324 u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */
2325 u_int64_t bp; /* breakpoint bits extracted from dr6 */
2326 int nbp; /* number of breakpoints that triggered */
2327 caddr_t addr[4]; /* breakpoint addresses */
2331 if ((dr7 & 0x000000ff) == 0) {
2333 * all GE and LE bits in the dr7 register are zero,
2334 * thus the trap couldn't have been caused by the
2335 * hardware debug registers
2342 bp = dr6 & 0x0000000f;
	 * None of the breakpoint bits are set, meaning this
	 * trap was not caused by any of the debug registers.
	 * At least one of the breakpoints was hit; check to see
	 * which ones, and whether any of them are user space addresses.
2358 addr[nbp++] = (caddr_t)rdr0();
2361 addr[nbp++] = (caddr_t)rdr1();
2364 addr[nbp++] = (caddr_t)rdr2();
2367 addr[nbp++] = (caddr_t)rdr3();
2370 for (i = 0; i < nbp; i++) {
2371 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
2373 * addr[i] is in user space
2380 * None of the breakpoints are in user space.
2388 * Provide inb() and outb() as functions. They are normally only available as
2389 * inline functions, thus cannot be called from the debugger.
2392 /* silence compiler warnings */
2393 u_char inb_(u_short);
2394 void outb_(u_short, u_char);
2403 outb_(u_short port, u_char data)