/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_perfmon.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>
#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif
#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <machine/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef DEV_ISA
#include <i386/isa/icu.h>
#endif

#ifdef XBOX
#include <machine/xbox.h>
int arch_i386_is_xbox = 0;
uint32_t arch_i386_xbox_memsize = 0;
#endif

#ifdef XEN
/* XEN includes */
#include <machine/xen/xen-os.h>
#include <xen/hypervisor.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>
#include <xen/xen_intr.h>

void Xhypervisor_callback(void);
void failsafe_callback(void);

extern trap_info_t trap_table[];
struct proc_ldt default_proc_ldt;
extern int init_first;
extern unsigned long physfree;
#endif /* XEN */
/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);	/* XXX header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
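
/*
 * Added sketch (not original code): how the sigreturn() paths below use
 * these checks on a user-supplied context.  CS_SECURE() rejects any %cs
 * whose requested privilege level is not user (SEL_UPL); EFL_SECURE()
 * accepts a new eflags value only if every bit outside PSL_USERCHANGE
 * is unchanged.
 */
#if 0	/* example only */
	if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF))
		return (EINVAL);	/* privileged flag would change */
	if (!CS_SECURE(scp->sc_cs))
		trapsignal(td, &ksi);	/* non-user %cs rejected */
#endif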
#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define	CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int  set_fpcontext(struct thread *td, const mcontext_t *mcp);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
extern vm_offset_t ksym_start, ksym_end;

/* Intel ICH registers */
#define	ICH_PMBASE	0x400
#define	ICH_SMI_EN	(ICH_PMBASE + 0x30)

int	_udatasel, _ucodesel;
static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);

#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif

FEATURE(pae, "Physical Address Extensions");
/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
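
/*
 * Layout sketch (added illustration): both arrays hold { start, end }
 * pairs of physical addresses terminated by a 0/0 pair, e.g. on a
 * hypothetical machine with two usable ranges:
 *
 *	phys_avail[0] = 0x1000;		phys_avail[1] = 0x9f000;
 *	phys_avail[2] = 0x100000;	phys_avail[3] = 0x3fff0000;
 *	phys_avail[4] = 0;		phys_avail[5] = 0;
 */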
struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mem_range_softc mem_range_softc;
static void
cpu_startup(void *dummy)
{
	char *sysenv;
	uintmax_t memsize;

	/*
	 * On MacBooks, we need to disallow the legacy USB circuit to
	 * generate an SMI# because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			printf("Disabling LEGACY_USB_EN bit on "
			    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	panicifcpuunsupported();

	memsize = 0;
	sysenv = getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10);
		freeenv(sysenv);
	}
	if (memsize > 0)
		printf("real memory = %ju (%ju MB)\n", memsize << 10,
		    (memsize << 10) / 1048576);
	else
		printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
		    ptoa((uintmax_t)Maxmem) / 1048576);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
	cpu_setregs();
}
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
#ifdef COMPAT_43
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);
	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */
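
/*
 * Resulting user stack layout (added sketch): fp points at a struct
 * osigframe whose leading members are the handler's arguments
 * (sf_signum, sf_arg2, sf_scp) and whose embedded sf_siginfo.si_sc is
 * the osigcontext that osigreturn() below validates and restores; the
 * trampoline at PS_STRINGS - szosigcode invokes the handler and then
 * calls osigreturn() on that context.
 */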
#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_FREEBSD4 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);

	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	struct proc *p = td->td_proc;
	int eflags, error;
	ksiginfo_t ksi;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 *
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	SIGSETOLD(td->td_sigmask, scp->sc_mask);
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */
#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 *
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	td->td_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	return (EJUSTRETURN);
}
#endif /* COMPAT_FREEBSD4 */
int
sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const ucontext_t *ucp;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 *
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	td->td_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	return (EJUSTRETURN);
}
/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{
	/* Not applicable */
}
/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	register_t reg;
	uint64_t tsc1, tsc2;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/* If we're booting, trust the rate calibrated moments ago. */
	if (cold) {
		*rate = tsc_freq;
		return (0);
	}

#ifdef SMP
	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	tsc1 = rdtsc();
	DELAY(1000);
	tsc2 = rdtsc();
	intr_restore(reg);

#ifdef SMP
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
#endif

	/*
	 * Calculate the difference in readings, convert to Mhz, and
	 * subtract 0.5% of the total.  Empirical testing has shown that
	 * overhead in DELAY() works out to approximately this value.
	 */
	tsc2 -= tsc1;
	*rate = tsc2 * 1000 - tsc2 * 5;
	return (0);
}
void (*cpu_idle_hook)(void) = NULL;	/* ACPI idle hook. */

#ifdef XEN
void
cpu_halt(void)
{
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}

int scheduler_running;

static void
cpu_idle_hlt(int busy)
{

	scheduler_running = 1;
	enable_intr();
	idle_block();
}

#else
/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

static void
cpu_idle_hlt(int busy)
{
	/*
	 * We must absolutely guarantee that hlt is the next instruction
	 * after sti or we introduce a timing window.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
}
#endif

static void
cpu_idle_acpi(int busy)
{

	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook();
	else
		__asm __volatile("sti; hlt");
}
static int cpu_ident_amdc1e = 0;

static void
cpu_probe_amdc1e(void)
{
	int i;

	/*
	 * Forget it, if we're not using local APIC timer.
	 */
	if (resource_disabled("apic", 0) ||
	    (resource_int_value("apic", 0, "clock", &i) == 0 && i == 0))
		return;

	/*
	 * Detect the presence of C1E capability mostly on latest
	 * dual-cores (or future) k8 family.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
	}
}
/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * Reference:
 *   "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 *   #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)
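
/*
 * Added sketch of the disable sequence (the live version is in
 * cpu_idle_amdc1e() below): read the IPM MSR and, only when one of the
 * trigger bits is set, write it back with both bits cleared.
 *
 *	uint64_t msr = rdmsr(MSR_AMDK8_IPM);
 *	if (msr & AMDK8_CMPHALT)
 *		wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);
 */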
static void
cpu_idle_amdc1e(int busy)
{

	disable_intr();
	if (sched_runnable())
		enable_intr();
	else {
		uint64_t msr;

		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);

		__asm __volatile("sti; hlt");
	}
}

static void
cpu_idle_spin(int busy)
{
	return;
}

#ifdef XEN
void (*cpu_idle_fn)(int) = cpu_idle_hlt;
#else
void (*cpu_idle_fn)(int) = cpu_idle_acpi;
#endif
void
cpu_idle(int busy)
{
#if defined(SMP) && !defined(XEN)
	if (mp_grab_cpu_hlt())
		return;
#endif
	cpu_idle_fn(busy);
}
/*
 * mwait cpu power states.  Lower 4 bits are sub-states.
 */
#define	MWAIT_C0	0xf0
#define	MWAIT_C1	0x00
#define	MWAIT_C2	0x10
#define	MWAIT_C3	0x20
#define	MWAIT_C4	0x30

#define	MWAIT_DISABLED	0x0
#define	MWAIT_WOKEN	0x1
#define	MWAIT_WAITING	0x2
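
/*
 * Handshake sketch (added note, not original code): an idling CPU
 * publishes MWAIT_WAITING in its per-CPU monitor buffer, arms the
 * address monitor with cpu_monitor(), and then sleeps in cpu_mwait().
 * A remote CPU wakes it simply by storing MWAIT_WOKEN to the same word
 * (see cpu_idle_wakeup() below), which triggers the monitored write
 * and ends the mwait without needing an IPI.
 */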
static void
cpu_idle_mwait(int busy)
{
	int *mwait;

	mwait = (int *)PCPU_PTR(monitorbuf);
	*mwait = MWAIT_WAITING;
	if (sched_runnable())
		return;
	cpu_monitor(mwait, 0, 0);
	if (*mwait == MWAIT_WAITING)
		cpu_mwait(0, MWAIT_C1);
}

static void
cpu_idle_mwait_hlt(int busy)
{
	int *mwait;

	mwait = (int *)PCPU_PTR(monitorbuf);
	if (busy == 0) {
		*mwait = MWAIT_DISABLED;
		cpu_idle_hlt(busy);
		return;
	}
	*mwait = MWAIT_WAITING;
	if (sched_runnable())
		return;
	cpu_monitor(mwait, 0, 0);
	if (*mwait == MWAIT_WAITING)
		cpu_mwait(0, MWAIT_C1);
}
int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *mwait;

	if (cpu_idle_fn == cpu_idle_spin)
		return (1);
	if (cpu_idle_fn != cpu_idle_mwait && cpu_idle_fn != cpu_idle_mwait_hlt)
		return (0);
	pcpu = pcpu_find(cpu);
	mwait = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (cpu_idle_fn == cpu_idle_mwait_hlt && *mwait == MWAIT_DISABLED)
		return (0);
	*mwait = MWAIT_WOKEN;

	return (1);
}
/*
 * Ordered by speed/power consumption.
 */
static struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_mwait_hlt, "mwait_hlt" },
	{ cpu_idle_amdc1e, "amdc1e" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },
	{ NULL, NULL }
};
static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	*p = '\0';
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 &&
		    cpu_ident_amdc1e == 0)
			continue;
		p += sprintf(p, "%s, ", idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}
static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 &&
		    cpu_ident_amdc1e == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}
SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");
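
/*
 * Usage sketch (added; example output invented): these knobs appear to
 * userland via sysctl(8) as machdep.idle_available (read-only) and
 * machdep.idle (read-write):
 *
 *	# sysctl machdep.idle_available
 *	machdep.idle_available: spin, hlt, acpi,
 *	# sysctl machdep.idle=hlt
 *	machdep.idle: acpi -> hlt
 */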
/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(td, entry, stack, ps_strings)
	struct thread *td;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;
	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}
void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();

	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
	load_gs(_udatasel);
}
u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");
/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

#ifdef XEN
union descriptor *gdt;
union descriptor *ldt;
#else
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#endif
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern vm_offset_t proc0kstack;
/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GUFS_SEL	2 %fs Descriptor for user */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GUGS_SEL	3 %gs Descriptor for user */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GCODE_SEL	4 Code Descriptor for kernel */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GDATA_SEL	5 Data Descriptor for kernel */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GUCODE_SEL	6 Code Descriptor for user */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GUDATA_SEL	7 Data Descriptor for user */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = (int) ldt,
	.ssd_limit = sizeof(ldt)-1,
	.ssd_type = SDT_SYSLDT,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = (int) ldt,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = (int) &dblfault_tss,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
/* GNDIS_SEL	18 NDIS Descriptor */
{	.ssd_xx = 0, .ssd_xx1 = 0,	},
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	.ssd_xx = 0, .ssd_xx1 = 0,	},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_xx = 0, .ssd_xx1 = 0,	},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_xx = 0, .ssd_xx1 = 0,	},
	/* Code Descriptor for user */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
	/* Null Descriptor - overwritten by call gate */
{	.ssd_xx = 0, .ssd_xx1 = 0,	},
	/* Data Descriptor for user */
{	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,	},
};
void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func) >> 16;
}

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}
/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
}
#endif /* DDB */
void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{

	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}
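
/*
 * Added note: this undoes the hardware descriptor packing, e.g. a base
 * of 0x12345678 is stored split as sd_lobase = 0x345678 and
 * sd_hibase = 0x12, and reassembled above as
 * (0x12 << 24) | 0x345678 == 0x12345678.
 */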
static int
add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
{
	int i, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (boothowto & RB_VERBOSE)
		printf("SMAP type=%02x base=%016llx len=%016llx\n",
		    smap->type, smap->base, smap->length);

	if (smap->type != SMAP_TYPE_MEMORY)
		return (1);

	if (smap->length == 0)
		return (1);

#ifndef PAE
	if (smap->base >= 0xffffffff) {
		printf("%uK of memory above 4GB ignored\n",
		    (u_int)(smap->length / 1024));
		return (1);
	}
#endif

	for (i = 0; i <= physmap_idx; i += 2) {
		if (smap->base < physmap[i + 1]) {
			if (boothowto & RB_VERBOSE)
				printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
			return (1);
		}
	}

	if (smap->base == physmap[physmap_idx + 1]) {
		physmap[physmap_idx + 1] += smap->length;
		return (1);
	}

	physmap_idx += 2;
	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYSMAP_SIZE) {
		printf(
		"Too many segments in the physical address map, giving up\n");
		return (0);
	}
	physmap[physmap_idx] = smap->base;
	physmap[physmap_idx + 1] = smap->base + smap->length;
	return (1);
}
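
/*
 * Worked example (added illustration, hypothetical values): two SMAP
 * entries { base 0x0, len 0x9f000 } and { base 0x100000, len 0x3ff00000 }
 * arrive in order, don't overlap, and aren't adjacent, so they yield:
 *
 *	physmap[0] = 0x0	physmap[1] = 0x9f000
 *	physmap[2] = 0x100000	physmap[3] = 0x40000000
 */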
/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(int first)
{
	int i, off, physmap_idx, pa_indx, da_indx;
	int hasbrokenint12, has_smap;
	u_long physmem_tunable;
	u_int extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	struct bios_smap *smap, *smapbase, *smapend;
	u_int32_t smapsize;
	quad_t dcons_addr, dcons_size;
	caddr_t kmdp;

	has_smap = 0;
#ifdef XBOX
	if (arch_i386_is_xbox) {
		/*
		 * We queried the memory size before, so chop off 4MB for
		 * the framebuffer and inform the OS of this.
		 */
		physmap[0] = 0;
		physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
		physmap_idx = 0;
		goto physmap_done;
	}
#endif
#ifdef XEN
	Maxmem = xen_start_info->nr_pages - init_first;
	physmem = Maxmem;
	basemem = 0;
	physmap[0] = init_first << PAGE_SHIFT;
	physmap[1] = ptoa(Maxmem) - round_page(MSGBUF_SIZE);
	physmap_idx = 0;
	goto physmap_done;
#endif
	hasbrokenint12 = 0;
	TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
	bzero(&vmf, sizeof(vmf));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * Some newer BIOSes has broken INT 12H implementation which cause
	 * kernel panic immediately.  In this case, we need to scan SMAP
	 * with INT 15:E820 first, then determine base memory size.
	 */
	if (hasbrokenint12) {
		goto int15e820;
	}

	/*
	 * Perform "base memory" related probes & setup
	 */
	vm86_intcall(0x12, &vmf);
	basemem = vmf.vmf_ax;
	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
			basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE)
		pmap_kenter(KERNBASE + pa, pa);

	/*
	 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
	 * the vm86 page table so that vm86 can scribble on them using
	 * the vm86 map too.  XXX: why 2 ways for this and only 1 way for
	 * page 0, at least as initialized here?
	 */
	pte = (pt_entry_t *)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

int15e820:
	/*
	 * Fetch the memory map with INT 15:E820.  First, check to see
	 * if the loader supplied it and use that if so.  Otherwise,
	 * use vm86 to invoke the BIOS call directly.
	 */
	physmap_idx = 0;
	smapbase = NULL;
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf32 kernel");
	if (kmdp != NULL)
		smapbase = (struct bios_smap *)preload_search_info(kmdp,
		    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase != NULL) {
		/* subr_module.c says:
		 * "Consumer may safely assume that size value precedes data."
		 * ie: an int32_t immediately precedes smap.
		 */
		smapsize = *((u_int32_t *)smapbase - 1);
		smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
		has_smap = 1;

		for (smap = smapbase; smap < smapend; smap++)
			if (!add_smap_entry(smap, physmap, &physmap_idx))
				break;
	} else {
		/*
		 * map page 1 R/W into the kernel page table so we can use it
		 * as a buffer.  The kernel will unmap this page later.
		 */
		pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
		vmc.npages = 0;
		smap = (void *)vm86_addpage(&vmc, 1, KERNBASE +
		    (1 << PAGE_SHIFT));
		vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

		vmf.vmf_ebx = 0;
		do {
			vmf.vmf_eax = 0xE820;
			vmf.vmf_edx = SMAP_SIG;
			vmf.vmf_ecx = sizeof(struct bios_smap);
			i = vm86_datacall(0x15, &vmf, &vmc);
			if (i || vmf.vmf_eax != SMAP_SIG)
				break;
			has_smap = 1;
			if (!add_smap_entry(smap, physmap, &physmap_idx))
				break;
		} while (vmf.vmf_ebx != 0);
	}
	/*
	 * Perform "base memory" related probes & setup based on SMAP
	 */
	if (basemem == 0) {
		for (i = 0; i <= physmap_idx; i += 2) {
			if (physmap[i] == 0x00000000) {
				basemem = physmap[i + 1] / 1024;
				break;
			}
		}

		/*
		 * XXX this function is horribly organized and has to do the
		 * same things that it does above here.
		 */
		if (basemem == 0)
			basemem = 640;
		if (basemem > 640) {
			printf(
		    "Preposterous BIOS basemem of %uK, truncating to 640K\n",
			    basemem);
			basemem = 640;
		}

		/*
		 * Let vm86 scribble on pages between basemem and
		 * ISA_HOLE_START, as above.
		 */
		for (pa = trunc_page(basemem * 1024);
		     pa < ISA_HOLE_START; pa += PAGE_SIZE)
			pmap_kenter(KERNBASE + pa, pa);
		pte = (pt_entry_t *)vm86paddr;
		for (i = basemem / 4; i < 160; i++)
			pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
	}
	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1]);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
	 * the amount of memory in the system.
	 */
	if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];
	pte = CMAP1;

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;
	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR1;

			full = FALSE;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer. Otherwise start a new chunk.
			 * Note that "end" points one higher than end,
			 * making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we at or past the end, bump up Maxmem
			 * so that we keep going. The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	*pte = 0;
	invltlb();
#ifdef XEN
	phys_avail[0] = physfree;
	phys_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
#endif

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);
}

#ifdef XEN
#define MTOPSIZE (1<<(14 + PAGE_SHIFT))
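
/*
 * Added note: with the usual PAGE_SHIFT of 12 this is 1 << 26, i.e.
 * 64MB, matching the 64MB machine-to-physical map mentioned in the
 * comment inside init386() below.
 */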
2454 unsigned long gdtmachpfn;
2455 int error, gsel_tss, metadata_missing, x, pa;
2457 struct callback_register event = {
2458 .type = CALLBACKTYPE_event,
2459 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)Xhypervisor_callback },
2461 struct callback_register failsafe = {
2462 .type = CALLBACKTYPE_failsafe,
2463 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback },
2466 thread0.td_kstack = proc0kstack;
2467 thread0.td_pcb = (struct pcb *)
2468 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
2471 * This may be done better later if it gets more high level
2472 * components in it. If so just link td->td_proc here.
2474 proc_linkup0(&proc0, &thread0);
2476 metadata_missing = 0;
2477 if (xen_start_info->mod_start) {
2478 preload_metadata = (caddr_t)xen_start_info->mod_start;
2479 preload_bootstrap_relocate(KERNBASE);
2481 metadata_missing = 1;
2484 kern_envp = static_env;
2485 else if ((caddr_t)xen_start_info->cmd_line)
2486 kern_envp = xen_setbootenv((caddr_t)xen_start_info->cmd_line);
2488 boothowto |= xen_boothowto(kern_envp);
2490 /* Init basic tunables, hz, etc. */
2494 * XEN occupies a portion of the upper virtual address space.
2495 * At its base it manages an array mapping machine page frames
2496 * to physical page frames - hence we need to be able to
2497 * access 4GB - (64MB - 4MB + 64k).
2499 gdt_segs[GPRIV_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2500 gdt_segs[GUFS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2501 gdt_segs[GUGS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2502 gdt_segs[GCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2503 gdt_segs[GDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2504 gdt_segs[GUCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2505 gdt_segs[GUDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2506 gdt_segs[GBIOSLOWMEM_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
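/*
 * Added note: atop() expresses the limits in pages; they extend
 * MTOPSIZE past HYPERVISOR_VIRT_START so the kernel can still read
 * the machine-to-physical array at the bottom of Xen's reserved
 * range while the rest of the hypervisor hole stays out of reach.
 */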
2509 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2510 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2512 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V | PG_RW);
2513 bzero(gdt, PAGE_SIZE);
2514 for (x = 0; x < NGDT; x++)
2515 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2517 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
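/*
 * Added note: Xen only accepts a GDT whose backing frames are mapped
 * read-only in the guest, hence PT_SET_MA() below re-maps the page
 * without PG_RW before the frame is handed to HYPERVISOR_set_gdt().
 */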
2519 gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
2520 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V);
2521 PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0);
2525 if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) {
2526 panic("set_trap_table failed - error %d\n", error);
2529 error = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
2531 error = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
2532 #if CONFIG_XEN_COMPAT <= 0x030002
2533 if (error == -ENOXENSYS)
2534 HYPERVISOR_set_callbacks(GSEL(GCODE_SEL, SEL_KPL),
2535 (unsigned long)Xhypervisor_callback,
2536 GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
2538 pcpu_init(pc, 0, sizeof(struct pcpu));
2539 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
2540 pmap_kenter(pa + KERNBASE, pa);
2541 dpcpu_init((void *)(first + KERNBASE), 0);
2542 first += DPCPU_SIZE;
2544 PCPU_SET(prvspace, pc);
2545 PCPU_SET(curthread, &thread0);
2546 PCPU_SET(curpcb, thread0.td_pcb);
2549 * Initialize mutexes.
2551 * icu_lock: in order to allow an interrupt to occur in a critical
2552 * section, to set pcpu->ipending (etc...) properly, we
2553 * must be able to get the icu lock, so it can't be
2557 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2559 /* make ldt memory segments */
2560 PT_SET_MA(ldt, xpmap_ptom(VTOP(ldt)) | PG_V | PG_RW);
2561 bzero(ldt, PAGE_SIZE);
2562 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2563 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2564 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2565 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2567 default_proc_ldt.ldt_base = (caddr_t)ldt;
2568 default_proc_ldt.ldt_len = 6;
2569 _default_ldt = (int)&default_proc_ldt;
2570 PCPU_SET(currentldt, _default_ldt);
2571 PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
2572 xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));
2574 #if defined(XEN_PRIVILEGED)
2576 * Initialize the i8254 before the console so that console
2577 * initialization can use DELAY().
2583 * Initialize the console before we print anything out.
2587 if (metadata_missing)
2588 printf("WARNING: loader(8) metadata is missing!\n");
2596 ksym_start = bootinfo.bi_symtab;
2597 ksym_end = bootinfo.bi_esymtab;
2603 if (boothowto & RB_KDB)
2604 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
2607 finishidentcpu(); /* Final stage of CPU initialization */
2608 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2609 GSEL(GCODE_SEL, SEL_KPL));
2610 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2611 GSEL(GCODE_SEL, SEL_KPL));
2612 initializecpu(); /* Initialize CPU registers */
2614 /* make an initial tss so cpu can get interrupt stack on syscall! */
2615 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2616 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2617 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
2618 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2619 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2620 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL),
2621 PCPU_GET(common_tss.tss_esp0));
2623 /* pointer to selector slot for %fs/%gs */
2624 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2626 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2627 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2628 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2629 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2631 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2633 dblfault_tss.tss_cr3 = (int)IdlePTD;
2635 dblfault_tss.tss_eip = (int)dblfault_handler;
2636 dblfault_tss.tss_eflags = PSL_KERNEL;
2637 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2638 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2639 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2640 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2641 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2645 init_param2(physmem);
2647 /* now running on new page tables, configured, and u/iom is accessible */
2649 msgbufinit(msgbufp, MSGBUF_SIZE);
2650 /* transfer to user mode */
2652 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2653 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2655 /* setup proc 0's pcb */
2656 thread0.td_pcb->pcb_flags = 0;
2658 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2660 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2662 thread0.td_pcb->pcb_ext = 0;
2663 thread0.td_frame = &proc0_tf;
2664 thread0.td_pcb->pcb_fsd = PCPU_GET(fsgs_gdt)[0];
2665 thread0.td_pcb->pcb_gsd = PCPU_GET(fsgs_gdt)[1];
2667 if (cpu_probe_amdc1e())
2668 cpu_idle_fn = cpu_idle_amdc1e;
2676 struct gate_descriptor *gdp;
2677 int gsel_tss, metadata_missing, x, pa;
2680 thread0.td_kstack = proc0kstack;
2681 thread0.td_pcb = (struct pcb *)
2682 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
2685 * This may be done better later if it gets more high level
2686 * components in it. If so, just link td->td_proc here.
2688 proc_linkup0(&proc0, &thread0);
2690 metadata_missing = 0;
2691 if (bootinfo.bi_modulep) {
2692 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
2693 preload_bootstrap_relocate(KERNBASE);
2695 metadata_missing = 1;
2698 kern_envp = static_env;
2699 else if (bootinfo.bi_envp)
2700 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
2702 /* Init basic tunables, hz, etc. */
2706 * Make gdt memory segments. All segments cover the full 4GB
2707 * of address space and permissions are enforced at page level.
2709 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
2710 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
2711 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
2712 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
2713 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
2714 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
2717 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
2718 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2719 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2721 for (x = 0; x < NGDT; x++)
2722 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2724 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
2725 r_gdt.rd_base = (int) gdt;
2726 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2729 pcpu_init(pc, 0, sizeof(struct pcpu));
2730 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
2731 pmap_kenter(pa + KERNBASE, pa);
2732 dpcpu_init((void *)(first + KERNBASE), 0);
2733 first += DPCPU_SIZE;
2734 PCPU_SET(prvspace, pc);
2735 PCPU_SET(curthread, &thread0);
2736 PCPU_SET(curpcb, thread0.td_pcb);
2739 * Initialize mutexes.
2741 * icu_lock: in order to allow an interrupt to occur in a critical
2742 * section, to set pcpu->ipending (etc...) properly, we
2743 * must be able to get the icu lock, so it can't be
2747 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2749 /* make ldt memory segments */
2750 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2751 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2752 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2753 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2755 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2757 PCPU_SET(currentldt, _default_ldt);
2760 for (x = 0; x < NIDT; x++)
2761 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2762 GSEL(GCODE_SEL, SEL_KPL));
2763 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2764 GSEL(GCODE_SEL, SEL_KPL));
2765 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2766 GSEL(GCODE_SEL, SEL_KPL));
2767 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2768 GSEL(GCODE_SEL, SEL_KPL));
2769 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2770 GSEL(GCODE_SEL, SEL_KPL));
2771 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2772 GSEL(GCODE_SEL, SEL_KPL));
2773 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2774 GSEL(GCODE_SEL, SEL_KPL));
2775 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2776 GSEL(GCODE_SEL, SEL_KPL));
2777 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
2778 GSEL(GCODE_SEL, SEL_KPL));
2779 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2780 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2781 GSEL(GCODE_SEL, SEL_KPL));
2782 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2783 GSEL(GCODE_SEL, SEL_KPL));
2784 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2785 GSEL(GCODE_SEL, SEL_KPL));
2786 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2787 GSEL(GCODE_SEL, SEL_KPL));
2788 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2789 GSEL(GCODE_SEL, SEL_KPL));
2790 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2791 GSEL(GCODE_SEL, SEL_KPL));
2792 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2793 GSEL(GCODE_SEL, SEL_KPL));
2794 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2795 GSEL(GCODE_SEL, SEL_KPL));
2796 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2797 GSEL(GCODE_SEL, SEL_KPL));
2798 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2799 GSEL(GCODE_SEL, SEL_KPL));
2800 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2801 GSEL(GCODE_SEL, SEL_KPL));
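/*
 * Added note: SDT_SYS386TGT installs a trap gate (interrupts remain
 * enabled on entry) while SDT_SYS386IGT installs an interrupt gate
 * (interrupts are masked on entry); the gates created with SEL_UPL -
 * breakpoint, overflow and int 0x80 - may be raised directly from
 * user mode.
 */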
2803 r_idt.rd_limit = sizeof(idt0) - 1;
2804 r_idt.rd_base = (int) idt;
2809 * The following code queries the PCI ID of 0:0:0. For the XBOX,
2810 * this should be 0x10de / 0x02a5.
2812 * This is exactly what Linux does.
2814 outl(0xcf8, 0x80000000);
2815 if (inl(0xcfc) == 0x02a510de) {
2816 arch_i386_is_xbox = 1;
2817 pic16l_setled(XBOX_LED_GREEN);
2820 * We are an XBOX, but we may have either 64MB or 128MB of
2821 * memory. The PCI host bridge should be programmed for this,
2822 * so we just query it.
2824 outl(0xcf8, 0x80000084);
2825 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
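/*
 * Illustrative sketch (added; not in the original source): the two
 * accesses above are PCI configuration mechanism #1.  A generic
 * 32-bit config-space read looks like this; the helper name is
 * hypothetical and the block is not compiled.
 */
#if 0
static uint32_t
pci_cfg_read32(int bus, int slot, int func, int reg)
{
	uint32_t addr;

	addr = 0x80000000u |			/* enable bit */
	    ((uint32_t)bus << 16) |		/* bus 0-255 */
	    ((uint32_t)slot << 11) |		/* device 0-31 */
	    ((uint32_t)func << 8) |		/* function 0-7 */
	    (reg & 0xfc);			/* dword-aligned register */
	outl(0xcf8, addr);			/* select config address */
	return (inl(0xcfc));			/* read config data */
}
#endif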
2830 * Initialize the i8254 before the console so that console
2831 * initialization can use DELAY().
2836 * Initialize the console before we print anything out.
2840 if (metadata_missing)
2841 printf("WARNING: loader(8) metadata is missing!\n");
2849 ksym_start = bootinfo.bi_symtab;
2850 ksym_end = bootinfo.bi_esymtab;
2856 if (boothowto & RB_KDB)
2857 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
2860 finishidentcpu(); /* Final stage of CPU initialization */
2861 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2862 GSEL(GCODE_SEL, SEL_KPL));
2863 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2864 GSEL(GCODE_SEL, SEL_KPL));
2865 initializecpu(); /* Initialize CPU registers */
2867 /* make an initial tss so cpu can get interrupt stack on syscall! */
2868 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2869 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2870 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
2871 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2872 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2873 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2874 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2875 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2878 /* pointer to selector slot for %fs/%gs */
2879 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2881 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2882 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2883 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2884 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2886 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2888 dblfault_tss.tss_cr3 = (int)IdlePTD;
2890 dblfault_tss.tss_eip = (int)dblfault_handler;
2891 dblfault_tss.tss_eflags = PSL_KERNEL;
2892 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2893 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2894 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2895 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2896 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2900 init_param2(physmem);
2902 /* now running on new page tables, configured, and u/iom is accessible */
2904 msgbufinit(msgbufp, MSGBUF_SIZE);
2906 /* make a call gate to reenter kernel with */
2907 gdp = &ldt[LSYS5CALLS_SEL].gd;
2909 x = (int) &IDTVEC(lcall_syscall);
2910 gdp->gd_looffset = x;
2911 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2913 gdp->gd_type = SDT_SYS386CGT;
2914 gdp->gd_dpl = SEL_UPL;
2916 gdp->gd_hioffset = x >> 16;
2918 /* XXX does this work? */
2920 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2921 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
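/*
 * Added note: this call gate is the traditional "lcall $7,$0" system
 * call entry used by SysV-style binaries; the BSD/OS and Solaris
 * slots are simply aliases of the same gate.
 */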
2923 /* transfer to user mode */
2925 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2926 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2928 /* setup proc 0's pcb */
2929 thread0.td_pcb->pcb_flags = 0;
2931 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2933 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2935 thread0.td_pcb->pcb_ext = 0;
2936 thread0.td_frame = &proc0_tf;
2938 if (cpu_probe_amdc1e())
2939 cpu_idle_fn = cpu_idle_amdc1e;
2944 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
2947 pcpu->pc_acpi_id = 0xffffffff;
2951 spinlock_enter(void)
2956 if (td->td_md.md_spinlock_count == 0)
2957 td->td_md.md_saved_flags = intr_disable();
2958 td->td_md.md_spinlock_count++;
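/*
 * Added note: interrupts are disabled only on the outermost
 * acquisition; md_spinlock_count tracks the nesting depth so that
 * spinlock_exit() restores the saved flags only when the count
 * drops back to zero.
 */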
2969 td->td_md.md_spinlock_count--;
2970 if (td->td_md.md_spinlock_count == 0)
2971 intr_restore(td->td_md.md_saved_flags);
2974 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2975 static void f00f_hack(void *unused);
2976 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
2979 f00f_hack(void *unused)
2981 struct gate_descriptor *new_idt;
2989 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2991 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
2993 panic("kmem_alloc returned 0");
2995 /* Put the problematic entry (#6) at the end of the lower page. */
2996 new_idt = (struct gate_descriptor*)
2997 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
2998 bcopy(idt, new_idt, sizeof(idt0));
2999 r_idt.rd_base = (u_int)new_idt;
3002 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
3003 VM_PROT_READ, FALSE) != KERN_SUCCESS)
3004 panic("vm_map_protect failed");
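/*
 * Added note (mechanism, roughly): the buggy instruction encoding
 * makes the CPU issue a locked access to IDT entry 6; with entries
 * 0-6 on a read-only page that access raises a page fault instead of
 * wedging the bus, and the remaining entries, including the page
 * fault gate, live on the unprotected upper page, so the kernel
 * regains control.
 */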
3006 #endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
3009 * Construct a PCB from a trapframe. This is called from kdb_trap() where
3010 * we want to start a backtrace from the function that caused us to enter
3011 * the debugger. We have the context in the trapframe, but base the trace
3012 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
3013 * enough for a backtrace.
3016 makectx(struct trapframe *tf, struct pcb *pcb)
3019 pcb->pcb_edi = tf->tf_edi;
3020 pcb->pcb_esi = tf->tf_esi;
3021 pcb->pcb_ebp = tf->tf_ebp;
3022 pcb->pcb_ebx = tf->tf_ebx;
3023 pcb->pcb_eip = tf->tf_eip;
3024 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
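/*
 * Added note: a trap taken in kernel mode pushes no esp/ss, so for
 * that case the pre-trap stack pointer is recovered as the end of
 * the trapframe minus the two slots that were never pushed.
 */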
3028 ptrace_set_pc(struct thread *td, u_long addr)
3031 td->td_frame->tf_eip = addr;
3036 ptrace_single_step(struct thread *td)
3038 td->td_frame->tf_eflags |= PSL_T;
3043 ptrace_clear_single_step(struct thread *td)
3045 td->td_frame->tf_eflags &= ~PSL_T;
3050 fill_regs(struct thread *td, struct reg *regs)
3053 struct trapframe *tp;
3057 regs->r_fs = tp->tf_fs;
3058 regs->r_es = tp->tf_es;
3059 regs->r_ds = tp->tf_ds;
3060 regs->r_edi = tp->tf_edi;
3061 regs->r_esi = tp->tf_esi;
3062 regs->r_ebp = tp->tf_ebp;
3063 regs->r_ebx = tp->tf_ebx;
3064 regs->r_edx = tp->tf_edx;
3065 regs->r_ecx = tp->tf_ecx;
3066 regs->r_eax = tp->tf_eax;
3067 regs->r_eip = tp->tf_eip;
3068 regs->r_cs = tp->tf_cs;
3069 regs->r_eflags = tp->tf_eflags;
3070 regs->r_esp = tp->tf_esp;
3071 regs->r_ss = tp->tf_ss;
3072 regs->r_gs = pcb->pcb_gs;
3077 set_regs(struct thread *td, struct reg *regs)
3080 struct trapframe *tp;
3083 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
3084 !CS_SECURE(regs->r_cs))
3087 tp->tf_fs = regs->r_fs;
3088 tp->tf_es = regs->r_es;
3089 tp->tf_ds = regs->r_ds;
3090 tp->tf_edi = regs->r_edi;
3091 tp->tf_esi = regs->r_esi;
3092 tp->tf_ebp = regs->r_ebp;
3093 tp->tf_ebx = regs->r_ebx;
3094 tp->tf_edx = regs->r_edx;
3095 tp->tf_ecx = regs->r_ecx;
3096 tp->tf_eax = regs->r_eax;
3097 tp->tf_eip = regs->r_eip;
3098 tp->tf_cs = regs->r_cs;
3099 tp->tf_eflags = regs->r_eflags;
3100 tp->tf_esp = regs->r_esp;
3101 tp->tf_ss = regs->r_ss;
3102 pcb->pcb_gs = regs->r_gs;
3106 #ifdef CPU_ENABLE_SSE
3108 fill_fpregs_xmm(sv_xmm, sv_87)
3109 struct savexmm *sv_xmm;
3110 struct save87 *sv_87;
3112 register struct env87 *penv_87 = &sv_87->sv_env;
3113 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
3116 bzero(sv_87, sizeof(*sv_87));
3118 /* FPU control/status */
3119 penv_87->en_cw = penv_xmm->en_cw;
3120 penv_87->en_sw = penv_xmm->en_sw;
3121 penv_87->en_tw = penv_xmm->en_tw;
3122 penv_87->en_fip = penv_xmm->en_fip;
3123 penv_87->en_fcs = penv_xmm->en_fcs;
3124 penv_87->en_opcode = penv_xmm->en_opcode;
3125 penv_87->en_foo = penv_xmm->en_foo;
3126 penv_87->en_fos = penv_xmm->en_fos;
3129 for (i = 0; i < 8; ++i)
3130 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
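/*
 * Added note: this converts the FXSAVE (savexmm) image kept by
 * SSE-capable CPUs into the legacy FNSAVE (save87) layout that the
 * fpreg interface expects; only the shared control/status words and
 * the eight x87 accumulators survive the translation, and
 * set_fpregs_xmm() below performs the inverse.
 */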
3134 set_fpregs_xmm(sv_87, sv_xmm)
3135 struct save87 *sv_87;
3136 struct savexmm *sv_xmm;
3138 register struct env87 *penv_87 = &sv_87->sv_env;
3139 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
3142 /* FPU control/status */
3143 penv_xmm->en_cw = penv_87->en_cw;
3144 penv_xmm->en_sw = penv_87->en_sw;
3145 penv_xmm->en_tw = penv_87->en_tw;
3146 penv_xmm->en_fip = penv_87->en_fip;
3147 penv_xmm->en_fcs = penv_87->en_fcs;
3148 penv_xmm->en_opcode = penv_87->en_opcode;
3149 penv_xmm->en_foo = penv_87->en_foo;
3150 penv_xmm->en_fos = penv_87->en_fos;
3153 for (i = 0; i < 8; ++i)
3154 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
3156 #endif /* CPU_ENABLE_SSE */
3159 fill_fpregs(struct thread *td, struct fpreg *fpregs)
3161 #ifdef CPU_ENABLE_SSE
3163 fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
3164 (struct save87 *)fpregs);
3167 #endif /* CPU_ENABLE_SSE */
3168 bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
3173 set_fpregs(struct thread *td, struct fpreg *fpregs)
3175 #ifdef CPU_ENABLE_SSE
3177 set_fpregs_xmm((struct save87 *)fpregs,
3178 &td->td_pcb->pcb_save.sv_xmm);
3181 #endif /* CPU_ENABLE_SSE */
3182 bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
3187 * Get machine context.
3190 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
3192 struct trapframe *tp;
3193 struct segment_descriptor *sdp;
3197 PROC_LOCK(curthread->td_proc);
3198 mcp->mc_onstack = sigonstack(tp->tf_esp);
3199 PROC_UNLOCK(curthread->td_proc);
3200 mcp->mc_gs = td->td_pcb->pcb_gs;
3201 mcp->mc_fs = tp->tf_fs;
3202 mcp->mc_es = tp->tf_es;
3203 mcp->mc_ds = tp->tf_ds;
3204 mcp->mc_edi = tp->tf_edi;
3205 mcp->mc_esi = tp->tf_esi;
3206 mcp->mc_ebp = tp->tf_ebp;
3207 mcp->mc_isp = tp->tf_isp;
3208 mcp->mc_eflags = tp->tf_eflags;
3209 if (flags & GET_MC_CLEAR_RET) {
3210 mcp->mc_eax = 0;
3211 mcp->mc_edx = 0;
3212 mcp->mc_eflags &= ~PSL_C;
3213 } else {
3214 mcp->mc_eax = tp->tf_eax;
3215 mcp->mc_edx = tp->tf_edx;
3216 }
3217 mcp->mc_ebx = tp->tf_ebx;
3218 mcp->mc_ecx = tp->tf_ecx;
3219 mcp->mc_eip = tp->tf_eip;
3220 mcp->mc_cs = tp->tf_cs;
3221 mcp->mc_esp = tp->tf_esp;
3222 mcp->mc_ss = tp->tf_ss;
3223 mcp->mc_len = sizeof(*mcp);
3224 get_fpcontext(td, mcp);
3225 sdp = &td->td_pcb->pcb_fsd;
3226 mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
3227 sdp = &td->td_pcb->pcb_gsd;
3228 mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
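/*
 * Added note: an i386 segment descriptor stores its 32-bit base
 * split into a 24-bit low field and an 8-bit high field, so the
 * linear base is reassembled as (sd_hibase << 24) | sd_lobase.
 */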
3234 * Set machine context.
3236 * However, we don't set any but the user modifiable flags, and we won't
3237 * touch the cs selector.
3240 set_mcontext(struct thread *td, const mcontext_t *mcp)
3242 struct trapframe *tp;
3246 if (mcp->mc_len != sizeof(*mcp))
3248 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
3249 (tp->tf_eflags & ~PSL_USERCHANGE);
3250 if ((ret = set_fpcontext(td, mcp)) == 0) {
3251 tp->tf_fs = mcp->mc_fs;
3252 tp->tf_es = mcp->mc_es;
3253 tp->tf_ds = mcp->mc_ds;
3254 tp->tf_edi = mcp->mc_edi;
3255 tp->tf_esi = mcp->mc_esi;
3256 tp->tf_ebp = mcp->mc_ebp;
3257 tp->tf_ebx = mcp->mc_ebx;
3258 tp->tf_edx = mcp->mc_edx;
3259 tp->tf_ecx = mcp->mc_ecx;
3260 tp->tf_eax = mcp->mc_eax;
3261 tp->tf_eip = mcp->mc_eip;
3262 tp->tf_eflags = eflags;
3263 tp->tf_esp = mcp->mc_esp;
3264 tp->tf_ss = mcp->mc_ss;
3265 td->td_pcb->pcb_gs = mcp->mc_gs;
3272 get_fpcontext(struct thread *td, mcontext_t *mcp)
3275 mcp->mc_fpformat = _MC_FPFMT_NODEV;
3276 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
3278 union savefpu *addr;
3281 * XXX mc_fpstate might be misaligned, since its declaration is not
3282 * unportabilized using __attribute__((aligned(16))) like the
3283 * declaration of struct savemm, and anyway, alignment doesn't work
3284 * for auto variables since we don't use gcc's pessimal stack
3285 * alignment. Work around this by abusing the spare fields after
3288 * XXX unpessimize most cases by only aligning when fxsave might be
3289 * called, although this requires knowing too much about
3290 * npxgetregs()'s internals.
3292 addr = (union savefpu *)&mcp->mc_fpstate;
3293 if (td == PCPU_GET(fpcurthread) &&
3294 #ifdef CPU_ENABLE_SSE
3295 cpu_fxsr &&
3296 #endif
3297 ((uintptr_t)(void *)addr & 0xF)) {
3298 do
3299 addr = (void *)((char *)addr + 4);
3300 while ((uintptr_t)(void *)addr & 0xF);
3302 mcp->mc_ownedfp = npxgetregs(td, addr);
3303 if (addr != (union savefpu *)&mcp->mc_fpstate) {
3304 bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
3305 bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
3307 mcp->mc_fpformat = npxformat();
3312 set_fpcontext(struct thread *td, const mcontext_t *mcp)
3314 union savefpu *addr;
3316 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
3318 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
3319 mcp->mc_fpformat != _MC_FPFMT_XMM)
3321 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
3322 /* We don't care what state is left in the FPU or PCB. */
3324 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
3325 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
3326 /* XXX align as above. */
3327 addr = (union savefpu *)&mcp->mc_fpstate;
3328 if (td == PCPU_GET(fpcurthread) &&
3329 #ifdef CPU_ENABLE_SSE
3330 cpu_fxsr &&
3331 #endif
3332 ((uintptr_t)(void *)addr & 0xF)) {
3333 do
3334 addr = (void *)((char *)addr + 4);
3335 while ((uintptr_t)(void *)addr & 0xF);
3336 bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
3339 #ifdef CPU_ENABLE_SSE
3341 addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
3344 * XXX we violate the dubious requirement that npxsetregs()
3345 * be called with interrupts disabled.
3347 npxsetregs(td, addr);
3350 * Don't bother putting things back where they were in the
3351 * misaligned case, since we know that the caller won't use
3360 fpstate_drop(struct thread *td)
3366 if (PCPU_GET(fpcurthread) == td)
3370 * XXX force a full drop of the npx. The above only drops it if we
3371 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
3373 * XXX I don't much like npxgetregs()'s semantics of doing a full
3374 * drop. Dropping only to the pcb matches fnsave's behaviour.
3375 * We only need to drop to !PCB_INITDONE in sendsig(). But
3376 * sendsig() is the only caller of npxgetregs()... perhaps we just
3377 * have too many layers.
3379 curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
3384 fill_dbregs(struct thread *td, struct dbreg *dbregs)
3389 dbregs->dr[0] = rdr0();
3390 dbregs->dr[1] = rdr1();
3391 dbregs->dr[2] = rdr2();
3392 dbregs->dr[3] = rdr3();
3393 dbregs->dr[4] = rdr4();
3394 dbregs->dr[5] = rdr5();
3395 dbregs->dr[6] = rdr6();
3396 dbregs->dr[7] = rdr7();
3399 dbregs->dr[0] = pcb->pcb_dr0;
3400 dbregs->dr[1] = pcb->pcb_dr1;
3401 dbregs->dr[2] = pcb->pcb_dr2;
3402 dbregs->dr[3] = pcb->pcb_dr3;
3405 dbregs->dr[6] = pcb->pcb_dr6;
3406 dbregs->dr[7] = pcb->pcb_dr7;
3412 set_dbregs(struct thread *td, struct dbreg *dbregs)
3418 load_dr0(dbregs->dr[0]);
3419 load_dr1(dbregs->dr[1]);
3420 load_dr2(dbregs->dr[2]);
3421 load_dr3(dbregs->dr[3]);
3422 load_dr4(dbregs->dr[4]);
3423 load_dr5(dbregs->dr[5]);
3424 load_dr6(dbregs->dr[6]);
3425 load_dr7(dbregs->dr[7]);
3428 * Don't let an illegal value for dr7 get set. Specifically,
3429 * check for undefined settings. Setting these bit patterns
3430 * results in undefined behaviour and can lead to an unexpected
3433 for (i = 0; i < 4; i++) {
3434 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
3436 if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
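/*
 * Added note: each of the four DR7 slots carries a 2-bit access type
 * and a 2-bit length.  Type 0x02 (I/O access) and length 0x02
 * (8 bytes) are the encodings left undefined on i386, hence the two
 * checks above.
 */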
3443 * Don't let a process set a breakpoint that is not within the
3444 * process's address space. If a process could do this, it
3445 * could halt the system by setting a breakpoint in the kernel
3446 * (if ddb was enabled). Thus, we need to check to make sure
3447 * that no breakpoints are being enabled for addresses outside
3448 * the process's address space.
3450 * XXX - what about when the watched area of the user's
3451 * address space is written into from within the kernel
3452 * ... wouldn't that still cause a breakpoint to be generated
3453 * from within kernel mode?
3456 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
3457 /* dr0 is enabled */
3458 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
3462 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
3463 /* dr1 is enabled */
3464 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
3468 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
3469 /* dr2 is enabled */
3470 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
3474 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
3475 /* dr3 is enabled */
3476 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
3480 pcb->pcb_dr0 = dbregs->dr[0];
3481 pcb->pcb_dr1 = dbregs->dr[1];
3482 pcb->pcb_dr2 = dbregs->dr[2];
3483 pcb->pcb_dr3 = dbregs->dr[3];
3484 pcb->pcb_dr6 = dbregs->dr[6];
3485 pcb->pcb_dr7 = dbregs->dr[7];
3487 pcb->pcb_flags |= PCB_DBREGS;
3494 * Return > 0 if a hardware breakpoint has been hit, and the
3495 * breakpoint was in user space. Return 0 otherwise.
3498 user_dbreg_trap(void)
3500 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
3501 u_int32_t bp; /* breakpoint bits extracted from dr6 */
3502 int nbp; /* number of breakpoints that triggered */
3503 caddr_t addr[4]; /* breakpoint addresses */
3507 if ((dr7 & 0x000000ff) == 0) {
3509 * all GE and LE bits in the dr7 register are zero,
3510 * thus the trap couldn't have been caused by the
3511 * hardware debug registers
3518 bp = dr6 & 0x0000000f;
3522 * None of the breakpoint bits are set, meaning this
3523 * trap was not caused by any of the debug registers
3529 * At least one of the breakpoints was hit; check to see
3530 * which ones and if any of them are user space addresses
3534 addr[nbp++] = (caddr_t)rdr0();
3537 addr[nbp++] = (caddr_t)rdr1();
3540 addr[nbp++] = (caddr_t)rdr2();
3543 addr[nbp++] = (caddr_t)rdr3();
3546 for (i = 0; i < nbp; i++) {
3547 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
3549 * addr[i] is in user space
3556 * None of the breakpoints are in user space.
3562 #include <machine/apicvar.h>
3565 * Provide stub functions so that the MADT APIC enumerator in the acpi
3566 * kernel module will link against a kernel without 'device apic'.
3568 * XXX - This is a gross hack.
3571 apic_register_enumerator(struct apic_enumerator *enumerator)
3576 ioapic_create(vm_paddr_t addr, int32_t apic_id, int intbase)
3582 ioapic_disable_pin(void *cookie, u_int pin)
3588 ioapic_get_vector(void *cookie, u_int pin)
3594 ioapic_register(void *cookie)
3599 ioapic_remap_vector(void *cookie, u_int pin, int vector)
3605 ioapic_set_extint(void *cookie, u_int pin)
3611 ioapic_set_nmi(void *cookie, u_int pin)
3617 ioapic_set_polarity(void *cookie, u_int pin, enum intr_polarity pol)
3623 ioapic_set_triggermode(void *cookie, u_int pin, enum intr_trigger trigger)
3629 lapic_create(u_int apic_id, int boot_cpu)
3634 lapic_init(vm_paddr_t addr)
3639 lapic_set_lvt_mode(u_int apic_id, u_int lvt, u_int32_t mode)
3645 lapic_set_lvt_polarity(u_int apic_id, u_int lvt, enum intr_polarity pol)
3651 lapic_set_lvt_triggermode(u_int apic_id, u_int lvt, enum intr_trigger trigger)
3660 * Provide inb() and outb() as functions. They are normally only available as
3661 * inline functions and thus cannot be called from the debugger.
3664 /* silence compiler warnings */
3665 u_char inb_(u_short);
3666 void outb_(u_short, u_char);
3675 outb_(u_short port, u_char data)