/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 *
 * This code is derived from software contributed to Berkeley by
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#error KDB must be enabled in order for DDB to work!
#include <ddb/db_sym.h>

#include <pc98/pc98/pc98_machdep.h>

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#include <x86/init.h>
#include <machine/perfmon.h>
#include <machine/smp.h>
#include <x86/apicvar.h>
#include <x86/isa/icu.h>

#include <machine/xbox.h>

int arch_i386_is_xbox = 0;
uint32_t arch_i386_xbox_memsize = 0;

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <machine/xen/xenvar.h>
#include <machine/xen/xenfunc.h>
#include <xen/xen_intr.h>

void Xhypervisor_callback(void);
void failsafe_callback(void);

extern trap_info_t trap_table[];
struct proc_ldt default_proc_ldt;
extern int init_first;
extern unsigned long physfree;

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern register_t init386(int first);
extern void dblfault_handler(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
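/*
 * Illustrative usage sketch: these predicates gate what a sigreturn(2)
 * caller may restore.  EFL_SECURE() accepts a new eflags value only when
 * every bit outside PSL_USERCHANGE is unchanged, and CS_SECURE() accepts
 * only user-privilege code selectors, e.g.:
 *
 *	if (!EFL_SECURE(eflags, regs->tf_eflags))
 *		return (EINVAL);
 *	if (!CS_SECURE(scp->sc_cs))
 *		trapsignal(td, &ksi);
 *
 * mirroring the checks in osigreturn() and sys_sigreturn() below.
 */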
#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define	CPU_ENABLE_SSE

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpusave, size_t xfpusave_len);
static int set_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpustate, size_t xfpustate_len);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Intel ICH registers */
#define	ICH_PMBASE	0x400
#define	ICH_SMI_EN	(ICH_PMBASE + 0x30)
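/*
 * Note (illustrative): LEGACY_USB_EN is bit 3 of SMI_EN, which is why
 * cpu_startup() below clears it with the ~0x8 mask in
 * outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8).
 */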
int _udatasel, _ucodesel;

int need_pre_dma_flush;		/* If 1, use wbinvd before DMA transfer. */
int need_post_dma_flush;	/* If 1, use invd after DMA transfer. */

static int ispc98 = 1;
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");
static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);

#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);

FEATURE(pae, "Physical Address Extensions");
/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define	PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define	DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
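/*
 * Illustrative sketch: phys_avail[] and dump_avail[] hold {start, end}
 * pairs of physical addresses terminated by a 0/0 pair, which is why two
 * extra slots are reserved above.  A consumer walks them the way
 * cpu_startup() does below, e.g.:
 *
 *	vm_paddr_t total = 0;
 *	for (i = 0; phys_avail[i + 1] != 0; i += 2)
 *		total += phys_avail[i + 1] - phys_avail[i];
 */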
struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mem_range_softc mem_range_softc;

/* Default init_ops implementation. */
struct init_ops init_ops = {
	.early_clock_source_init =	i8254_init,
	.early_delay =			i8254_delay,
	.msi_init =			msi_init,
	/*
	 * On MacBooks, we need to prevent the legacy USB circuit from
	 * generating an SMI# because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = kern_getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook3,1", 10) == 0 ||
		    strncmp(sysenv, "MacBook4,1", 10) == 0 ||
		    strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
		    strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
		    strncmp(sysenv, "Macmini1,1", 10) == 0) {
			printf("Disabling LEGACY_USB_EN bit on "
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	panicifcpuunsupported();
	/*
	 * Display physical memory if SMBIOS reports a reasonable amount.
	 */
	sysenv = kern_getenv("smbios.memory.enabled");
	if (sysenv != NULL) {
		memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
	if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
		memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory  = %ju (%ju MB)\n", memsize, memsize >> 20);
	realmem = atop(memsize);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			size = phys_avail[indx + 1] - phys_avail[indx];
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_cnt.v_free_count),
	    ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	vm_pager_bufferinit();
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by call
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
	struct osigframe sf, *fp;
	struct trapframe *regs;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;

		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	mtx_unlock(&psp->ps_mtx);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
		printf("process %ld has trashed its stack\n", (long)p->p_pid);

	regs->tf_esp = (int)fp;
	if (p->p_sysent->sv_sigcode_base != 0) {
		regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
		/* a.out sysentvec does not use shared page */
		regs->tf_eip = p->p_sysent->sv_psstrings - szosigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	mtx_lock(&psp->ps_mtx);
#endif /* COMPAT_43 */
#ifdef COMPAT_FREEBSD4
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
	struct sigframe4 sf, *sfp;
	struct trapframe *regs;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
	    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	bzero(sf.sf_uc.uc_mcontext.__spare__,
	    sizeof(sf.sf_uc.uc_mcontext.__spare__));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;

		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	mtx_unlock(&psp->ps_mtx);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		printf("process %ld has trashed its stack\n", (long)p->p_pid);

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	mtx_lock(&psp->ps_mtx);
#endif /* COMPAT_FREEBSD4 */

sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
	struct sigframe sf, *sfp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
	oonstack = sigonstack(regs->tf_esp);
#ifdef CPU_ENABLE_SSE
	if (cpu_max_ext_state_size > sizeof(union savefpu) && use_xsave) {
		xfpusave_len = cpu_max_ext_state_size - sizeof(union savefpu);
		xfpusave = __builtin_alloca(xfpusave_len);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);

	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	bzero(sf.sf_uc.uc_mcontext.mc_spare2,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
		sp = (char *)regs->tf_esp - 128;
	if (xfpusave != NULL) {
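		/* The extended FPU (XSAVE) state area must be 64-byte aligned. */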
		sp = (char *)((unsigned int)sp & ~0x3F);
		sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
	sp -= sizeof(struct sigframe);

	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig;	/* maybe a translated signal */

		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	mtx_unlock(&psp->ps_mtx);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    (xfpusave != NULL && copyout(xfpusave,
	    (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
		printf("process %ld has trashed its stack\n", (long)p->p_pid);

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base;
	if (regs->tf_eip == 0)
		regs->tf_eip = p->p_sysent->sv_psstrings - szsigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	mtx_lock(&psp->ps_mtx);

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;

	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;

	/*
	 * Don't allow users to change privileged or reserved flags.
	 */
	if (!EFL_SECURE(eflags, regs->tf_eflags)) {

	/*
	 * Don't allow users to load a valid privileged %cs.  Let the
	 * hardware check for invalid selectors, excess privilege in
	 * other selectors, invalid %eip's and invalid %esp's.
	 */
	if (!CS_SECURE(scp->sc_cs)) {
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = BUS_OBJERR;
		ksi.ksi_trapno = T_PROTFLT;
		ksi.ksi_addr = (void *)regs->tf_eip;
		trapsignal(td, &ksi);
	regs->tf_ds = scp->sc_ds;
	regs->tf_es = scp->sc_es;
	regs->tf_fs = scp->sc_fs;

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	return (EJUSTRETURN);
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
freebsd4_sigreturn(td, uap)
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	struct trapframe *regs;
	struct ucontext4 *ucp;
	int cs, eflags, error;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;

	/*
	 * Don't allow users to change privileged or reserved flags.
	 */
	if (!EFL_SECURE(eflags, regs->tf_eflags)) {
		uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
		    td->td_proc->p_pid, td->td_name, eflags);

	/*
	 * Don't allow users to load a valid privileged %cs.  Let the
	 * hardware check for invalid selectors, excess privilege in
	 * other selectors, invalid %eip's and invalid %esp's.
	 */
	cs = ucp->uc_mcontext.mc_cs;
	if (!CS_SECURE(cs)) {
		uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
		    td->td_proc->p_pid, td->td_name, cs);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = BUS_OBJERR;
		ksi.ksi_trapno = T_PROTFLT;
		ksi.ksi_addr = (void *)regs->tf_eip;
		trapsignal(td, &ksi);

	bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
#endif /* COMPAT_FREEBSD4 */

sys_sigreturn(td, uap)
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	struct trapframe *regs;
	size_t xfpustate_len;
	int cs, eflags, error, ret;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
		uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
		    td->td_name, ucp->uc_mcontext.mc_flags);
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;

	/*
	 * Don't allow users to change privileged or reserved flags.
	 */
	if (!EFL_SECURE(eflags, regs->tf_eflags)) {
		uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
		    td->td_proc->p_pid, td->td_name, eflags);

	/*
	 * Don't allow users to load a valid privileged %cs.  Let the
	 * hardware check for invalid selectors, excess privilege in
	 * other selectors, invalid %eip's and invalid %esp's.
	 */
	cs = ucp->uc_mcontext.mc_cs;
	if (!CS_SECURE(cs)) {
		uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
		    td->td_proc->p_pid, td->td_name, cs);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = BUS_OBJERR;
		ksi.ksi_trapno = T_PROTFLT;
		ksi.ksi_addr = (void *)regs->tf_eip;
		trapsignal(td, &ksi);

	if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
		xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
		if (xfpustate_len > cpu_max_ext_state_size -
		    sizeof(union savefpu)) {
			    "pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
			    p->p_pid, td->td_name, xfpustate_len);
		xfpustate = __builtin_alloca(xfpustate_len);
		error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
		    xfpustate, xfpustate_len);
			    "pid %d (%s): sigreturn copying xfpustate failed\n",
			    p->p_pid, td->td_name);

	ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate,
	bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
cpu_flush_dcache(void *ptr, size_t len)
	/* Not applicable */

/* Get current clock frequency for the given cpu id. */
cpu_est_clockrate(int cpu_id, uint64_t *rate)
	uint64_t tsc1, tsc2;
	uint64_t acnt, mcnt, perf;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/*
	 * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
	 * DELAY(9) based logic fails.
	 */
	if (tsc_is_invariant && !tsc_perf_stat)
		return (EOPNOTSUPP);

	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	if (tsc_is_invariant) {
		wrmsr(MSR_MPERF, 0);
		wrmsr(MSR_APERF, 0);
		mcnt = rdmsr(MSR_MPERF);
		acnt = rdmsr(MSR_APERF);
		perf = 1000 * acnt / mcnt;
		*rate = (tsc2 - tsc1) * perf;
		*rate = (tsc2 - tsc1) * 1000;

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	HYPERVISOR_sched_op(SCHEDOP_block, 0);
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);

int scheduler_running;

cpu_idle_hlt(sbintime_t sbt)
	scheduler_running = 1;

/*
 * Shutdown the CPU as much as possible
 */

void (*cpu_idle_hook)(sbintime_t) = NULL;	/* ACPI idle hook. */
static int cpu_ident_amdc1e = 0;	/* AMD C1E supported. */
static int idle_mwait = 1;	/* Use MONITOR/MWAIT for short idle. */
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RWTUN, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

#define	STATE_RUNNING	0x0
#define	STATE_MWAIT	0x1
#define	STATE_SLEEPING	0x2
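/*
 * Each CPU publishes one of these states in its monitorbuf: the
 * cpu_idle_*() methods store STATE_SLEEPING or STATE_MWAIT before
 * blocking, and cpu_idle_wakeup() flips STATE_MWAIT back to
 * STATE_RUNNING; that store hits the monitored address and wakes the
 * core without needing an IPI.
 */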
cpu_idle_acpi(sbintime_t sbt)
	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/* See comments in cpu_idle_hlt(). */
	if (sched_runnable())
	else if (cpu_idle_hook)
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;

cpu_idle_hlt(sbintime_t sbt)
	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;
	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, please note
	 * that on x86 this works fine because interrupts are enabled only
	 * after the instruction following sti takes effect, while IF is set
	 * to 1 immediately, allowing the hlt instruction to acknowledge the
	 * interrupt.
	 */
	if (sched_runnable())
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;

/*
 * MWAIT cpu power states.  Lower 4 bits are sub-states.
 */
#define	MWAIT_C0	0xf0
#define	MWAIT_C1	0x00
#define	MWAIT_C2	0x10
#define	MWAIT_C3	0x20
#define	MWAIT_C4	0x30
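/*
 * Example (illustrative): the MWAIT hint encodes the target C-state in
 * the high nibble and a sub-state in the low nibble, so a C3 sub-state 1
 * request would be (MWAIT_C3 | 0x1).  cpu_idle_mwait() below always
 * requests MWAIT_C1.
 */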
cpu_idle_mwait(sbintime_t sbt)
	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_MWAIT;

	/* See comments in cpu_idle_hlt(). */
	if (sched_runnable()) {
		*state = STATE_RUNNING;

	cpu_monitor(state, 0, 0);
	if (*state == STATE_MWAIT)
		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
	*state = STATE_RUNNING;

cpu_idle_spin(sbintime_t sbt)
	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_RUNNING;
	/*
	 * The sched_runnable() call is racy, but as long as we are in a
	 * loop, missing it once will have only a small impact, if any
	 * (and it is much better than missing the check at all).
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())

/*
 * C1E renders the local APIC timer dead, so we disable it by
 * reading the Interrupt Pending Message register and clearing
 * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27).
 *
 * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors"
 * #32559 revision 3.00+
 */
#define	MSR_AMDK8_IPM		0xc0010055
#define	AMDK8_SMIONCMPHALT	(1ULL << 27)
#define	AMDK8_C1EONCMPHALT	(1ULL << 28)
#define	AMDK8_CMPHALT		(AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT)
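/*
 * cpu_idle() below applies the workaround: it reads MSR_AMDK8_IPM and,
 * when either CMPHALT bit is set, writes the value back with both bits
 * cleared before halting, keeping the local APIC timer alive on
 * affected CPUs.
 */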
cpu_probe_amdc1e(void)
	/*
	 * Detect the presence of C1E capability, mostly on the latest
	 * dual-core (or future) K8 family.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    (cpu_id & 0x00000f00) == 0x00000f00 &&
	    (cpu_id & 0x0fff0000) >= 0x00040000) {
		cpu_ident_amdc1e = 1;
#if defined(PC98) || defined(XEN)
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_hlt;
void (*cpu_idle_fn)(sbintime_t) = cpu_idle_acpi;

	sbintime_t sbt = -1;

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
#if defined(MP_WATCHDOG) && !defined(XEN)
	ap_watchdog(PCPU_GET(cpuid));

	/* If we are busy - try to use fast methods. */
	if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
		cpu_idle_mwait(busy);

	/* If we have time - switch timers into idle mode. */
		sbt = cpu_idleclock();

	/* Apply AMD APIC timer C1E workaround. */
	if (cpu_ident_amdc1e && cpu_disable_c3_sleep) {
		msr = rdmsr(MSR_AMDK8_IPM);
		if (msr & AMDK8_CMPHALT)
			wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT);

	/* Call main idle method. */

	/* Switch timers back into active mode. */

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",

cpu_idle_wakeup(int cpu)
	pcpu = pcpu_find(cpu);
	state = (int *)pcpu->pc_monitorbuf;

	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (*state == STATE_SLEEPING)
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;

/*
 * Ordered by speed/power consumption.
 */
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },

idle_sysctl_available(SYSCTL_HANDLER_ARGS)
	avail = malloc(256, M_TEMP, M_WAITOK);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

idle_sysctl(SYSCTL_HANDLER_ARGS)
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
		if (strcmp(idle_tbl[i].id_name, "acpi") == 0 &&
		    cpu_idle_hook == NULL)
		if (strcmp(idle_tbl[i].id_name, buf))
		cpu_idle_fn = idle_tbl[i].id_fn;

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");

/*
 * Reset registers to default values on exec.
 */
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
	mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = imgp->entry_addr;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = imgp->ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		if (pcb == curpcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
		pcb->pcb_flags &= ~PCB_DBREGS;

	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;

u_long bootdev;		/* not a struct cdev *- encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");

static char bootmethod[16] = "BIOS";
SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
    "System firmware boot method");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

union descriptor *gdt;
union descriptor *ldt;
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */

static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GUFS_SEL	2 %fs Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GUGS_SEL	3 %gs Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GCODE_SEL	4 Code Descriptor for kernel */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GDATA_SEL	5 Data Descriptor for kernel */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GUCODE_SEL	6 Code Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GUDATA_SEL	7 Data Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = (int) ldt,
	.ssd_limit = sizeof(ldt)-1,
	.ssd_type = SDT_SYSLDT,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = (int) ldt,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = (int) &dblfault_tss,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GNDIS_SEL	18 NDIS Descriptor */
	.ssd_xx = 0, .ssd_xx1 = 0,

static struct soft_segment_descriptor ldt_segs[] = {
/* Null Descriptor - overwritten by call gate */
	.ssd_xx = 0, .ssd_xx1 = 0,
/* Null Descriptor - overwritten by call gate */
	.ssd_xx = 0, .ssd_xx1 = 0,
/* Null Descriptor - overwritten by call gate */
	.ssd_xx = 0, .ssd_xx1 = 0,
/* Code Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* Null Descriptor - overwritten by call gate */
	.ssd_xx = 0, .ssd_xx1 = 0,
/* Data Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,

setidt(idx, func, typ, dpl, selec)
	struct gate_descriptor *ip;

	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_hioffset = ((int)func) >> 16;

	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
#ifdef KDTRACE_HOOKS
	IDTVEC(xen_intr_upcall),
	IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
	struct gate_descriptor *ip;

	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);

/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
	uint64_t idtr, gdtr;

	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());

	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;

	ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type = sd->sd_type;
	ssd->ssd_dpl = sd->sd_dpl;
	ssd->ssd_p = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran = sd->sd_gran;

#if !defined(PC98) && !defined(XEN)
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
	int i, insert_idx, physmap_idx;

	physmap_idx = *physmap_idxp;

	if (base > 0xffffffff) {
		printf("%uK of memory above 4GB ignored\n",
		    (u_int)(length / 1024));

	/*
	 * Find insertion point while checking for overlap.  Start off by
	 * assuming the new entry will be added to the end.
	 */
	insert_idx = physmap_idx + 2;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (base < physmap[i + 1]) {
			if (base + length <= physmap[i]) {
			if (boothowto & RB_VERBOSE)
		    "Overlapping memory regions, ignoring second region\n");

	/* See if we can prepend to the next entry. */
	if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
		physmap[insert_idx] = base;

	/* See if we can append to the previous entry. */
	if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
		physmap[insert_idx - 1] += length;

	*physmap_idxp = physmap_idx;
	if (physmap_idx == PHYSMAP_SIZE) {
		"Too many segments in the physical address map, giving up\n");
	/*
	 * Move the last 'N' entries down to make room for the new
	 * entry if needed.
	 */
	for (i = physmap_idx; i > insert_idx; i -= 2) {
		physmap[i] = physmap[i - 2];
		physmap[i + 1] = physmap[i - 1];

	/* Insert the new entry. */
	physmap[insert_idx] = base;
	physmap[insert_idx + 1] = base + length;

add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
	if (boothowto & RB_VERBOSE)
		printf("SMAP type=%02x base=%016llx len=%016llx\n",
		    smap->type, smap->base, smap->length);

	if (smap->type != SMAP_TYPE_MEMORY)

	return (add_physmap_entry(smap->base, smap->length, physmap,

add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap,
	struct bios_smap *smap, *smapend;

	/*
	 * Memory map from INT 15:E820.
	 *
	 * subr_module.c says:
	 * "Consumer may safely assume that size value precedes data."
	 * ie: an int32_t immediately precedes SMAP.
	 */
	smapsize = *((u_int32_t *)smapbase - 1);
	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

	for (smap = smapbase; smap < smapend; smap++)
		if (!add_smap_entry(smap, physmap, physmap_idxp))
#endif /* !PC98 && !XEN */

	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",

		/*
		 * XXX if biosbasemem is now < 640, there is a `hole'
		 * between the end of base memory and the start of
		 * ISA memory.  The hole may be empty or it may
		 * contain BIOS code or data.  Map it read/write so
		 * that the BIOS can write to it.  (Memory from 0 to
		 * the physical end of the kernel is mapped read-only
		 * to begin with and then parts of it are remapped.
		 * The parts that aren't remapped form holes that
		 * remain read-only and are unused by the kernel.
		 * The base memory area is below the physical end of
		 * the kernel and right now forms a read-only hole.
		 * The part of it from PAGE_SIZE to
		 * (trunc_page(biosbasemem * 1024) - 1) will be
		 * remapped and used by the kernel later.)
		 *
		 * This code is similar to the code used in
		 * pmap_mapdev, but since no memory needs to be
		 * allocated we simply change the mapping.
		 */
		for (pa = trunc_page(basemem * 1024);
		     pa < ISA_HOLE_START; pa += PAGE_SIZE)
			pmap_kenter(KERNBASE + pa, pa);

	/*
	 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
	 * the vm86 page table so that vm86 can scribble on them using
	 * the vm86 map too.  XXX: why 2 ways for this and only 1 way for
	 * page 0, at least as initialized here?
	 */
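	/*
	 * Note: basemem is in kilobytes, so basemem / 4 is the first 4KB
	 * page index past base memory, and page 160 corresponds to the
	 * 640KB boundary where the ISA hole begins.
	 */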
	pte = (pt_entry_t *)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
getmemsize(int first)
	int off, physmap_idx, pa_indx, da_indx;
	u_long physmem_tunable, memtest;
	vm_paddr_t physmap[PHYSMAP_SIZE];
	quad_t dcons_addr, dcons_size;

	bzero(physmap, sizeof(physmap));
	/* XXX - some EPSON machines can't use PG_N */
	if (pc98_machine_type & M_EPSON_PC98) {
		switch (epson_machine_id) {
		case EPSON_PC486_HX:
		case EPSON_PC486_HG:
		case EPSON_PC486_HA:

	under16 = pc98_getmemsize(&basemem, &extmem);

	physmap[1] = basemem * 1024;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

	/*
	 * Now, physmap contains a map of physical memory.
	 */

	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1]);

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);
	Maxmem = MAXMEM / 4;
	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * By default keep the memtest enabled.  Use a general name so that
	 * one could eventually do more with the code than just disable it.
	 */
	TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
	/*
	 * We need to divide the chunk if Maxmem is larger than 16MB and
	 * the under-16MB area is not fully populated with memory.
	 * (1) system area (15-16MB region) is cut off
	 * (2) extended memory is only over 16MB area (ex. Melco "HYPERMEMORY")
	 */
	if ((under16 != 16 * 1024) && (extmem > 15 * 1024)) {
		/* 15M - 16M region is cut off, so we need to divide the chunk */
		physmap[physmap_idx + 1] = under16 * 1024;
		physmap[physmap_idx] = 0x1000000;
		physmap[physmap_idx + 1] = physmap[2] + extmem * 1024;

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR3;

			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)

			/*
			 * block out dcons buffer
			 */
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)

			/*
			 * map page into kernel: valid, read/write, non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | pg_n;

			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)

			/*
			 * Restore original value.
			 */

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" is exclusive: the range covers
			 * addresses >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
2450 if (phys_avail[pa_indx] == pa) {
2451 phys_avail[pa_indx] += PAGE_SIZE;
2454 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2456 "Too many holes in the physical address space, giving up\n");
2461 phys_avail[pa_indx++] = pa; /* start */
2462 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2466 if (dump_avail[da_indx] == pa) {
2467 dump_avail[da_indx] += PAGE_SIZE;
2470 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2474 dump_avail[da_indx++] = pa; /* start */
2475 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2487 * The last chunk must contain at least one page plus the message
2488 * buffer to avoid complicating other code (message buffer address
2489 * calculation, etc.).
2491 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2492 round_page(msgbufsize) >= phys_avail[pa_indx]) {
2493 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2494 phys_avail[pa_indx--] = 0;
2495 phys_avail[pa_indx--] = 0;
2498 Maxmem = atop(phys_avail[pa_indx]);
2500 /* Trim off space for the message buffer. */
2501 phys_avail[pa_indx] -= round_page(msgbufsize);
2503 /* Map the message buffer. */
2504 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
2505 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2512 getmemsize(int first)
2514 int has_smap, off, physmap_idx, pa_indx, da_indx;
2516 vm_paddr_t physmap[PHYSMAP_SIZE];
2518 quad_t dcons_addr, dcons_size, physmem_tunable;
2520 int hasbrokenint12, i, res;
2522 struct vm86frame vmf;
2523 struct vm86context vmc;
2525 struct bios_smap *smap, *smapbase;
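/*
 * Under Xen the pseudo-physical address space is a single contiguous
 * run of nr_pages pages, so the memory map can be synthesized directly
 * rather than queried from a BIOS.
 */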
2531 Maxmem = xen_start_info->nr_pages - init_first;
2534 physmap[0] = init_first << PAGE_SHIFT;
2535 physmap[1] = ptoa(Maxmem) - round_page(msgbufsize);
2539 if (arch_i386_is_xbox) {
2541 * We queried the memory size before, so chop off 4MB for
2542 * the framebuffer and inform the OS of this.
2545 physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
2550 bzero(&vmf, sizeof(vmf));
2551 bzero(physmap, sizeof(physmap));
2555 * Check if the loader supplied an SMAP memory map. If so,
2556 * use that and do not make any VM86 calls.
2560 kmdp = preload_search_by_type("elf kernel");
2562 kmdp = preload_search_by_type("elf32 kernel");
2564 smapbase = (struct bios_smap *)preload_search_info(kmdp,
2565 MODINFO_METADATA | MODINFOMD_SMAP);
2566 if (smapbase != NULL) {
2567 add_smap_entries(smapbase, physmap, &physmap_idx);
2573 * Some newer BIOSes have a broken INT 12H implementation
2574 * which causes a kernel panic immediately. In this case, we
2575 * need to use the SMAP to determine the base memory size.
2578 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
2579 if (hasbrokenint12 == 0) {
2580 /* Use INT12 to determine base memory size. */
2581 vm86_intcall(0x12, &vmf);
2582 basemem = vmf.vmf_ax;
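/* INT 12h returns the base memory size in KB in %ax. */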
2587 * Fetch the memory map with INT 15:E820. Map page 1 R/W into
2588 * the kernel page table so we can use it as a buffer. The
2589 * kernel will unmap this page later.
2591 pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
2593 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
2594 res = vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
2595 KASSERT(res != 0, ("vm86_getptr() failed: address not found"));
2599 vmf.vmf_eax = 0xE820;
2600 vmf.vmf_edx = SMAP_SIG;
2601 vmf.vmf_ecx = sizeof(struct bios_smap);
2602 i = vm86_datacall(0x15, &vmf, &vmc);
2603 if (i || vmf.vmf_eax != SMAP_SIG)
2606 if (!add_smap_entry(smap, physmap, &physmap_idx))
2608 } while (vmf.vmf_ebx != 0);
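/* A zero continuation value in %ebx marks the end of the E820 map. */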
2612 * If we didn't fetch the "base memory" size from INT12,
2613 * figure it out from the SMAP (or just guess).
2616 for (i = 0; i <= physmap_idx; i += 2) {
2617 if (physmap[i] == 0x00000000) {
2618 basemem = physmap[i + 1] / 1024;
2623 /* XXX: If we couldn't find basemem from SMAP, just guess. */
2629 if (physmap[1] != 0)
2633 * If we failed to find an SMAP, figure out the extended
2634 * memory size. We will then build a simple memory map with
2635 * two segments, one for "base memory" and the second for
2636 * "extended memory". Note that "extended memory" starts at a
2637 * physical address of 1MB and that both basemem and extmem
2638 * are in units of 1KB.
2640 * First, try to fetch the extended memory size via INT 15:E801.
2642 vmf.vmf_ax = 0xE801;
2643 if (vm86_intcall(0x15, &vmf) == 0) {
2644 extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
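/*
 * INT 15h:E801 reports memory between 1MB and 16MB in KB in %cx and
 * memory above 16MB in 64KB blocks in %dx.
 */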
2647 * If INT15:E801 fails, this is our last-ditch effort
2648 * to determine the extended memory size. Currently
2649 * we prefer the RTC value over INT15:88.
2653 vm86_intcall(0x15, &vmf);
2654 extmem = vmf.vmf_ax;
2656 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
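/*
 * The CMOS RTC holds the extended memory size in KB as a low/high
 * byte pair (RTC_EXTLO/RTC_EXTHI).
 */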
2661 * Special hack for chipsets that still remap the 384k hole when
2662 * there's 16MB of memory - this really confuses people who
2663 * are trying to use bus mastering ISA controllers with the
2664 * "16MB limit"; they only have 16MB, but the remapping puts
2665 * them beyond the limit.
2667 * If extended memory is between 15-16MB (16-17MB phys address range), chop it to 15MB.
2670 if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
2674 physmap[1] = basemem * 1024;
2676 physmap[physmap_idx] = 0x100000;
2677 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
2682 * Now, physmap contains a map of physical memory.
2686 /* make hole for AP bootstrap code */
2687 physmap[1] = mp_bootaddress(physmap[1]);
2691 * Maxmem isn't the "maximum memory", it's one larger than the
2692 * highest page of the physical address space. It should be
2693 * called something like "Maxphyspage". We may adjust this
2694 * based on ``hw.physmem'' and the results of the memory test.
2696 Maxmem = atop(physmap[physmap_idx + 1]);
2699 Maxmem = MAXMEM / 4;
2702 if (TUNABLE_QUAD_FETCH("hw.physmem", &physmem_tunable))
2703 Maxmem = atop(physmem_tunable);
2706 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
2707 * the amount of memory in the system.
2709 if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
2710 Maxmem = atop(physmap[physmap_idx + 1]);
2713 * By default enable the memory test on real hardware, and disable
2714 * it if we appear to be running in a VM. This avoids touching all
2715 * pages unnecessarily, which doesn't matter on real hardware but is
2716 * bad for shared VM hosts. Use a general name so that
2717 * one could eventually do more with the code than just disable it.
2719 memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1;
2720 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
2722 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
2723 (boothowto & RB_VERBOSE))
2724 printf("Physical memory use set to %ldK\n", Maxmem * 4);
2727 * If Maxmem has been increased beyond what the system has detected,
2728 * extend the last memory segment to the new limit.
2730 if (atop(physmap[physmap_idx + 1]) < Maxmem)
2731 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
2733 /* call pmap initialization to make new kernel address space */
2734 pmap_bootstrap(first);
2737 * Size up each available chunk of physical memory.
2739 physmap[0] = PAGE_SIZE; /* mask off page 0 */
2742 phys_avail[pa_indx++] = physmap[0];
2743 phys_avail[pa_indx] = physmap[0];
2744 dump_avail[da_indx] = physmap[0];
2748 * Get dcons buffer address
2750 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
2751 getenv_quad("dcons.size", &dcons_size) == 0)
2756 * physmap is in bytes, so when converting to page boundaries,
2757 * round up the start address and round down the end address.
2759 for (i = 0; i <= physmap_idx; i += 2) {
2762 end = ptoa((vm_paddr_t)Maxmem);
2763 if (physmap[i + 1] < end)
2764 end = trunc_page(physmap[i + 1]);
2765 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
2766 int tmp, page_bad, full;
2767 int *ptr = (int *)CADDR3;
2771 * block out kernel memory as not available.
2773 if (pa >= KERNLOAD && pa < first)
2777 * block out dcons buffer
2780 && pa >= trunc_page(dcons_addr)
2781 && pa < dcons_addr + dcons_size)
2789 * map page into kernel: valid, read/write, non-cacheable
2791 *pte = pa | PG_V | PG_RW | PG_N;
2796 * Test for alternating 1's and 0's
2798 *(volatile int *)ptr = 0xaaaaaaaa;
2799 if (*(volatile int *)ptr != 0xaaaaaaaa)
2802 * Test for alternating 0's and 1's
2804 *(volatile int *)ptr = 0x55555555;
2805 if (*(volatile int *)ptr != 0x55555555)
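/*
 * Test for all 1's
 */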
2810 *(volatile int *)ptr = 0xffffffff;
2811 if (*(volatile int *)ptr != 0xffffffff)
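/*
 * Test for all 0's
 */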
2816 *(volatile int *)ptr = 0x0;
2817 if (*(volatile int *)ptr != 0x0)
2820 * Restore original value.
2826 * Adjust array of valid/good pages.
2828 if (page_bad == TRUE)
2831 * If this good page is a continuation of the
2832 * previous set of good pages, then just increase
2833 * the end pointer. Otherwise start a new chunk.
2834 * Note that "end" points one page past the last
2835 * page, making the range >= start and < end.
2836 * If we're also doing a speculative memory
2837 * test and we are at or past the end, bump up Maxmem
2838 * so that we keep going. The first bad page
2839 * will terminate the loop.
2841 if (phys_avail[pa_indx] == pa) {
2842 phys_avail[pa_indx] += PAGE_SIZE;
2845 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2847 "Too many holes in the physical address space, giving up\n");
2852 phys_avail[pa_indx++] = pa; /* start */
2853 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2857 if (dump_avail[da_indx] == pa) {
2858 dump_avail[da_indx] += PAGE_SIZE;
2861 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2865 dump_avail[da_indx++] = pa; /* start */
2866 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2876 phys_avail[0] = physfree;
2877 phys_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
2879 dump_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
2885 * The last chunk must contain at least one page plus the message
2886 * buffer to avoid complicating other code (message buffer address
2887 * calculation, etc.).
2889 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2890 round_page(msgbufsize) >= phys_avail[pa_indx]) {
2891 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2892 phys_avail[pa_indx--] = 0;
2893 phys_avail[pa_indx--] = 0;
2896 Maxmem = atop(phys_avail[pa_indx]);
2898 /* Trim off space for the message buffer. */
2899 phys_avail[pa_indx] -= round_page(msgbufsize);
2901 /* Map the message buffer. */
2902 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
2903 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2911 #define MTOPSIZE (1 << (14 + PAGE_SHIFT))
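/*
 * 1 << (14 + PAGE_SHIFT) covers 2^14 pages (64MB with 4KB pages),
 * which presumably sizes the window for Xen's machine-to-physical
 * mapping array described below.
 */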
2917 unsigned long gdtmachpfn;
2918 int error, gsel_tss, metadata_missing, x, pa;
2920 #ifdef CPU_ENABLE_SSE
2921 struct xstate_hdr *xhdr;
2923 struct callback_register event = {
2924 .type = CALLBACKTYPE_event,
2925 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)Xhypervisor_callback },
2927 struct callback_register failsafe = {
2928 .type = CALLBACKTYPE_failsafe,
2929 .address = {GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback },
2932 thread0.td_kstack = proc0kstack;
2933 thread0.td_kstack_pages = KSTACK_PAGES;
2936 * This may be done better later if it gets more high-level
2937 * components in it. If so, just link td->td_proc here.
2939 proc_linkup0(&proc0, &thread0);
2941 metadata_missing = 0;
2942 if (xen_start_info->mod_start) {
2943 preload_metadata = (caddr_t)xen_start_info->mod_start;
2944 preload_bootstrap_relocate(KERNBASE);
2946 metadata_missing = 1;
2949 kern_envp = static_env;
2950 else if ((caddr_t)xen_start_info->cmd_line)
2951 kern_envp = xen_setbootenv((caddr_t)xen_start_info->cmd_line);
2953 boothowto |= xen_boothowto(kern_envp);
2955 /* Init basic tunables, hz etc */
2959 * XEN occupies a portion of the upper virtual address space.
2960 * At its base it manages an array mapping machine page frames
2961 * to physical page frames - hence we need to be able to
2962 * access 4GB - (64MB - 4MB + 64k).
2964 gdt_segs[GPRIV_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2965 gdt_segs[GUFS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2966 gdt_segs[GUGS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2967 gdt_segs[GCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2968 gdt_segs[GDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2969 gdt_segs[GUCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2970 gdt_segs[GUDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2971 gdt_segs[GBIOSLOWMEM_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + MTOPSIZE);
2974 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2975 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2977 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V | PG_RW);
2978 bzero(gdt, PAGE_SIZE);
2979 for (x = 0; x < NGDT; x++)
2980 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2982 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2984 gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
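/*
 * Xen requires descriptor-table pages to be mapped read-only before
 * they are registered, so drop PG_RW prior to HYPERVISOR_set_gdt().
 */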
2985 PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V);
2986 PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0);
2990 if ((error = HYPERVISOR_set_trap_table(trap_table)) != 0) {
2991 panic("set_trap_table failed - error %d\n", error);
2994 error = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
2996 error = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
2997 #if CONFIG_XEN_COMPAT <= 0x030002
2998 if (error == -ENOXENSYS)
2999 HYPERVISOR_set_callbacks(GSEL(GCODE_SEL, SEL_KPL),
3000 (unsigned long)Xhypervisor_callback,
3001 GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
3003 pcpu_init(pc, 0, sizeof(struct pcpu));
3004 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
3005 pmap_kenter(pa + KERNBASE, pa);
3006 dpcpu_init((void *)(first + KERNBASE), 0);
3007 first += DPCPU_SIZE;
3008 physfree += DPCPU_SIZE;
3009 init_first += DPCPU_SIZE / PAGE_SIZE;
3011 PCPU_SET(prvspace, pc);
3012 PCPU_SET(curthread, &thread0);
3015 * Initialize mutexes.
3017 * icu_lock: in order to allow an interrupt to occur in a critical
3018 * section, to set pcpu->ipending (etc...) properly, we
3019 * must be able to get the icu lock, so it can't be under witness.
3023 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
3025 /* make ldt memory segments */
3026 PT_SET_MA(ldt, xpmap_ptom(VTOP(ldt)) | PG_V | PG_RW);
3027 bzero(ldt, PAGE_SIZE);
3028 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
3029 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
3030 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
3031 ssdtosd(&ldt_segs[x], &ldt[x].sd);
3033 default_proc_ldt.ldt_base = (caddr_t)ldt;
3034 default_proc_ldt.ldt_len = 6;
3035 _default_ldt = (int)&default_proc_ldt;
3036 PCPU_SET(currentldt, _default_ldt);
3037 PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
3038 xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));
3040 #if defined(XEN_PRIVILEGED)
3042 * Initialize the i8254 before the console so that console
3043 * initialization can use DELAY().
3049 * Initialize the console before we print anything out.
3053 if (metadata_missing)
3054 printf("WARNING: loader(8) metadata is missing!\n");
3061 /* Reset and mask the atpics and leave them shut down. */
3065 * Point the ICU spurious interrupt vectors at the APIC spurious
3066 * interrupt handler.
3068 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
3069 GSEL(GCODE_SEL, SEL_KPL));
3070 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
3071 GSEL(GCODE_SEL, SEL_KPL));
3076 db_fetch_ksymtab(bootinfo.bi_symtab, bootinfo.bi_esymtab);
3082 if (boothowto & RB_KDB)
3083 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
3086 finishidentcpu(); /* Final stage of CPU initialization */
3087 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
3088 GSEL(GCODE_SEL, SEL_KPL));
3089 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
3090 GSEL(GCODE_SEL, SEL_KPL));
3091 initializecpu(); /* Initialize CPU registers */
3092 initializecpucache();
3094 /* pointer to selector slot for %fs/%gs */
3095 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
3097 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
3098 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
3099 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
3100 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
3102 dblfault_tss.tss_cr3 = (int)IdlePDPT;
3104 dblfault_tss.tss_cr3 = (int)IdlePTD;
3106 dblfault_tss.tss_eip = (int)dblfault_handler;
3107 dblfault_tss.tss_eflags = PSL_KERNEL;
3108 dblfault_tss.tss_ds = dblfault_tss.tss_es =
3109 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
3110 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
3111 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
3112 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
3116 init_param2(physmem);
3118 /* now running on new page tables, configured, and u/iom is accessible */
3120 msgbufinit(msgbufp, msgbufsize);
3125 * Set up thread0 pcb after npxinit calculated pcb + fpu save
3126 * area size. Zero out the extended state header in fpu save area.
3129 thread0.td_pcb = get_pcb_td(&thread0);
3130 bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
3131 #ifdef CPU_ENABLE_SSE
3133 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
3135 xhdr->xstate_bv = xsave_mask;
3138 PCPU_SET(curpcb, thread0.td_pcb);
3139 /* make an initial tss so cpu can get interrupt stack on syscall! */
3140 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
3141 PCPU_SET(common_tss.tss_esp0, (vm_offset_t)thread0.td_pcb - 16);
3142 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
3143 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
3144 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL),
3145 PCPU_GET(common_tss.tss_esp0));
3147 /* transfer to user mode */
3149 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
3150 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
3152 /* setup proc 0's pcb */
3153 thread0.td_pcb->pcb_flags = 0;
3155 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
3157 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
3159 thread0.td_pcb->pcb_ext = 0;
3160 thread0.td_frame = &proc0_tf;
3161 thread0.td_pcb->pcb_fsd = PCPU_GET(fsgs_gdt)[0];
3162 thread0.td_pcb->pcb_gsd = PCPU_GET(fsgs_gdt)[1];
3166 /* Location of kernel stack for locore */
3167 return ((register_t)thread0.td_pcb);
3175 struct gate_descriptor *gdp;
3176 int gsel_tss, metadata_missing, x, pa;
3178 #ifdef CPU_ENABLE_SSE
3179 struct xstate_hdr *xhdr;
3182 thread0.td_kstack = proc0kstack;
3183 thread0.td_kstack_pages = KSTACK_PAGES;
3186 * This may be done better later if it gets more high-level
3187 * components in it. If so, just link td->td_proc here.
3189 proc_linkup0(&proc0, &thread0);
3198 metadata_missing = 0;
3199 if (bootinfo.bi_modulep) {
3200 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
3201 preload_bootstrap_relocate(KERNBASE);
3203 metadata_missing = 1;
3206 kern_envp = static_env;
3207 else if (bootinfo.bi_envp)
3208 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
3210 /* Init basic tunables, hz etc */
3214 * Make gdt memory segments. All segments cover the full 4GB
3215 * of address space and permissions are enforced at page level.
3217 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
3218 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
3219 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
3220 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
3221 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
3222 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
3225 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
3226 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
3227 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
3229 for (x = 0; x < NGDT; x++)
3230 ssdtosd(&gdt_segs[x], &gdt[x].sd);
3232 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
3233 r_gdt.rd_base = (int) gdt;
3234 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
3237 pcpu_init(pc, 0, sizeof(struct pcpu));
3238 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
3239 pmap_kenter(pa + KERNBASE, pa);
3240 dpcpu_init((void *)(first + KERNBASE), 0);
3241 first += DPCPU_SIZE;
3242 PCPU_SET(prvspace, pc);
3243 PCPU_SET(curthread, &thread0);
3246 * Initialize mutexes.
3248 * icu_lock: in order to allow an interrupt to occur in a critical
3249 * section, to set pcpu->ipending (etc...) properly, we
3250 * must be able to get the icu lock, so it can't be under witness.
3254 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
3256 /* make ldt memory segments */
3257 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
3258 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
3259 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
3260 ssdtosd(&ldt_segs[x], &ldt[x].sd);
3262 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
3264 PCPU_SET(currentldt, _default_ldt);
3267 for (x = 0; x < NIDT; x++)
3268 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
3269 GSEL(GCODE_SEL, SEL_KPL));
3270 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
3271 GSEL(GCODE_SEL, SEL_KPL));
3272 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
3273 GSEL(GCODE_SEL, SEL_KPL));
3274 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
3275 GSEL(GCODE_SEL, SEL_KPL));
3276 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
3277 GSEL(GCODE_SEL, SEL_KPL));
3278 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
3279 GSEL(GCODE_SEL, SEL_KPL));
3280 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
3281 GSEL(GCODE_SEL, SEL_KPL));
3282 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
3283 GSEL(GCODE_SEL, SEL_KPL));
3284 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
3285 GSEL(GCODE_SEL, SEL_KPL));
3286 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
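/*
 * The double fault handler is reached through a task gate
 * (GPANIC_SEL) so that it runs with the known-good stack and state
 * loaded from dblfault_tss.
 */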
3287 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
3288 GSEL(GCODE_SEL, SEL_KPL));
3289 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
3290 GSEL(GCODE_SEL, SEL_KPL));
3291 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
3292 GSEL(GCODE_SEL, SEL_KPL));
3293 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
3294 GSEL(GCODE_SEL, SEL_KPL));
3295 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
3296 GSEL(GCODE_SEL, SEL_KPL));
3297 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
3298 GSEL(GCODE_SEL, SEL_KPL));
3299 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
3300 GSEL(GCODE_SEL, SEL_KPL));
3301 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
3302 GSEL(GCODE_SEL, SEL_KPL));
3303 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
3304 GSEL(GCODE_SEL, SEL_KPL));
3305 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
3306 GSEL(GCODE_SEL, SEL_KPL));
3307 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
3308 GSEL(GCODE_SEL, SEL_KPL));
3309 #ifdef KDTRACE_HOOKS
3310 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL,
3311 GSEL(GCODE_SEL, SEL_KPL));
3314 setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), SDT_SYS386IGT, SEL_UPL,
3315 GSEL(GCODE_SEL, SEL_KPL));
3318 r_idt.rd_limit = sizeof(idt0) - 1;
3319 r_idt.rd_base = (int) idt;
3324 * The following code queries the PCI ID of 0:0:0. For the XBOX,
3325 * this should be 0x10de / 0x02a5.
3327 * This is exactly what Linux does.
3329 outl(0xcf8, 0x80000000);
3330 if (inl(0xcfc) == 0x02a510de) {
3331 arch_i386_is_xbox = 1;
3332 pic16l_setled(XBOX_LED_GREEN);
3335 * We are an XBOX, but we may have either 64MB or 128MB of
3336 * memory. The PCI host bridge should be programmed for this,
3337 * so we just query it.
3339 outl(0xcf8, 0x80000084);
3340 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
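/*
 * Config register 0x84 of the host bridge appears to encode the top
 * of RAM; 0x7FFFFFF corresponds to the 128MB configuration.
 */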
3345 * Initialize the clock before the console so that console
3346 * initialization can use DELAY().
3351 * Initialize the console before we print anything out.
3355 if (metadata_missing)
3356 printf("WARNING: loader(8) metadata is missing!\n");
3365 /* Reset and mask the atpics and leave them shut down. */
3369 * Point the ICU spurious interrupt vectors at the APIC spurious
3370 * interrupt handler.
3372 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
3373 GSEL(GCODE_SEL, SEL_KPL));
3374 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
3375 GSEL(GCODE_SEL, SEL_KPL));
3380 db_fetch_ksymtab(bootinfo.bi_symtab, bootinfo.bi_esymtab);
3386 if (boothowto & RB_KDB)
3387 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
3390 finishidentcpu(); /* Final stage of CPU initialization */
3391 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
3392 GSEL(GCODE_SEL, SEL_KPL));
3393 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
3394 GSEL(GCODE_SEL, SEL_KPL));
3395 initializecpu(); /* Initialize CPU registers */
3396 initializecpucache();
3398 /* pointer to selector slot for %fs/%gs */
3399 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
3401 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
3402 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
3403 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
3404 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
3406 dblfault_tss.tss_cr3 = (int)IdlePDPT;
3408 dblfault_tss.tss_cr3 = (int)IdlePTD;
3410 dblfault_tss.tss_eip = (int)dblfault_handler;
3411 dblfault_tss.tss_eflags = PSL_KERNEL;
3412 dblfault_tss.tss_ds = dblfault_tss.tss_es =
3413 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
3414 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
3415 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
3416 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
3420 init_param2(physmem);
3422 /* now running on new page tables, configured, and u/iom is accessible */
3424 msgbufinit(msgbufp, msgbufsize);
3429 * Set up thread0 pcb after npxinit calculated pcb + fpu save
3430 * area size. Zero out the extended state header in fpu save area.
3433 thread0.td_pcb = get_pcb_td(&thread0);
3434 bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
3435 #ifdef CPU_ENABLE_SSE
3437 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
3439 xhdr->xstate_bv = xsave_mask;
3442 PCPU_SET(curpcb, thread0.td_pcb);
3443 /* make an initial tss so cpu can get interrupt stack on syscall! */
3444 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
3445 PCPU_SET(common_tss.tss_esp0, (vm_offset_t)thread0.td_pcb - 16);
3446 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
3447 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
3448 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
3449 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
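/*
 * Point the I/O permission bitmap base (the upper 16 bits of
 * tss_ioopt) past the TSS limit so that all userland port access
 * traps.
 */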
3450 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
3453 /* make a call gate to reenter kernel with */
3454 gdp = &ldt[LSYS5CALLS_SEL].gd;
3456 x = (int) &IDTVEC(lcall_syscall);
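/*
 * The 32-bit handler offset is split between gd_looffset (low 16
 * bits) and gd_hioffset (high 16 bits).
 */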
3457 gdp->gd_looffset = x;
3458 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
3460 gdp->gd_type = SDT_SYS386CGT;
3461 gdp->gd_dpl = SEL_UPL;
3463 gdp->gd_hioffset = x >> 16;
3465 /* XXX does this work? */
3467 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
3468 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
3470 /* transfer to user mode */
3472 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
3473 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
3475 /* setup proc 0's pcb */
3476 thread0.td_pcb->pcb_flags = 0;
3478 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
3480 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
3482 thread0.td_pcb->pcb_ext = 0;
3483 thread0.td_frame = &proc0_tf;
3491 /* Location of kernel stack for locore */
3492 return ((register_t)thread0.td_pcb);
3497 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
3500 pcpu->pc_acpi_id = 0xffffffff;
3505 smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
3507 struct bios_smap *smapbase;
3508 struct bios_smap_xattr smap;
3511 int count, error, i;
3513 /* Retrieve the system memory map from the loader. */
3514 kmdp = preload_search_by_type("elf kernel");
3516 kmdp = preload_search_by_type("elf32 kernel");
3519 smapbase = (struct bios_smap *)preload_search_info(kmdp,
3520 MODINFO_METADATA | MODINFOMD_SMAP);
3521 if (smapbase == NULL)
3523 smapattr = (uint32_t *)preload_search_info(kmdp,
3524 MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
3525 count = *((u_int32_t *)smapbase - 1) / sizeof(*smapbase);
3527 for (i = 0; i < count; i++) {
3528 smap.base = smapbase[i].base;
3529 smap.length = smapbase[i].length;
3530 smap.type = smapbase[i].type;
3531 if (smapattr != NULL)
3532 smap.xattr = smapattr[i];
3535 error = SYSCTL_OUT(req, &smap, sizeof(smap));
3539 SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
3540 smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data");
3544 spinlock_enter(void)
3550 if (td->td_md.md_spinlock_count == 0) {
3551 flags = intr_disable();
3552 td->td_md.md_spinlock_count = 1;
3553 td->td_md.md_saved_flags = flags;
3555 td->td_md.md_spinlock_count++;
3567 flags = td->td_md.md_saved_flags;
3568 td->td_md.md_spinlock_count--;
3569 if (td->td_md.md_spinlock_count == 0)
3570 intr_restore(flags);
3573 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
3574 static void f00f_hack(void *unused);
3575 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
3578 f00f_hack(void *unused)
3580 struct gate_descriptor *new_idt;
3588 printf("Intel Pentium detected, installing workaround for F00F bug\n");
3590 tmp = kmem_malloc(kernel_arena, PAGE_SIZE * 2, M_WAITOK | M_ZERO);
3592 panic("kmem_malloc returned 0");
3594 /* Put the problematic entry (#6) at the end of the lower page. */
3595 new_idt = (struct gate_descriptor*)
3596 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
3597 bcopy(idt, new_idt, sizeof(idt0));
3598 r_idt.rd_base = (u_int)new_idt;
3601 pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
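/*
 * With the IDT page read-only, the F00F lockup becomes a page fault
 * on the descriptor fetch, which the trap handler can recover from.
 */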
3603 #endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
3606 * Construct a PCB from a trapframe. This is called from kdb_trap() where
3607 * we want to start a backtrace from the function that caused us to enter
3608 * the debugger. We have the context in the trapframe, but base the trace
3609 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
3610 * enough for a backtrace.
3613 makectx(struct trapframe *tf, struct pcb *pcb)
3616 pcb->pcb_edi = tf->tf_edi;
3617 pcb->pcb_esi = tf->tf_esi;
3618 pcb->pcb_ebp = tf->tf_ebp;
3619 pcb->pcb_ebx = tf->tf_ebx;
3620 pcb->pcb_eip = tf->tf_eip;
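/*
 * Traps from kernel mode do not push %esp/%ss, so in that case the
 * stack pointer at trap time is the top of the trapframe itself,
 * less the two missing words.
 */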
3621 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
3625 ptrace_set_pc(struct thread *td, u_long addr)
3628 td->td_frame->tf_eip = addr;
3633 ptrace_single_step(struct thread *td)
3635 td->td_frame->tf_eflags |= PSL_T;
3640 ptrace_clear_single_step(struct thread *td)
3642 td->td_frame->tf_eflags &= ~PSL_T;
3647 fill_regs(struct thread *td, struct reg *regs)
3650 struct trapframe *tp;
3654 regs->r_gs = pcb->pcb_gs;
3655 return (fill_frame_regs(tp, regs));
3659 fill_frame_regs(struct trapframe *tp, struct reg *regs)
3661 regs->r_fs = tp->tf_fs;
3662 regs->r_es = tp->tf_es;
3663 regs->r_ds = tp->tf_ds;
3664 regs->r_edi = tp->tf_edi;
3665 regs->r_esi = tp->tf_esi;
3666 regs->r_ebp = tp->tf_ebp;
3667 regs->r_ebx = tp->tf_ebx;
3668 regs->r_edx = tp->tf_edx;
3669 regs->r_ecx = tp->tf_ecx;
3670 regs->r_eax = tp->tf_eax;
3671 regs->r_eip = tp->tf_eip;
3672 regs->r_cs = tp->tf_cs;
3673 regs->r_eflags = tp->tf_eflags;
3674 regs->r_esp = tp->tf_esp;
3675 regs->r_ss = tp->tf_ss;
3680 set_regs(struct thread *td, struct reg *regs)
3683 struct trapframe *tp;
3686 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
3687 !CS_SECURE(regs->r_cs))
3690 tp->tf_fs = regs->r_fs;
3691 tp->tf_es = regs->r_es;
3692 tp->tf_ds = regs->r_ds;
3693 tp->tf_edi = regs->r_edi;
3694 tp->tf_esi = regs->r_esi;
3695 tp->tf_ebp = regs->r_ebp;
3696 tp->tf_ebx = regs->r_ebx;
3697 tp->tf_edx = regs->r_edx;
3698 tp->tf_ecx = regs->r_ecx;
3699 tp->tf_eax = regs->r_eax;
3700 tp->tf_eip = regs->r_eip;
3701 tp->tf_cs = regs->r_cs;
3702 tp->tf_eflags = regs->r_eflags;
3703 tp->tf_esp = regs->r_esp;
3704 tp->tf_ss = regs->r_ss;
3705 pcb->pcb_gs = regs->r_gs;
3709 #ifdef CPU_ENABLE_SSE
3711 fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
3715 struct env87 *penv_87 = &sv_87->sv_env;
3716 struct envxmm *penv_xmm = &sv_xmm->sv_env;
3719 bzero(sv_87, sizeof(*sv_87));
3721 /* FPU control/status */
3722 penv_87->en_cw = penv_xmm->en_cw;
3723 penv_87->en_sw = penv_xmm->en_sw;
3724 penv_87->en_tw = penv_xmm->en_tw;
3725 penv_87->en_fip = penv_xmm->en_fip;
3726 penv_87->en_fcs = penv_xmm->en_fcs;
3727 penv_87->en_opcode = penv_xmm->en_opcode;
3728 penv_87->en_foo = penv_xmm->en_foo;
3729 penv_87->en_fos = penv_xmm->en_fos;
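/* FPU registers */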
3732 for (i = 0; i < 8; ++i)
3733 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
3737 set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
3741 struct env87 *penv_87 = &sv_87->sv_env;
3742 struct envxmm *penv_xmm = &sv_xmm->sv_env;
3745 /* FPU control/status */
3746 penv_xmm->en_cw = penv_87->en_cw;
3747 penv_xmm->en_sw = penv_87->en_sw;
3748 penv_xmm->en_tw = penv_87->en_tw;
3749 penv_xmm->en_fip = penv_87->en_fip;
3750 penv_xmm->en_fcs = penv_87->en_fcs;
3751 penv_xmm->en_opcode = penv_87->en_opcode;
3752 penv_xmm->en_foo = penv_87->en_foo;
3753 penv_xmm->en_fos = penv_87->en_fos;
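/* FPU registers */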
3756 for (i = 0; i < 8; ++i)
3757 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
3759 #endif /* CPU_ENABLE_SSE */
3762 fill_fpregs(struct thread *td, struct fpreg *fpregs)
3765 KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
3766 P_SHOULDSTOP(td->td_proc),
3767 ("not suspended thread %p", td));
3771 bzero(fpregs, sizeof(*fpregs));
3773 #ifdef CPU_ENABLE_SSE
3775 fill_fpregs_xmm(&get_pcb_user_save_td(td)->sv_xmm,
3776 (struct save87 *)fpregs);
3778 #endif /* CPU_ENABLE_SSE */
3779 bcopy(&get_pcb_user_save_td(td)->sv_87, fpregs,
3785 set_fpregs(struct thread *td, struct fpreg *fpregs)
3788 #ifdef CPU_ENABLE_SSE
3790 set_fpregs_xmm((struct save87 *)fpregs,
3791 &get_pcb_user_save_td(td)->sv_xmm);
3793 #endif /* CPU_ENABLE_SSE */
3794 bcopy(fpregs, &get_pcb_user_save_td(td)->sv_87,
3803 * Get machine context.
3806 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
3808 struct trapframe *tp;
3809 struct segment_descriptor *sdp;
3813 PROC_LOCK(curthread->td_proc);
3814 mcp->mc_onstack = sigonstack(tp->tf_esp);
3815 PROC_UNLOCK(curthread->td_proc);
3816 mcp->mc_gs = td->td_pcb->pcb_gs;
3817 mcp->mc_fs = tp->tf_fs;
3818 mcp->mc_es = tp->tf_es;
3819 mcp->mc_ds = tp->tf_ds;
3820 mcp->mc_edi = tp->tf_edi;
3821 mcp->mc_esi = tp->tf_esi;
3822 mcp->mc_ebp = tp->tf_ebp;
3823 mcp->mc_isp = tp->tf_isp;
3824 mcp->mc_eflags = tp->tf_eflags;
3825 if (flags & GET_MC_CLEAR_RET) {
3828 mcp->mc_eflags &= ~PSL_C;
3830 mcp->mc_eax = tp->tf_eax;
3831 mcp->mc_edx = tp->tf_edx;
3833 mcp->mc_ebx = tp->tf_ebx;
3834 mcp->mc_ecx = tp->tf_ecx;
3835 mcp->mc_eip = tp->tf_eip;
3836 mcp->mc_cs = tp->tf_cs;
3837 mcp->mc_esp = tp->tf_esp;
3838 mcp->mc_ss = tp->tf_ss;
3839 mcp->mc_len = sizeof(*mcp);
3840 get_fpcontext(td, mcp, NULL, 0);
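/*
 * Reassemble the 32-bit segment bases from the descriptor's split
 * sd_hibase (top 8 bits) and sd_lobase (low 24 bits) fields.
 */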
3841 sdp = &td->td_pcb->pcb_fsd;
3842 mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
3843 sdp = &td->td_pcb->pcb_gsd;
3844 mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
3846 mcp->mc_xfpustate = 0;
3847 mcp->mc_xfpustate_len = 0;
3848 bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
3853 * Set machine context.
3855 * However, we set only the user-modifiable flags, and we won't
3856 * touch the cs selector.
3859 set_mcontext(struct thread *td, mcontext_t *mcp)
3861 struct trapframe *tp;
3866 if (mcp->mc_len != sizeof(*mcp) ||
3867 (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
3869 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
3870 (tp->tf_eflags & ~PSL_USERCHANGE);
3871 if (mcp->mc_flags & _MC_HASFPXSTATE) {
3872 if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
3873 sizeof(union savefpu))
3875 xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
3876 ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
3877 mcp->mc_xfpustate_len);
3882 ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
3885 tp->tf_fs = mcp->mc_fs;
3886 tp->tf_es = mcp->mc_es;
3887 tp->tf_ds = mcp->mc_ds;
3888 tp->tf_edi = mcp->mc_edi;
3889 tp->tf_esi = mcp->mc_esi;
3890 tp->tf_ebp = mcp->mc_ebp;
3891 tp->tf_ebx = mcp->mc_ebx;
3892 tp->tf_edx = mcp->mc_edx;
3893 tp->tf_ecx = mcp->mc_ecx;
3894 tp->tf_eax = mcp->mc_eax;
3895 tp->tf_eip = mcp->mc_eip;
3896 tp->tf_eflags = eflags;
3897 tp->tf_esp = mcp->mc_esp;
3898 tp->tf_ss = mcp->mc_ss;
3899 td->td_pcb->pcb_gs = mcp->mc_gs;
3904 get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
3905 size_t xfpusave_len)
3907 #ifdef CPU_ENABLE_SSE
3908 size_t max_len, len;
3912 mcp->mc_fpformat = _MC_FPFMT_NODEV;
3913 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
3914 bzero(mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
3916 mcp->mc_ownedfp = npxgetregs(td);
3917 bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
3918 sizeof(mcp->mc_fpstate));
3919 mcp->mc_fpformat = npxformat();
3920 #ifdef CPU_ENABLE_SSE
3921 if (!use_xsave || xfpusave_len == 0)
3923 max_len = cpu_max_ext_state_size - sizeof(union savefpu);
3925 if (len > max_len) {
3927 bzero(xfpusave + max_len, len - max_len);
3929 mcp->mc_flags |= _MC_HASFPXSTATE;
3930 mcp->mc_xfpustate_len = len;
3931 bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
3937 set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate,
3938 size_t xfpustate_len)
3940 union savefpu *fpstate;
3943 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
3945 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
3946 mcp->mc_fpformat != _MC_FPFMT_XMM)
3948 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
3949 /* We don't care what state is left in the FPU or PCB. */
3952 } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
3953 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
3955 fpstate = (union savefpu *)&mcp->mc_fpstate;
3956 #ifdef CPU_ENABLE_SSE
3958 fpstate->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
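/*
 * Reserved MXCSR bits are masked off above; attempting to load them
 * set would cause fxrstor to fault.
 */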
3960 error = npxsetregs(td, fpstate, xfpustate, xfpustate_len);
3970 fpstate_drop(struct thread *td)
3973 KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
3976 if (PCPU_GET(fpcurthread) == td)
3980 * XXX force a full drop of the npx. The above only drops it if we
3981 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
3983 * XXX I don't much like npxgetregs()'s semantics of doing a full
3984 * drop. Dropping only to the pcb matches fnsave's behaviour.
3985 * We only need to drop to !PCB_INITDONE in sendsig(). But
3986 * sendsig() is the only caller of npxgetregs()... perhaps we just
3987 * have too many layers.
3989 curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
3990 PCB_NPXUSERINITDONE);
3995 fill_dbregs(struct thread *td, struct dbreg *dbregs)
4000 dbregs->dr[0] = rdr0();
4001 dbregs->dr[1] = rdr1();
4002 dbregs->dr[2] = rdr2();
4003 dbregs->dr[3] = rdr3();
4004 dbregs->dr[4] = rdr4();
4005 dbregs->dr[5] = rdr5();
4006 dbregs->dr[6] = rdr6();
4007 dbregs->dr[7] = rdr7();
4010 dbregs->dr[0] = pcb->pcb_dr0;
4011 dbregs->dr[1] = pcb->pcb_dr1;
4012 dbregs->dr[2] = pcb->pcb_dr2;
4013 dbregs->dr[3] = pcb->pcb_dr3;
4016 dbregs->dr[6] = pcb->pcb_dr6;
4017 dbregs->dr[7] = pcb->pcb_dr7;
4023 set_dbregs(struct thread *td, struct dbreg *dbregs)
4029 load_dr0(dbregs->dr[0]);
4030 load_dr1(dbregs->dr[1]);
4031 load_dr2(dbregs->dr[2]);
4032 load_dr3(dbregs->dr[3]);
4033 load_dr4(dbregs->dr[4]);
4034 load_dr5(dbregs->dr[5]);
4035 load_dr6(dbregs->dr[6]);
4036 load_dr7(dbregs->dr[7]);
4039 * Don't let an illegal value for dr7 get set. Specifically,
4040 * check for undefined settings. Setting these bit patterns
4041 * results in undefined behaviour and can lead to an unexpected hang.
4044 for (i = 0; i < 4; i++) {
4045 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
4047 if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
4054 * Don't let a process set a breakpoint that is not within the
4055 * process's address space. If a process could do this, it
4056 * could halt the system by setting a breakpoint in the kernel
4057 * (if ddb was enabled). Thus, we need to check to make sure
4058 * that no breakpoints are being enabled for addresses outside
4059 * the process's address space.
4061 * XXX - what about when the watched area of the user's
4062 * address space is written into from within the kernel
4063 * ... wouldn't that still cause a breakpoint to be generated
4064 * from within kernel mode?
4067 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
4068 /* dr0 is enabled */
4069 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
4073 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
4074 /* dr1 is enabled */
4075 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
4079 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
4080 /* dr2 is enabled */
4081 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
4085 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
4086 /* dr3 is enabled */
4087 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
4091 pcb->pcb_dr0 = dbregs->dr[0];
4092 pcb->pcb_dr1 = dbregs->dr[1];
4093 pcb->pcb_dr2 = dbregs->dr[2];
4094 pcb->pcb_dr3 = dbregs->dr[3];
4095 pcb->pcb_dr6 = dbregs->dr[6];
4096 pcb->pcb_dr7 = dbregs->dr[7];
4098 pcb->pcb_flags |= PCB_DBREGS;
4105 * Return > 0 if a hardware breakpoint has been hit, and the
4106 * breakpoint was in user space. Return 0 otherwise.
4109 user_dbreg_trap(void)
4111 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
4112 u_int32_t bp; /* breakpoint bits extracted from dr6 */
4113 int nbp; /* number of breakpoints that triggered */
4114 caddr_t addr[4]; /* breakpoint addresses */
4118 if ((dr7 & 0x000000ff) == 0) {
4120 * all GE and LE bits in the dr7 register are zero,
4121 * thus the trap couldn't have been caused by the
4122 * hardware debug registers
4129 bp = dr6 & 0x0000000f;
4133 * None of the breakpoint bits are set, meaning this
4134 * trap was not caused by any of the debug registers
4140 * At least one of the breakpoints was hit; check to see
4141 * which ones, and whether any of them are user-space addresses.
4145 addr[nbp++] = (caddr_t)rdr0();
4148 addr[nbp++] = (caddr_t)rdr1();
4151 addr[nbp++] = (caddr_t)rdr2();
4154 addr[nbp++] = (caddr_t)rdr3();
4157 for (i = 0; i < nbp; i++) {
4158 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
4160 * addr[i] is in user space
4167 * None of the breakpoints are in user space.
4175 * Provide inb() and outb() as functions. They are normally only available as
4176 * inline functions, and thus cannot be called from the debugger.
4179 /* silence compiler warnings */
4180 u_char inb_(u_short);
4181 void outb_(u_short, u_char);
4190 outb_(u_short port, u_char data)