2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
6 * This code is derived from software contributed to Berkeley by
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
44 #include "opt_atpic.h"
45 #include "opt_compat.h"
50 #include "opt_kstack_pages.h"
51 #include "opt_maxmem.h"
52 #include "opt_mp_watchdog.h"
54 #include "opt_perfmon.h"
55 #include "opt_platform.h"
58 #include <sys/param.h>
60 #include <sys/systm.h>
64 #include <sys/callout.h>
67 #include <sys/eventhandler.h>
69 #include <sys/imgact.h>
71 #include <sys/kernel.h>
73 #include <sys/linker.h>
75 #include <sys/malloc.h>
76 #include <sys/memrange.h>
77 #include <sys/msgbuf.h>
78 #include <sys/mutex.h>
80 #include <sys/ptrace.h>
81 #include <sys/reboot.h>
82 #include <sys/rwlock.h>
83 #include <sys/sched.h>
84 #include <sys/signalvar.h>
88 #include <sys/syscallsubr.h>
89 #include <sys/sysctl.h>
90 #include <sys/sysent.h>
91 #include <sys/sysproto.h>
92 #include <sys/ucontext.h>
93 #include <sys/vmmeter.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vm_kern.h>
98 #include <vm/vm_page.h>
99 #include <vm/vm_map.h>
100 #include <vm/vm_object.h>
101 #include <vm/vm_pager.h>
102 #include <vm/vm_param.h>
106 #error KDB must be enabled in order for DDB to work!
109 #include <ddb/db_sym.h>
114 #include <net/netisr.h>
116 #include <machine/bootinfo.h>
117 #include <machine/clock.h>
118 #include <machine/cpu.h>
119 #include <machine/cputypes.h>
120 #include <machine/intr_machdep.h>
122 #include <machine/md_var.h>
123 #include <machine/metadata.h>
124 #include <machine/mp_watchdog.h>
125 #include <machine/pc/bios.h>
126 #include <machine/pcb.h>
127 #include <machine/pcb_ext.h>
128 #include <machine/proc.h>
129 #include <machine/reg.h>
130 #include <machine/sigframe.h>
131 #include <machine/specialreg.h>
132 #include <machine/vm86.h>
133 #include <x86/init.h>
135 #include <machine/perfmon.h>
138 #include <machine/smp.h>
145 #include <x86/apicvar.h>
149 #include <x86/isa/icu.h>
153 #include <machine/xbox.h>
155 int arch_i386_is_xbox = 0;
156 uint32_t arch_i386_xbox_memsize = 0;
159 /* Sanity check for __curthread() */
160 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
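/*
 * On i386, __curthread() is essentially a single %fs-relative load from
 * offset 0, roughly:
 *
 *	struct thread *td;
 *	__asm("movl %%fs:0,%0" : "=r" (td));
 *
 * so pc_curthread must remain the first member of struct pcpu.
 */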
162 extern register_t init386(int first);
163 extern void dblfault_handler(void);
165 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
166 #define CPU_ENABLE_SSE
169 static void cpu_startup(void *);
170 static void fpstate_drop(struct thread *td);
171 static void get_fpcontext(struct thread *td, mcontext_t *mcp,
172 char *xfpusave, size_t xfpusave_len);
173 static int set_fpcontext(struct thread *td, mcontext_t *mcp,
174 char *xfpustate, size_t xfpustate_len);
175 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
177 /* Intel ICH registers */
178 #define ICH_PMBASE 0x400
179 #define ICH_SMI_EN (ICH_PMBASE + 0x30)
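/*
 * Bit 3 (0x8) of SMI_EN is LEGACY_USB_EN; the MacBook quirk below clears
 * it via outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8).
 */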
181 int _udatasel, _ucodesel;
187 static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
189 #ifdef COMPAT_FREEBSD4
190 static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
197 FEATURE(pae, "Physical Address Extensions");
201 * The number of PHYSMAP entries must be one less than the number of
202 * PHYSSEG entries because the PHYSMAP entry that spans the largest
203 * physical address that is accessible by ISA DMA is split into two PHYSSEG entries.
206 #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
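/*
 * Each PHYSMAP entry is a (base, end) pair of physical addresses, so the
 * 2 * (VM_PHYSSEG_MAX - 1) vm_paddr_t slots describe VM_PHYSSEG_MAX - 1
 * regions.
 */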
208 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
209 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
211 /* must be 2 less so 0 0 can signal end of chunks */
212 #define PHYS_AVAIL_ARRAY_END (nitems(phys_avail) - 2)
213 #define DUMP_AVAIL_ARRAY_END (nitems(dump_avail) - 2)
215 struct kva_md_info kmi;
217 static struct trapframe proc0_tf;
218 struct pcpu __pcpu[MAXCPU];
222 struct mem_range_softc mem_range_softc;
224 /* Default init_ops implementation. */
225 struct init_ops init_ops = {
226 .early_clock_source_init = i8254_init,
227 .early_delay = i8254_delay,
229 .msi_init = msi_init,
241 * On MacBooks, we need to prevent the legacy USB circuit from
242 * generating an SMI#, because this can cause several problems,
243 * namely: incorrect CPU frequency detection and failure to start the APs.
245 * We do this by disabling a bit in the SMI_EN (SMI Control and
246 * Enable register) of the Intel ICH LPC Interface Bridge.
248 sysenv = kern_getenv("smbios.system.product");
249 if (sysenv != NULL) {
250 if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
251 strncmp(sysenv, "MacBook3,1", 10) == 0 ||
252 strncmp(sysenv, "MacBook4,1", 10) == 0 ||
253 strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
254 strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
255 strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
256 strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
257 strncmp(sysenv, "Macmini1,1", 10) == 0) {
259 printf("Disabling LEGACY_USB_EN bit on "
261 outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
267 * Good {morning,afternoon,evening,night}.
271 panicifcpuunsupported();
277 * Display physical memory if SMBIOS reports a reasonable amount of memory.
280 sysenv = kern_getenv("smbios.memory.enabled");
281 if (sysenv != NULL) {
282 memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
285 if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
286 memsize = ptoa((uintmax_t)Maxmem);
287 printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
288 realmem = atop(memsize);
291 * Display any holes after the first chunk of extended memory.
296 printf("Physical memory chunk(s):\n");
297 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
300 size = phys_avail[indx + 1] - phys_avail[indx];
302 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
303 (uintmax_t)phys_avail[indx],
304 (uintmax_t)phys_avail[indx + 1] - 1,
305 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
309 vm_ksubmap_init(&kmi);
311 printf("avail memory = %ju (%ju MB)\n",
312 ptoa((uintmax_t)vm_cnt.v_free_count),
313 ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);
316 * Set up buffers, so they can be used to read disk labels.
319 vm_pager_bufferinit();
324 * Send an interrupt to a process.
326 * Stack is set up to allow sigcode stored
327 * at top to call routine, followed by call
328 * to sigreturn routine below. After sigreturn
329 * resets the signal mask, the stack, and the
330 * frame pointer, it returns to the user-specified pc and psl.
335 osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
337 struct osigframe sf, *fp;
341 struct trapframe *regs;
347 PROC_LOCK_ASSERT(p, MA_OWNED);
348 sig = ksi->ksi_signo;
350 mtx_assert(&psp->ps_mtx, MA_OWNED);
352 oonstack = sigonstack(regs->tf_esp);
354 /* Allocate space for the signal handler context. */
355 if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
356 SIGISMEMBER(psp->ps_sigonstack, sig)) {
357 fp = (struct osigframe *)((uintptr_t)td->td_sigstk.ss_sp +
358 td->td_sigstk.ss_size - sizeof(struct osigframe));
359 #if defined(COMPAT_43)
360 td->td_sigstk.ss_flags |= SS_ONSTACK;
363 fp = (struct osigframe *)regs->tf_esp - 1;
365 /* Build the argument list for the signal handler. */
367 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
368 bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
369 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
370 /* Signal handler installed with SA_SIGINFO. */
371 sf.sf_arg2 = (register_t)&fp->sf_siginfo;
372 sf.sf_siginfo.si_signo = sig;
373 sf.sf_siginfo.si_code = ksi->ksi_code;
374 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
377 /* Old FreeBSD-style arguments. */
378 sf.sf_arg2 = ksi->ksi_code;
379 sf.sf_addr = (register_t)ksi->ksi_addr;
380 sf.sf_ahu.sf_handler = catcher;
382 mtx_unlock(&psp->ps_mtx);
385 /* Save most if not all of trap frame. */
386 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
387 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
388 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
389 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
390 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
391 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
392 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
393 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
394 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
395 sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
396 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
397 sf.sf_siginfo.si_sc.sc_gs = rgs();
398 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;
400 /* Build the signal context to be used by osigreturn(). */
401 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
402 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
403 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
404 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
405 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
406 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
407 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
408 sf.sf_siginfo.si_sc.sc_err = regs->tf_err;
411 * If we're a vm86 process, we want to save the segment registers.
412 * We also change eflags to be our emulated eflags, not the actual eflags.
415 if (regs->tf_eflags & PSL_VM) {
416 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
417 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
418 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
420 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
421 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
422 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
423 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;
425 if (vm86->vm86_has_vme == 0)
426 sf.sf_siginfo.si_sc.sc_ps =
427 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
428 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
430 /* See sendsig() for comments. */
431 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
435 * Copy the sigframe out to the user's stack.
437 if (copyout(&sf, fp, sizeof(*fp)) != 0) {
439 printf("process %ld has trashed its stack\n", (long)p->p_pid);
445 regs->tf_esp = (int)fp;
446 if (p->p_sysent->sv_sigcode_base != 0) {
447 regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
450 /* a.out sysentvec does not use shared page */
451 regs->tf_eip = p->p_sysent->sv_psstrings - szosigcode;
453 regs->tf_eflags &= ~(PSL_T | PSL_D);
454 regs->tf_cs = _ucodesel;
455 regs->tf_ds = _udatasel;
456 regs->tf_es = _udatasel;
457 regs->tf_fs = _udatasel;
459 regs->tf_ss = _udatasel;
461 mtx_lock(&psp->ps_mtx);
463 #endif /* COMPAT_43 */
465 #ifdef COMPAT_FREEBSD4
467 freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
469 struct sigframe4 sf, *sfp;
473 struct trapframe *regs;
479 PROC_LOCK_ASSERT(p, MA_OWNED);
480 sig = ksi->ksi_signo;
482 mtx_assert(&psp->ps_mtx, MA_OWNED);
484 oonstack = sigonstack(regs->tf_esp);
486 /* Save user context. */
487 bzero(&sf, sizeof(sf));
488 sf.sf_uc.uc_sigmask = *mask;
489 sf.sf_uc.uc_stack = td->td_sigstk;
490 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
491 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
492 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
493 sf.sf_uc.uc_mcontext.mc_gs = rgs();
494 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
495 bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
496 sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
497 bzero(sf.sf_uc.uc_mcontext.__spare__,
498 sizeof(sf.sf_uc.uc_mcontext.__spare__));
499 bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
501 /* Allocate space for the signal handler context. */
502 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
503 SIGISMEMBER(psp->ps_sigonstack, sig)) {
504 sfp = (struct sigframe4 *)((uintptr_t)td->td_sigstk.ss_sp +
505 td->td_sigstk.ss_size - sizeof(struct sigframe4));
506 #if defined(COMPAT_43)
507 td->td_sigstk.ss_flags |= SS_ONSTACK;
510 sfp = (struct sigframe4 *)regs->tf_esp - 1;
512 /* Build the argument list for the signal handler. */
514 sf.sf_ucontext = (register_t)&sfp->sf_uc;
515 bzero(&sf.sf_si, sizeof(sf.sf_si));
516 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
517 /* Signal handler installed with SA_SIGINFO. */
518 sf.sf_siginfo = (register_t)&sfp->sf_si;
519 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
521 /* Fill in POSIX parts */
522 sf.sf_si.si_signo = sig;
523 sf.sf_si.si_code = ksi->ksi_code;
524 sf.sf_si.si_addr = ksi->ksi_addr;
526 /* Old FreeBSD-style arguments. */
527 sf.sf_siginfo = ksi->ksi_code;
528 sf.sf_addr = (register_t)ksi->ksi_addr;
529 sf.sf_ahu.sf_handler = catcher;
531 mtx_unlock(&psp->ps_mtx);
535 * If we're a vm86 process, we want to save the segment registers.
536 * We also change eflags to be our emulated eflags, not the actual eflags.
539 if (regs->tf_eflags & PSL_VM) {
540 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
541 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
543 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
544 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
545 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
546 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
548 if (vm86->vm86_has_vme == 0)
549 sf.sf_uc.uc_mcontext.mc_eflags =
550 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
551 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
554 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
555 * syscalls made by the signal handler. This just avoids
556 * wasting time for our lazy fixup of such faults. PSL_NT
557 * does nothing in vm86 mode, but vm86 programs can set it
558 * almost legitimately in probes for old cpu types.
560 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
564 * Copy the sigframe out to the user's stack.
566 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
568 printf("process %ld has trashed its stack\n", (long)p->p_pid);
574 regs->tf_esp = (int)sfp;
575 regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
577 regs->tf_eflags &= ~(PSL_T | PSL_D);
578 regs->tf_cs = _ucodesel;
579 regs->tf_ds = _udatasel;
580 regs->tf_es = _udatasel;
581 regs->tf_fs = _udatasel;
582 regs->tf_ss = _udatasel;
584 mtx_lock(&psp->ps_mtx);
586 #endif /* COMPAT_FREEBSD4 */
589 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
591 struct sigframe sf, *sfp;
596 struct trapframe *regs;
597 struct segment_descriptor *sdp;
605 PROC_LOCK_ASSERT(p, MA_OWNED);
606 sig = ksi->ksi_signo;
608 mtx_assert(&psp->ps_mtx, MA_OWNED);
609 #ifdef COMPAT_FREEBSD4
610 if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
611 freebsd4_sendsig(catcher, ksi, mask);
616 if (SIGISMEMBER(psp->ps_osigset, sig)) {
617 osendsig(catcher, ksi, mask);
622 oonstack = sigonstack(regs->tf_esp);
624 #ifdef CPU_ENABLE_SSE
625 if (cpu_max_ext_state_size > sizeof(union savefpu) && use_xsave) {
626 xfpusave_len = cpu_max_ext_state_size - sizeof(union savefpu);
627 xfpusave = __builtin_alloca(xfpusave_len);
636 /* Save user context. */
637 bzero(&sf, sizeof(sf));
638 sf.sf_uc.uc_sigmask = *mask;
639 sf.sf_uc.uc_stack = td->td_sigstk;
640 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
641 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
642 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
643 sf.sf_uc.uc_mcontext.mc_gs = rgs();
644 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
645 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
646 get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
649 * Unconditionally fill the fsbase and gsbase into the mcontext.
651 sdp = &td->td_pcb->pcb_fsd;
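/*
 * A segment descriptor stores its base split across sd_lobase (low 24
 * bits) and sd_hibase (high 8 bits); reassemble the linear base here.
 */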
652 sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
654 sdp = &td->td_pcb->pcb_gsd;
655 sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
657 bzero(sf.sf_uc.uc_mcontext.mc_spare2,
658 sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
659 bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
661 /* Allocate space for the signal handler context. */
662 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
663 SIGISMEMBER(psp->ps_sigonstack, sig)) {
664 sp = (char *)td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
665 #if defined(COMPAT_43)
666 td->td_sigstk.ss_flags |= SS_ONSTACK;
669 sp = (char *)regs->tf_esp - 128;
670 if (xfpusave != NULL) {
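/* The XSAVE extended state area must be 64-byte aligned. */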
672 sp = (char *)((unsigned int)sp & ~0x3F);
673 sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
675 sp -= sizeof(struct sigframe);
677 /* Align to 16 bytes. */
678 sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
680 /* Build the argument list for the signal handler. */
682 sf.sf_ucontext = (register_t)&sfp->sf_uc;
683 bzero(&sf.sf_si, sizeof(sf.sf_si));
684 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
685 /* Signal handler installed with SA_SIGINFO. */
686 sf.sf_siginfo = (register_t)&sfp->sf_si;
687 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
689 /* Fill in POSIX parts */
690 sf.sf_si = ksi->ksi_info;
691 sf.sf_si.si_signo = sig; /* maybe a translated signal */
693 /* Old FreeBSD-style arguments. */
694 sf.sf_siginfo = ksi->ksi_code;
695 sf.sf_addr = (register_t)ksi->ksi_addr;
696 sf.sf_ahu.sf_handler = catcher;
698 mtx_unlock(&psp->ps_mtx);
702 * If we're a vm86 process, we want to save the segment registers.
703 * We also change eflags to be our emulated eflags, not the actual eflags.
706 if (regs->tf_eflags & PSL_VM) {
707 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
708 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
710 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
711 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
712 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
713 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
715 if (vm86->vm86_has_vme == 0)
716 sf.sf_uc.uc_mcontext.mc_eflags =
717 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
718 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
721 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
722 * syscalls made by the signal handler. This just avoids
723 * wasting time for our lazy fixup of such faults. PSL_NT
724 * does nothing in vm86 mode, but vm86 programs can set it
725 * almost legitimately in probes for old cpu types.
727 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
731 * Copy the sigframe out to the user's stack.
733 if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
734 (xfpusave != NULL && copyout(xfpusave,
735 (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
738 printf("process %ld has trashed its stack\n", (long)p->p_pid);
744 regs->tf_esp = (int)sfp;
745 regs->tf_eip = p->p_sysent->sv_sigcode_base;
746 if (regs->tf_eip == 0)
747 regs->tf_eip = p->p_sysent->sv_psstrings - szsigcode;
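/*
 * Clear the trace flag so the handler is not single-stepped, and the
 * direction flag so string instructions behave as the ABI expects.
 */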
748 regs->tf_eflags &= ~(PSL_T | PSL_D);
749 regs->tf_cs = _ucodesel;
750 regs->tf_ds = _udatasel;
751 regs->tf_es = _udatasel;
752 regs->tf_fs = _udatasel;
753 regs->tf_ss = _udatasel;
755 mtx_lock(&psp->ps_mtx);
759 * System call to cleanup state after a signal
760 * has been taken. Reset signal mask and
761 * stack state from context left by sendsig (above).
762 * Return to previous pc and psl as specified by
763 * context left by sendsig. Check carefully to
764 * make sure that the user has not modified the
765 * state to gain improper privileges.
773 struct osigreturn_args /* {
774 struct osigcontext *sigcntxp;
777 struct osigcontext sc;
778 struct trapframe *regs;
779 struct osigcontext *scp;
784 error = copyin(uap->sigcntxp, &sc, sizeof(sc));
789 if (eflags & PSL_VM) {
790 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
791 struct vm86_kernel *vm86;
794 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
795 * set up the vm86 area, and we can't enter vm86 mode.
797 if (td->td_pcb->pcb_ext == 0)
799 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
800 if (vm86->vm86_inited == 0)
803 /* Go back to user mode if both flags are set. */
804 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
805 ksiginfo_init_trap(&ksi);
806 ksi.ksi_signo = SIGBUS;
807 ksi.ksi_code = BUS_OBJERR;
808 ksi.ksi_addr = (void *)regs->tf_eip;
809 trapsignal(td, &ksi);
812 if (vm86->vm86_has_vme) {
813 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
814 (eflags & VME_USERCHANGE) | PSL_VM;
816 vm86->vm86_eflags = eflags; /* save VIF, VIP */
817 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
818 (eflags & VM_USERCHANGE) | PSL_VM;
820 tf->tf_vm86_ds = scp->sc_ds;
821 tf->tf_vm86_es = scp->sc_es;
822 tf->tf_vm86_fs = scp->sc_fs;
823 tf->tf_vm86_gs = scp->sc_gs;
824 tf->tf_ds = _udatasel;
825 tf->tf_es = _udatasel;
826 tf->tf_fs = _udatasel;
829 * Don't allow users to change privileged or reserved flags.
831 if (!EFL_SECURE(eflags, regs->tf_eflags)) {
836 * Don't allow users to load a valid privileged %cs. Let the
837 * hardware check for invalid selectors, excess privilege in
838 * other selectors, invalid %eip's and invalid %esp's.
840 if (!CS_SECURE(scp->sc_cs)) {
841 ksiginfo_init_trap(&ksi);
842 ksi.ksi_signo = SIGBUS;
843 ksi.ksi_code = BUS_OBJERR;
844 ksi.ksi_trapno = T_PROTFLT;
845 ksi.ksi_addr = (void *)regs->tf_eip;
846 trapsignal(td, &ksi);
849 regs->tf_ds = scp->sc_ds;
850 regs->tf_es = scp->sc_es;
851 regs->tf_fs = scp->sc_fs;
854 /* Restore remaining registers. */
855 regs->tf_eax = scp->sc_eax;
856 regs->tf_ebx = scp->sc_ebx;
857 regs->tf_ecx = scp->sc_ecx;
858 regs->tf_edx = scp->sc_edx;
859 regs->tf_esi = scp->sc_esi;
860 regs->tf_edi = scp->sc_edi;
861 regs->tf_cs = scp->sc_cs;
862 regs->tf_ss = scp->sc_ss;
863 regs->tf_isp = scp->sc_isp;
864 regs->tf_ebp = scp->sc_fp;
865 regs->tf_esp = scp->sc_sp;
866 regs->tf_eip = scp->sc_pc;
867 regs->tf_eflags = eflags;
869 #if defined(COMPAT_43)
870 if (scp->sc_onstack & 1)
871 td->td_sigstk.ss_flags |= SS_ONSTACK;
873 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
875 kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
877 return (EJUSTRETURN);
879 #endif /* COMPAT_43 */
881 #ifdef COMPAT_FREEBSD4
886 freebsd4_sigreturn(td, uap)
888 struct freebsd4_sigreturn_args /* {
889 const ucontext4 *sigcntxp;
893 struct trapframe *regs;
894 struct ucontext4 *ucp;
895 int cs, eflags, error;
898 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
903 eflags = ucp->uc_mcontext.mc_eflags;
904 if (eflags & PSL_VM) {
905 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
906 struct vm86_kernel *vm86;
909 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
910 * set up the vm86 area, and we can't enter vm86 mode.
912 if (td->td_pcb->pcb_ext == 0)
914 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
915 if (vm86->vm86_inited == 0)
918 /* Go back to user mode if both flags are set. */
919 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
920 ksiginfo_init_trap(&ksi);
921 ksi.ksi_signo = SIGBUS;
922 ksi.ksi_code = BUS_OBJERR;
923 ksi.ksi_addr = (void *)regs->tf_eip;
924 trapsignal(td, &ksi);
926 if (vm86->vm86_has_vme) {
927 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
928 (eflags & VME_USERCHANGE) | PSL_VM;
930 vm86->vm86_eflags = eflags; /* save VIF, VIP */
931 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
932 (eflags & VM_USERCHANGE) | PSL_VM;
934 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
935 tf->tf_eflags = eflags;
936 tf->tf_vm86_ds = tf->tf_ds;
937 tf->tf_vm86_es = tf->tf_es;
938 tf->tf_vm86_fs = tf->tf_fs;
939 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
940 tf->tf_ds = _udatasel;
941 tf->tf_es = _udatasel;
942 tf->tf_fs = _udatasel;
945 * Don't allow users to change privileged or reserved flags.
947 if (!EFL_SECURE(eflags, regs->tf_eflags)) {
948 uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
949 td->td_proc->p_pid, td->td_name, eflags);
954 * Don't allow users to load a valid privileged %cs. Let the
955 * hardware check for invalid selectors, excess privilege in
956 * other selectors, invalid %eip's and invalid %esp's.
958 cs = ucp->uc_mcontext.mc_cs;
959 if (!CS_SECURE(cs)) {
960 uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
961 td->td_proc->p_pid, td->td_name, cs);
962 ksiginfo_init_trap(&ksi);
963 ksi.ksi_signo = SIGBUS;
964 ksi.ksi_code = BUS_OBJERR;
965 ksi.ksi_trapno = T_PROTFLT;
966 ksi.ksi_addr = (void *)regs->tf_eip;
967 trapsignal(td, &ksi);
971 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
974 #if defined(COMPAT_43)
975 if (ucp->uc_mcontext.mc_onstack & 1)
976 td->td_sigstk.ss_flags |= SS_ONSTACK;
978 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
980 kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
981 return (EJUSTRETURN);
983 #endif /* COMPAT_FREEBSD4 */
989 sys_sigreturn(td, uap)
991 struct sigreturn_args /* {
992 const struct __ucontext *sigcntxp;
997 struct trapframe *regs;
1000 size_t xfpustate_len;
1001 int cs, eflags, error, ret;
1006 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
1010 if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
1011 uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
1012 td->td_name, ucp->uc_mcontext.mc_flags);
1015 regs = td->td_frame;
1016 eflags = ucp->uc_mcontext.mc_eflags;
1017 if (eflags & PSL_VM) {
1018 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
1019 struct vm86_kernel *vm86;
1022 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
1023 * set up the vm86 area, and we can't enter vm86 mode.
1025 if (td->td_pcb->pcb_ext == 0)
1027 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
1028 if (vm86->vm86_inited == 0)
1031 /* Go back to user mode if both flags are set. */
1032 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
1033 ksiginfo_init_trap(&ksi);
1034 ksi.ksi_signo = SIGBUS;
1035 ksi.ksi_code = BUS_OBJERR;
1036 ksi.ksi_addr = (void *)regs->tf_eip;
1037 trapsignal(td, &ksi);
1040 if (vm86->vm86_has_vme) {
1041 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
1042 (eflags & VME_USERCHANGE) | PSL_VM;
1044 vm86->vm86_eflags = eflags; /* save VIF, VIP */
1045 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
1046 (eflags & VM_USERCHANGE) | PSL_VM;
1048 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
1049 tf->tf_eflags = eflags;
1050 tf->tf_vm86_ds = tf->tf_ds;
1051 tf->tf_vm86_es = tf->tf_es;
1052 tf->tf_vm86_fs = tf->tf_fs;
1053 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
1054 tf->tf_ds = _udatasel;
1055 tf->tf_es = _udatasel;
1056 tf->tf_fs = _udatasel;
1059 * Don't allow users to change privileged or reserved flags.
1061 if (!EFL_SECURE(eflags, regs->tf_eflags)) {
1062 uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
1063 td->td_proc->p_pid, td->td_name, eflags);
1068 * Don't allow users to load a valid privileged %cs. Let the
1069 * hardware check for invalid selectors, excess privilege in
1070 * other selectors, invalid %eip's and invalid %esp's.
1072 cs = ucp->uc_mcontext.mc_cs;
1073 if (!CS_SECURE(cs)) {
1074 uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
1075 td->td_proc->p_pid, td->td_name, cs);
1076 ksiginfo_init_trap(&ksi);
1077 ksi.ksi_signo = SIGBUS;
1078 ksi.ksi_code = BUS_OBJERR;
1079 ksi.ksi_trapno = T_PROTFLT;
1080 ksi.ksi_addr = (void *)regs->tf_eip;
1081 trapsignal(td, &ksi);
1085 if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
1086 xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
1087 if (xfpustate_len > cpu_max_ext_state_size -
1088 sizeof(union savefpu)) {
1090 "pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
1091 p->p_pid, td->td_name, xfpustate_len);
1094 xfpustate = __builtin_alloca(xfpustate_len);
1095 error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
1096 xfpustate, xfpustate_len);
1099 "pid %d (%s): sigreturn copying xfpustate failed\n",
1100 p->p_pid, td->td_name);
1107 ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate,
1111 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
1114 #if defined(COMPAT_43)
1115 if (ucp->uc_mcontext.mc_onstack & 1)
1116 td->td_sigstk.ss_flags |= SS_ONSTACK;
1118 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
1121 kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
1122 return (EJUSTRETURN);
1126 * Reset registers to default values on exec.
1129 exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
1131 struct trapframe *regs = td->td_frame;
1132 struct pcb *pcb = td->td_pcb;
1134 /* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
1135 pcb->pcb_gs = _udatasel;
1138 mtx_lock_spin(&dt_lock);
1139 if (td->td_proc->p_md.md_ldt)
1142 mtx_unlock_spin(&dt_lock);
1144 bzero((char *)regs, sizeof(struct trapframe));
1145 regs->tf_eip = imgp->entry_addr;
1146 regs->tf_esp = stack;
1147 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
1148 regs->tf_ss = _udatasel;
1149 regs->tf_ds = _udatasel;
1150 regs->tf_es = _udatasel;
1151 regs->tf_fs = _udatasel;
1152 regs->tf_cs = _ucodesel;
1154 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
1155 regs->tf_ebx = imgp->ps_strings;
1158 * Reset the hardware debug registers if they were in use.
1159 * They won't have any meaning for the newly exec'd process.
1161 if (pcb->pcb_flags & PCB_DBREGS) {
1168 if (pcb == curpcb) {
1170 * Clear the debug registers on the running
1171 * CPU, otherwise they will end up affecting
1172 * the next process we switch to.
1176 pcb->pcb_flags &= ~PCB_DBREGS;
1179 pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;
1182 * Drop the FP state if we hold it, so that the process gets a
1183 * clean FP state if it uses the FPU again.
1188 * XXX - Linux emulator
1189 * Make sure edx is 0x0 on entry. Linux binaries depend
1192 td->td_retval[1] = 0;
1203 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
1205 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
1206 * instructions. We must set the CR0_MP bit and use the CR0_TS
1207 * bit to control the trap, because setting the CR0_EM bit does
1208 * not cause WAIT instructions to trap. It's important to trap
1209 * WAIT instructions - otherwise the "wait" variants of no-wait
1210 * control instructions would degenerate to the "no-wait" variants
1211 * after FP context switches but work correctly otherwise. It's
1212 * particularly important to trap WAITs when there is no NPX -
1213 * otherwise the "wait" variants would always degenerate.
1215 * Try setting CR0_NE to get correct error reporting on 486DX's.
1216 * Setting it should fail or do nothing on lesser processors.
1218 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
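/*
 * CR0_WP makes supervisor-mode writes honor read-only PTEs (needed for
 * copy-on-write), and CR0_AM allows user code to enable alignment
 * checking via EFLAGS.AC.
 */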
1223 u_long bootdev; /* not a struct cdev *- encoding is different */
1224 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
1225 CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");
1227 static char bootmethod[16] = "BIOS";
1228 SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
1229 "System firmware boot method");
1232 * Initialize 386 and configure to run kernel
1236 * Initialize segments & interrupt table
1241 union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */
1242 union descriptor ldt[NLDT]; /* local descriptor table */
1243 static struct gate_descriptor idt0[NIDT];
1244 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
1245 struct region_descriptor r_gdt, r_idt; /* table descriptors */
1246 struct mtx dt_lock; /* lock for GDT and LDT */
1248 static struct i386tss dblfault_tss;
1249 static char dblfault_stack[PAGE_SIZE];
1251 extern vm_offset_t proc0kstack;
1255 * software prototypes -- in more palatable form.
1257 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
1258 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
1260 struct soft_segment_descriptor gdt_segs[] = {
1261 /* GNULL_SEL 0 Null Descriptor */
1267 .ssd_xx = 0, .ssd_xx1 = 0,
1270 /* GPRIV_SEL 1 SMP Per-Processor Private Data Descriptor */
1272 .ssd_limit = 0xfffff,
1273 .ssd_type = SDT_MEMRWA,
1276 .ssd_xx = 0, .ssd_xx1 = 0,
1279 /* GUFS_SEL 2 %fs Descriptor for user */
1281 .ssd_limit = 0xfffff,
1282 .ssd_type = SDT_MEMRWA,
1285 .ssd_xx = 0, .ssd_xx1 = 0,
1288 /* GUGS_SEL 3 %gs Descriptor for user */
1290 .ssd_limit = 0xfffff,
1291 .ssd_type = SDT_MEMRWA,
1294 .ssd_xx = 0, .ssd_xx1 = 0,
1297 /* GCODE_SEL 4 Code Descriptor for kernel */
1299 .ssd_limit = 0xfffff,
1300 .ssd_type = SDT_MEMERA,
1303 .ssd_xx = 0, .ssd_xx1 = 0,
1306 /* GDATA_SEL 5 Data Descriptor for kernel */
1308 .ssd_limit = 0xfffff,
1309 .ssd_type = SDT_MEMRWA,
1312 .ssd_xx = 0, .ssd_xx1 = 0,
1315 /* GUCODE_SEL 6 Code Descriptor for user */
1317 .ssd_limit = 0xfffff,
1318 .ssd_type = SDT_MEMERA,
1321 .ssd_xx = 0, .ssd_xx1 = 0,
1324 /* GUDATA_SEL 7 Data Descriptor for user */
1326 .ssd_limit = 0xfffff,
1327 .ssd_type = SDT_MEMRWA,
1330 .ssd_xx = 0, .ssd_xx1 = 0,
1333 /* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
1334 { .ssd_base = 0x400,
1335 .ssd_limit = 0xfffff,
1336 .ssd_type = SDT_MEMRWA,
1339 .ssd_xx = 0, .ssd_xx1 = 0,
1342 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */
1345 .ssd_limit = sizeof(struct i386tss)-1,
1346 .ssd_type = SDT_SYS386TSS,
1349 .ssd_xx = 0, .ssd_xx1 = 0,
1352 /* GLDT_SEL 10 LDT Descriptor */
1353 { .ssd_base = (int) ldt,
1354 .ssd_limit = sizeof(ldt)-1,
1355 .ssd_type = SDT_SYSLDT,
1358 .ssd_xx = 0, .ssd_xx1 = 0,
1361 /* GUSERLDT_SEL 11 User LDT Descriptor per process */
1362 { .ssd_base = (int) ldt,
1363 .ssd_limit = (512 * sizeof(union descriptor)-1),
1364 .ssd_type = SDT_SYSLDT,
1367 .ssd_xx = 0, .ssd_xx1 = 0,
1370 /* GPANIC_SEL 12 Panic Tss Descriptor */
1371 { .ssd_base = (int) &dblfault_tss,
1372 .ssd_limit = sizeof(struct i386tss)-1,
1373 .ssd_type = SDT_SYS386TSS,
1376 .ssd_xx = 0, .ssd_xx1 = 0,
1379 /* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
1381 .ssd_limit = 0xfffff,
1382 .ssd_type = SDT_MEMERA,
1385 .ssd_xx = 0, .ssd_xx1 = 0,
1388 /* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
1390 .ssd_limit = 0xfffff,
1391 .ssd_type = SDT_MEMERA,
1394 .ssd_xx = 0, .ssd_xx1 = 0,
1397 /* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
1399 .ssd_limit = 0xfffff,
1400 .ssd_type = SDT_MEMRWA,
1403 .ssd_xx = 0, .ssd_xx1 = 0,
1406 /* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
1408 .ssd_limit = 0xfffff,
1409 .ssd_type = SDT_MEMRWA,
1412 .ssd_xx = 0, .ssd_xx1 = 0,
1415 /* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
1417 .ssd_limit = 0xfffff,
1418 .ssd_type = SDT_MEMRWA,
1421 .ssd_xx = 0, .ssd_xx1 = 0,
1424 /* GNDIS_SEL 18 NDIS Descriptor */
1430 .ssd_xx = 0, .ssd_xx1 = 0,
1435 static struct soft_segment_descriptor ldt_segs[] = {
1436 /* Null Descriptor - overwritten by call gate */
1442 .ssd_xx = 0, .ssd_xx1 = 0,
1445 /* Null Descriptor - overwritten by call gate */
1451 .ssd_xx = 0, .ssd_xx1 = 0,
1454 /* Null Descriptor - overwritten by call gate */
1460 .ssd_xx = 0, .ssd_xx1 = 0,
1463 /* Code Descriptor for user */
1465 .ssd_limit = 0xfffff,
1466 .ssd_type = SDT_MEMERA,
1469 .ssd_xx = 0, .ssd_xx1 = 0,
1472 /* Null Descriptor - overwritten by call gate */
1478 .ssd_xx = 0, .ssd_xx1 = 0,
1481 /* Data Descriptor for user */
1483 .ssd_limit = 0xfffff,
1484 .ssd_type = SDT_MEMRWA,
1487 .ssd_xx = 0, .ssd_xx1 = 0,
1493 setidt(idx, func, typ, dpl, selec)
1500 struct gate_descriptor *ip;
1503 ip->gd_looffset = (int)func;
1504 ip->gd_selector = selec;
1510 ip->gd_hioffset = ((int)func) >> 16;
1514 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1515 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1516 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1517 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1519 #ifdef KDTRACE_HOOKS
1523 IDTVEC(xen_intr_upcall),
1525 IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
1529 * Display the index and function name of any IDT entries that don't use
1530 * the default 'rsvd' entry point.
1532 DB_SHOW_COMMAND(idt, db_show_idt)
1534 struct gate_descriptor *ip;
1539 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
1540 func = (ip->gd_hioffset << 16 | ip->gd_looffset);
1541 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1542 db_printf("%3d\t", idx);
1543 db_printsym(func, DB_STGY_PROC);
1550 /* Show privileged registers. */
1551 DB_SHOW_COMMAND(sysregs, db_show_sysregs)
1553 uint64_t idtr, gdtr;
1556 db_printf("idtr\t0x%08x/%04x\n",
1557 (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
1559 db_printf("gdtr\t0x%08x/%04x\n",
1560 (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
1561 db_printf("ldtr\t0x%04x\n", rldt());
1562 db_printf("tr\t0x%04x\n", rtr());
1563 db_printf("cr0\t0x%08x\n", rcr0());
1564 db_printf("cr2\t0x%08x\n", rcr2());
1565 db_printf("cr3\t0x%08x\n", rcr3());
1566 db_printf("cr4\t0x%08x\n", rcr4());
1567 if (rcr4() & CR4_XSAVE)
1568 db_printf("xcr0\t0x%016llx\n", rxcr(0));
1569 if (amd_feature & (AMDID_NX | AMDID_LM))
1570 db_printf("EFER\t0x%016llx\n", rdmsr(MSR_EFER));
1571 if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX))
1572 db_printf("FEATURES_CTL\t0x%016llx\n",
1573 rdmsr(MSR_IA32_FEATURE_CONTROL));
1574 if ((cpu_vendor_id == CPU_VENDOR_INTEL ||
1575 cpu_vendor_id == CPU_VENDOR_AMD) && CPUID_TO_FAMILY(cpu_id) >= 6)
1576 db_printf("DEBUG_CTL\t0x%016llx\n", rdmsr(MSR_DEBUGCTLMSR));
1577 if (cpu_feature & CPUID_PAT)
1578 db_printf("PAT\t0x%016llx\n", rdmsr(MSR_PAT));
1581 DB_SHOW_COMMAND(dbregs, db_show_dbregs)
1584 db_printf("dr0\t0x%08x\n", rdr0());
1585 db_printf("dr1\t0x%08x\n", rdr1());
1586 db_printf("dr2\t0x%08x\n", rdr2());
1587 db_printf("dr3\t0x%08x\n", rdr3());
1588 db_printf("dr6\t0x%08x\n", rdr6());
1589 db_printf("dr7\t0x%08x\n", rdr7());
1595 struct segment_descriptor *sd;
1596 struct soft_segment_descriptor *ssd;
1598 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1599 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1600 ssd->ssd_type = sd->sd_type;
1601 ssd->ssd_dpl = sd->sd_dpl;
1602 ssd->ssd_p = sd->sd_p;
1603 ssd->ssd_def32 = sd->sd_def32;
1604 ssd->ssd_gran = sd->sd_gran;
1608 add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
1611 int i, insert_idx, physmap_idx;
1613 physmap_idx = *physmap_idxp;
1619 if (base > 0xffffffff) {
1620 printf("%uK of memory above 4GB ignored\n",
1621 (u_int)(length / 1024));
1627 * Find insertion point while checking for overlap. Start off by
1628 * assuming the new entry will be added to the end.
1630 insert_idx = physmap_idx + 2;
1631 for (i = 0; i <= physmap_idx; i += 2) {
1632 if (base < physmap[i + 1]) {
1633 if (base + length <= physmap[i]) {
1637 if (boothowto & RB_VERBOSE)
1639 "Overlapping memory regions, ignoring second region\n");
1644 /* See if we can prepend to the next entry. */
1645 if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
1646 physmap[insert_idx] = base;
1650 /* See if we can append to the previous entry. */
1651 if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
1652 physmap[insert_idx - 1] += length;
1657 *physmap_idxp = physmap_idx;
1658 if (physmap_idx == PHYSMAP_SIZE) {
1660 "Too many segments in the physical address map, giving up\n");
1665 * Move the last 'N' entries down to make room for the new
1668 for (i = physmap_idx; i > insert_idx; i -= 2) {
1669 physmap[i] = physmap[i - 2];
1670 physmap[i + 1] = physmap[i - 1];
1673 /* Insert the new entry. */
1674 physmap[insert_idx] = base;
1675 physmap[insert_idx + 1] = base + length;
1680 add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
1682 if (boothowto & RB_VERBOSE)
1683 printf("SMAP type=%02x base=%016llx len=%016llx\n",
1684 smap->type, smap->base, smap->length);
1686 if (smap->type != SMAP_TYPE_MEMORY)
1689 return (add_physmap_entry(smap->base, smap->length, physmap,
1694 add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap,
1697 struct bios_smap *smap, *smapend;
1700 * Memory map from INT 15:E820.
1702 * subr_module.c says:
1703 * "Consumer may safely assume that size value precedes data."
1704 * i.e., an int32_t immediately precedes SMAP.
1706 smapsize = *((u_int32_t *)smapbase - 1);
1707 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
1709 for (smap = smapbase; smap < smapend; smap++)
1710 if (!add_smap_entry(smap, physmap, physmap_idxp))
1721 if (basemem > 640) {
1722 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
1728 * XXX if biosbasemem is now < 640, there is a `hole'
1729 * between the end of base memory and the start of
1730 * ISA memory. The hole may be empty or it may
1731 * contain BIOS code or data. Map it read/write so
1732 * that the BIOS can write to it. (Memory from 0 to
1733 * the physical end of the kernel is mapped read-only
1734 * to begin with and then parts of it are remapped.
1735 * The parts that aren't remapped form holes that
1736 * remain read-only and are unused by the kernel.
1737 * The base memory area is below the physical end of
1738 * the kernel and right now forms a read-only hole.
1739 * The part of it from PAGE_SIZE to
1740 * (trunc_page(biosbasemem * 1024) - 1) will be
1741 * remapped and used by the kernel later.)
1743 * This code is similar to the code used in
1744 * pmap_mapdev, but since no memory needs to be
1745 * allocated we simply change the mapping.
1747 for (pa = trunc_page(basemem * 1024);
1748 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1749 pmap_kenter(KERNBASE + pa, pa);
1752 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
1753 * the vm86 page table so that vm86 can scribble on them using
1754 * the vm86 map too. XXX: why 2 ways for this and only 1 way for
1755 * page 0, at least as initialized here?
1757 pte = (pt_entry_t *)vm86paddr;
1758 for (i = basemem / 4; i < 160; i++)
1759 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1763 * Populate the (physmap) array with base/bound pairs describing the
1764 * available physical memory in the system, then test this memory and
1765 * build the phys_avail array describing the actually-available memory.
1767 * If we cannot accurately determine the physical memory map, then use
1768 * the value from the 0xE801 call, and failing that, the RTC.
1770 * Total memory size may be set by the kernel environment variable
1771 * hw.physmem or the compile-time define MAXMEM.
1773 * XXX first should be vm_paddr_t.
1776 getmemsize(int first)
1778 int has_smap, off, physmap_idx, pa_indx, da_indx;
1780 vm_paddr_t physmap[PHYSMAP_SIZE];
1782 quad_t dcons_addr, dcons_size, physmem_tunable;
1783 int hasbrokenint12, i, res;
1785 struct vm86frame vmf;
1786 struct vm86context vmc;
1788 struct bios_smap *smap, *smapbase;
1793 if (arch_i386_is_xbox) {
1795 * We queried the memory size before, so chop off 4MB for
1796 * the framebuffer and inform the OS of this.
1799 physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
1804 bzero(&vmf, sizeof(vmf));
1805 bzero(physmap, sizeof(physmap));
1809 * Check if the loader supplied an SMAP memory map. If so,
1810 * use that and do not make any VM86 calls.
1813 kmdp = preload_search_by_type("elf kernel");
1815 kmdp = preload_search_by_type("elf32 kernel");
1816 smapbase = (struct bios_smap *)preload_search_info(kmdp,
1817 MODINFO_METADATA | MODINFOMD_SMAP);
1818 if (smapbase != NULL) {
1819 add_smap_entries(smapbase, physmap, &physmap_idx);
1825 * Some newer BIOSes have a broken INT 12H implementation
1826 * which causes a kernel panic immediately. In this case, we
1827 * need to use the SMAP to determine the base memory size.
1830 TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
1831 if (hasbrokenint12 == 0) {
1832 /* Use INT12 to determine base memory size. */
1833 vm86_intcall(0x12, &vmf);
1834 basemem = vmf.vmf_ax;
1839 * Fetch the memory map with INT 15:E820. Map page 1 R/W into
1840 * the kernel page table so we can use it as a buffer. The
1841 * kernel will unmap this page later.
1843 pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
1845 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
1846 res = vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
1847 KASSERT(res != 0, ("vm86_getptr() failed: address not found"));
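/*
 * INT 15h AX=E820h convention: %eax = 0xE820, %edx = 'SMAP' signature,
 * %ecx = buffer size, %es:%di = buffer.  Each call returns one map entry
 * plus a continuation value in %ebx, which is zero after the last entry.
 */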
1851 vmf.vmf_eax = 0xE820;
1852 vmf.vmf_edx = SMAP_SIG;
1853 vmf.vmf_ecx = sizeof(struct bios_smap);
1854 i = vm86_datacall(0x15, &vmf, &vmc);
1855 if (i || vmf.vmf_eax != SMAP_SIG)
1858 if (!add_smap_entry(smap, physmap, &physmap_idx))
1860 } while (vmf.vmf_ebx != 0);
1864 * If we didn't fetch the "base memory" size from INT12,
1865 * figure it out from the SMAP (or just guess).
1868 for (i = 0; i <= physmap_idx; i += 2) {
1869 if (physmap[i] == 0x00000000) {
1870 basemem = physmap[i + 1] / 1024;
1875 /* XXX: If we couldn't find basemem from SMAP, just guess. */
1881 if (physmap[1] != 0)
1885 * If we failed to find an SMAP, figure out the extended
1886 * memory size. We will then build a simple memory map with
1887 * two segments, one for "base memory" and the second for
1888 * "extended memory". Note that "extended memory" starts at a
1889 * physical address of 1MB and that both basemem and extmem
1890 * are in units of 1KB.
1892 * First, try to fetch the extended memory size via INT 15:E801.
1894 vmf.vmf_ax = 0xE801;
1895 if (vm86_intcall(0x15, &vmf) == 0) {
1896 extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
1899 * If INT15:E801 fails, this is our last ditch effort
1900 * to determine the extended memory size. Currently
1901 * we prefer the RTC value over INT15:88.
1905 vm86_intcall(0x15, &vmf);
1906 extmem = vmf.vmf_ax;
1908 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
1913 * Special hack for chipsets that still remap the 384k hole when
1914 * there's 16MB of memory - this really confuses people that
1915 * are trying to use bus mastering ISA controllers with the
1916 * "16MB limit"; they only have 16MB, but the remapping puts
1917 * them beyond the limit.
1919 * If extended memory is between 15-16MB (16-17MB phys address range), chop it to 15MB.
1922 if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
1926 physmap[1] = basemem * 1024;
1928 physmap[physmap_idx] = 0x100000;
1929 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
1933 * Now, physmap contains a map of physical memory.
1937 /* make hole for AP bootstrap code */
1938 physmap[1] = mp_bootaddress(physmap[1]);
1942 * Maxmem isn't the "maximum memory", it's one larger than the
1943 * highest page of the physical address space. It should be
1944 * called something like "Maxphyspage". We may adjust this
1945 * based on ``hw.physmem'' and the results of the memory test.
1947 * This is especially confusing when it is much larger than the
1948 * memory size and is displayed as "realmem".
1950 Maxmem = atop(physmap[physmap_idx + 1]);
1953 Maxmem = MAXMEM / 4;
1956 if (TUNABLE_QUAD_FETCH("hw.physmem", &physmem_tunable))
1957 Maxmem = atop(physmem_tunable);
1960 * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
1961 * the amount of memory in the system.
1963 if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
1964 Maxmem = atop(physmap[physmap_idx + 1]);
1967 * By default enable the memory test on real hardware, and disable
1968 * it if we appear to be running in a VM. This avoids touching all
1969 * pages unnecessarily, which doesn't matter on real hardware but is
1970 * bad for shared VM hosts. Use a general name so that
1971 * one could eventually do more with the code than just disable it.
1973 memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1;
1974 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
1976 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1977 (boothowto & RB_VERBOSE))
1978 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1981 * If Maxmem has been increased beyond what the system has detected,
1982 * extend the last memory segment to the new limit.
1984 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1985 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1987 /* call pmap initialization to make new kernel address space */
1988 pmap_bootstrap(first);
1991 * Size up each available chunk of physical memory.
1993 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1996 phys_avail[pa_indx++] = physmap[0];
1997 phys_avail[pa_indx] = physmap[0];
1998 dump_avail[da_indx] = physmap[0];
2002 * Get dcons buffer address
2004 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
2005 getenv_quad("dcons.size", &dcons_size) == 0)
2009 * physmap is in bytes, so when converting to page boundaries,
2010 * round up the start address and round down the end address.
2012 for (i = 0; i <= physmap_idx; i += 2) {
2015 end = ptoa((vm_paddr_t)Maxmem);
2016 if (physmap[i + 1] < end)
2017 end = trunc_page(physmap[i + 1]);
2018 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
2019 int tmp, page_bad, full;
2020 int *ptr = (int *)CADDR3;
2024 * block out kernel memory as not available.
2026 if (pa >= KERNLOAD && pa < first)
2030 * block out dcons buffer
2033 && pa >= trunc_page(dcons_addr)
2034 && pa < dcons_addr + dcons_size)
2042 * map page into kernel: valid, read/write, non-cacheable
2044 *pte = pa | PG_V | PG_RW | PG_N;
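/*
 * PG_N (cache disable) makes the test patterns below hit RAM rather than
 * being satisfied from the CPU cache.
 */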
2049 * Test for alternating 1's and 0's
2051 *(volatile int *)ptr = 0xaaaaaaaa;
2052 if (*(volatile int *)ptr != 0xaaaaaaaa)
2055 * Test for alternating 0's and 1's
2057 *(volatile int *)ptr = 0x55555555;
2058 if (*(volatile int *)ptr != 0x55555555)
2063 *(volatile int *)ptr = 0xffffffff;
2064 if (*(volatile int *)ptr != 0xffffffff)
2069 *(volatile int *)ptr = 0x0;
2070 if (*(volatile int *)ptr != 0x0)
2073 * Restore original value.
2079 * Adjust array of valid/good pages.
2081 if (page_bad == TRUE)
2084 * If this good page is a continuation of the
2085 * previous set of good pages, then just increase
2086 * the end pointer. Otherwise start a new chunk.
2087 * Note that "end" points one past the last valid address,
2088 * making the range >= start and < end.
2089 * If we're also doing a speculative memory
2090 * test and we are at or past the end, bump up Maxmem
2091 * so that we keep going. The first bad page
2092 * will terminate the loop.
2094 if (phys_avail[pa_indx] == pa) {
2095 phys_avail[pa_indx] += PAGE_SIZE;
2098 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2100 "Too many holes in the physical address space, giving up\n");
2105 phys_avail[pa_indx++] = pa; /* start */
2106 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2110 if (dump_avail[da_indx] == pa) {
2111 dump_avail[da_indx] += PAGE_SIZE;
2114 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2118 dump_avail[da_indx++] = pa; /* start */
2119 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2131 * The last chunk must contain at least one page plus the message
2132 * buffer to avoid complicating other code (message buffer address
2133 * calculation, etc.).
2135 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2136 round_page(msgbufsize) >= phys_avail[pa_indx]) {
2137 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2138 phys_avail[pa_indx--] = 0;
2139 phys_avail[pa_indx--] = 0;
2142 Maxmem = atop(phys_avail[pa_indx]);
2144 /* Trim off space for the message buffer. */
2145 phys_avail[pa_indx] -= round_page(msgbufsize);
2147 /* Map the message buffer. */
2148 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
2149 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2157 db_fetch_ksymtab(bootinfo.bi_symtab, bootinfo.bi_esymtab);
2161 if (boothowto & RB_KDB)
2162 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
2170 struct gate_descriptor *gdp;
2171 int gsel_tss, metadata_missing, x, pa;
2173 #ifdef CPU_ENABLE_SSE
2174 struct xstate_hdr *xhdr;
2178 thread0.td_kstack = proc0kstack;
2179 thread0.td_kstack_pages = TD0_KSTACK_PAGES;
2182 * This may be done better later if it gets more high level
2183 * components in it. If so just link td->td_proc here.
2185 proc_linkup0(&proc0, &thread0);
2187 metadata_missing = 0;
2188 if (bootinfo.bi_modulep) {
2189 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
2190 preload_bootstrap_relocate(KERNBASE);
2192 metadata_missing = 1;
2195 if (bootinfo.bi_envp != 0)
2196 init_static_kenv((char *)bootinfo.bi_envp + KERNBASE, 0);
2198 init_static_kenv(NULL, 0);
2200 /* Init basic tunables, hz etc */
2204 * Make gdt memory segments. All segments cover the full 4GB
2205 * of address space and permissions are enforced at page level.
2207 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
2208 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
2209 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
2210 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
2211 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
2212 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
2215 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
2216 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2217 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2219 for (x = 0; x < NGDT; x++)
2220 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2222 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
2223 r_gdt.rd_base = (int) gdt;
2224 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2227 pcpu_init(pc, 0, sizeof(struct pcpu));
2228 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
2229 pmap_kenter(pa + KERNBASE, pa);
2230 dpcpu_init((void *)(first + KERNBASE), 0);
2231 first += DPCPU_SIZE;
2232 PCPU_SET(prvspace, pc);
2233 PCPU_SET(curthread, &thread0);
2234 /* Non-late cninit() and printf() can be moved up to here. */
2237 * Initialize mutexes.
2239 * icu_lock: in order to allow an interrupt to occur in a critical
2240 * section, to set pcpu->ipending (etc...) properly, we
2241 * must be able to get the icu lock, so it can't be under witness.
2245 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2247 /* make ldt memory segments */
2248 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2249 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2250 for (x = 0; x < nitems(ldt_segs); x++)
2251 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2253 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2255 PCPU_SET(currentldt, _default_ldt);
2258 for (x = 0; x < NIDT; x++)
2259 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2260 GSEL(GCODE_SEL, SEL_KPL));
2261 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2262 GSEL(GCODE_SEL, SEL_KPL));
2263 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2264 GSEL(GCODE_SEL, SEL_KPL));
2265 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2266 GSEL(GCODE_SEL, SEL_KPL));
2267 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2268 GSEL(GCODE_SEL, SEL_KPL));
2269 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2270 GSEL(GCODE_SEL, SEL_KPL));
2271 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2272 GSEL(GCODE_SEL, SEL_KPL));
2273 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2274 GSEL(GCODE_SEL, SEL_KPL));
2275 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
2276 GSEL(GCODE_SEL, SEL_KPL));
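/*
 * Double faults go through a task gate so the handler runs on the
 * dedicated dblfault_tss/dblfault_stack even if the faulting context's
 * stack is unusable.
 */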
2277 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2278 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2279 GSEL(GCODE_SEL, SEL_KPL));
2280 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2281 GSEL(GCODE_SEL, SEL_KPL));
2282 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2283 GSEL(GCODE_SEL, SEL_KPL));
2284 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2285 GSEL(GCODE_SEL, SEL_KPL));
2286 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2287 GSEL(GCODE_SEL, SEL_KPL));
2288 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2289 GSEL(GCODE_SEL, SEL_KPL));
2290 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2291 GSEL(GCODE_SEL, SEL_KPL));
2292 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2293 GSEL(GCODE_SEL, SEL_KPL));
2294 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2295 GSEL(GCODE_SEL, SEL_KPL));
2296 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2297 GSEL(GCODE_SEL, SEL_KPL));
2298 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2299 GSEL(GCODE_SEL, SEL_KPL));
2300 #ifdef KDTRACE_HOOKS
2301 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL,
2302 GSEL(GCODE_SEL, SEL_KPL));
2305 setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), SDT_SYS386IGT, SEL_UPL,
2306 GSEL(GCODE_SEL, SEL_KPL));
2309 r_idt.rd_limit = sizeof(idt0) - 1;
2310 r_idt.rd_base = (int) idt;
2315 * The following code queries the PCI ID of 0:0:0. For the XBOX,
2316 * this should be 0x10de / 0x02a5.
2318 * This is exactly what Linux does.
2320 outl(0xcf8, 0x80000000);
2321 if (inl(0xcfc) == 0x02a510de) {
2322 arch_i386_is_xbox = 1;
2323 pic16l_setled(XBOX_LED_GREEN);
2326 * We are an XBOX, but we may have either 64MB or 128MB of
2327 * memory. The PCI host bridge should be programmed for this,
2328 * so we just query it.
2330 outl(0xcf8, 0x80000084);
2331 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
2336 * Initialize the clock before the console so that console
2337 * initialization can use DELAY().
2341 finishidentcpu(); /* Final stage of CPU initialization */
2342 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2343 GSEL(GCODE_SEL, SEL_KPL));
2344 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2345 GSEL(GCODE_SEL, SEL_KPL));
2346 initializecpu(); /* Initialize CPU registers */
2347 initializecpucache();
2349 /* pointer to selector slot for %fs/%gs */
2350 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
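/*
 * Set up the TSS targeted by the double fault task gate installed
 * above: a private stack and kernel segment registers, so a #DF can be
 * taken even when the current kernel stack is unusable.
 */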
2352 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2353 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2354 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2355 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2356 #if defined(PAE) || defined(PAE_TABLES)
2357 dblfault_tss.tss_cr3 = (int)IdlePDPT;
2359 dblfault_tss.tss_cr3 = (int)IdlePTD;
2361 dblfault_tss.tss_eip = (int)dblfault_handler;
2362 dblfault_tss.tss_eflags = PSL_KERNEL;
2363 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2364 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2365 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2366 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2367 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2369 /* Initialize the tss (except for the final esp0) early for vm86. */
2370 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2371 thread0.td_kstack_pages * PAGE_SIZE - 16);
2372 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2373 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2374 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2375 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
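/*
 * Putting the I/O permission bitmap base at sizeof(struct i386tss)
 * (the high word of tss_ioopt) places it past the TSS limit, so no
 * port access is granted through the bitmap.
 */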
2376 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2379 /* Initialize the PIC early for vm86 calls. */
2385 /* Reset and mask the atpics and leave them shut down. */
2389 * Point the ICU spurious interrupt vectors at the APIC spurious
2390 * interrupt handler.
2392 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
2393 GSEL(GCODE_SEL, SEL_KPL));
2394 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
2395 GSEL(GCODE_SEL, SEL_KPL));
2400 * The console and kdb should be initialized even earlier than here,
2401 * but some console drivers don't work until after getmemsize().
2402 * Default to late console initialization to support these drivers.
2403 * This loses mainly printf()s in getmemsize() and early debugging.
2406 TUNABLE_INT_FETCH("debug.late_console", &late_console);
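/*
 * "debug.late_console" is a loader tunable; setting it to 0, e.g.
 * debug.late_console="0" in loader.conf, requests that the console and
 * kdb be brought up before getmemsize() instead of after it.
 */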
2407 if (!late_console) {
2414 init_param2(physmem);
2416 /* now running on new page tables, configured, and u/iom is accessible */
2421 if (metadata_missing)
2422 printf("WARNING: loader(8) metadata is missing!\n");
2427 msgbufinit(msgbufp, msgbufsize);
2432 * Set up thread0 pcb after npxinit calculated pcb + fpu save
2433 * area size. Zero out the extended state header in fpu save
2436 thread0.td_pcb = get_pcb_td(&thread0);
2437 bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
2438 #ifdef CPU_ENABLE_SSE
2440 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
2442 xhdr->xstate_bv = xsave_mask;
2445 PCPU_SET(curpcb, thread0.td_pcb);
2446 /* Move esp0 in the tss to its final place. */
2447 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2448 PCPU_SET(common_tss.tss_esp0, (vm_offset_t)thread0.td_pcb - 16);
2449 gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS; /* clear busy bit */
2452 /* make a call gate to reenter kernel with */
2453 gdp = &ldt[LSYS5CALLS_SEL].gd;
2455 x = (int) &IDTVEC(lcall_syscall);
2456 gdp->gd_looffset = x;
2457 gdp->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
2459 gdp->gd_type = SDT_SYS386CGT;
2460 gdp->gd_dpl = SEL_UPL;
2462 gdp->gd_hioffset = x >> 16;
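/*
 * The 32-bit lcall_syscall entry point is split across the gate's low
 * and high offset fields, and the gate is callable from user mode
 * (DPL 3).
 */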
2464 /* XXX does this work? */
2466 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2467 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2469 /* transfer to user mode */
2471 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2472 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2474 /* set up proc 0's pcb */
2475 thread0.td_pcb->pcb_flags = 0;
2476 #if defined(PAE) || defined(PAE_TABLES)
2477 thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
2479 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2481 thread0.td_pcb->pcb_ext = 0;
2482 thread0.td_frame = &proc0_tf;
2490 /* Location of kernel stack for locore */
2491 return ((register_t)thread0.td_pcb);
2495 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
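/*
 * Start with an "unset" ACPI id; ACPI code is expected to fill in the
 * real id for this CPU later.
 */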
2498 pcpu->pc_acpi_id = 0xffffffff;
2502 smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
2504 struct bios_smap *smapbase;
2505 struct bios_smap_xattr smap;
2508 int count, error, i;
2510 /* Retrieve the system memory map from the loader. */
2511 kmdp = preload_search_by_type("elf kernel");
2513 kmdp = preload_search_by_type("elf32 kernel");
2514 smapbase = (struct bios_smap *)preload_search_info(kmdp,
2515 MODINFO_METADATA | MODINFOMD_SMAP);
2516 if (smapbase == NULL)
2518 smapattr = (uint32_t *)preload_search_info(kmdp,
2519 MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
2520 count = *((u_int32_t *)smapbase - 1) / sizeof(*smapbase);
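/*
 * Copy out one record per SMAP entry, attaching the per-entry extended
 * attributes when the loader supplied them.
 */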
2522 for (i = 0; i < count; i++) {
2523 smap.base = smapbase[i].base;
2524 smap.length = smapbase[i].length;
2525 smap.type = smapbase[i].type;
2526 if (smapattr != NULL)
2527 smap.xattr = smapattr[i];
2530 error = SYSCTL_OUT(req, &smap, sizeof(smap));
2534 SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
2535 smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data");
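/*
 * Spinlock enter/exit.  The outermost spinlock_enter() disables
 * interrupts and saves the previous flags; nested calls only bump a
 * per-thread count, and spinlock_exit() restores the flags once that
 * count drops back to zero.
 */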
2538 spinlock_enter(void)
2544 if (td->td_md.md_spinlock_count == 0) {
2545 flags = intr_disable();
2546 td->td_md.md_spinlock_count = 1;
2547 td->td_md.md_saved_flags = flags;
2549 td->td_md.md_spinlock_count++;
2561 flags = td->td_md.md_saved_flags;
2562 td->td_md.md_spinlock_count--;
2563 if (td->td_md.md_spinlock_count == 0)
2564 intr_restore(flags);
2567 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2568 static void f00f_hack(void *unused);
2569 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
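/*
 * F00F workaround: relocate the IDT into freshly allocated pages, laid
 * out so that the low vectors (through #UD, vector 6) end exactly at a
 * page boundary, and map that first page read-only.
 */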
2572 f00f_hack(void *unused)
2574 struct gate_descriptor *new_idt;
2582 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2584 tmp = kmem_malloc(kernel_arena, PAGE_SIZE * 2, M_WAITOK | M_ZERO);
2586 panic("kmem_malloc returned 0");
2588 /* Put the problematic entry (#6) at the end of the lower page. */
2589 new_idt = (struct gate_descriptor*)
2590 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
2591 bcopy(idt, new_idt, sizeof(idt0));
2592 r_idt.rd_base = (u_int)new_idt;
2595 pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
2597 #endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
2600 * Construct a PCB from a trapframe. This is called from kdb_trap() where
2601 * we want to start a backtrace from the function that caused us to enter
2602 * the debugger. We have the context in the trapframe, but base the trace
2603 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
2604 * enough for a backtrace.
2607 makectx(struct trapframe *tf, struct pcb *pcb)
2610 pcb->pcb_edi = tf->tf_edi;
2611 pcb->pcb_esi = tf->tf_esi;
2612 pcb->pcb_ebp = tf->tf_ebp;
2613 pcb->pcb_ebx = tf->tf_ebx;
2614 pcb->pcb_eip = tf->tf_eip;
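/*
 * A trap from kernel mode does not push %esp/%ss, so the stack pointer
 * at trap time is the end of the trapframe minus the two unsaved
 * 4-byte slots.
 */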
2615 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
2616 pcb->pcb_gs = rgs();
2620 ptrace_set_pc(struct thread *td, u_long addr)
2623 td->td_frame->tf_eip = addr;
2628 ptrace_single_step(struct thread *td)
2630 td->td_frame->tf_eflags |= PSL_T;
2635 ptrace_clear_single_step(struct thread *td)
2637 td->td_frame->tf_eflags &= ~PSL_T;
2642 fill_regs(struct thread *td, struct reg *regs)
2645 struct trapframe *tp;
2649 regs->r_gs = pcb->pcb_gs;
2650 return (fill_frame_regs(tp, regs));
2654 fill_frame_regs(struct trapframe *tp, struct reg *regs)
2656 regs->r_fs = tp->tf_fs;
2657 regs->r_es = tp->tf_es;
2658 regs->r_ds = tp->tf_ds;
2659 regs->r_edi = tp->tf_edi;
2660 regs->r_esi = tp->tf_esi;
2661 regs->r_ebp = tp->tf_ebp;
2662 regs->r_ebx = tp->tf_ebx;
2663 regs->r_edx = tp->tf_edx;
2664 regs->r_ecx = tp->tf_ecx;
2665 regs->r_eax = tp->tf_eax;
2666 regs->r_eip = tp->tf_eip;
2667 regs->r_cs = tp->tf_cs;
2668 regs->r_eflags = tp->tf_eflags;
2669 regs->r_esp = tp->tf_esp;
2670 regs->r_ss = tp->tf_ss;
2675 set_regs(struct thread *td, struct reg *regs)
2678 struct trapframe *tp;
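/*
 * Refuse eflags or %cs values the process is not entitled to: only the
 * user-modifiable flag bits may change, and the code selector must
 * keep user privilege.
 */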
2681 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
2682 !CS_SECURE(regs->r_cs))
2685 tp->tf_fs = regs->r_fs;
2686 tp->tf_es = regs->r_es;
2687 tp->tf_ds = regs->r_ds;
2688 tp->tf_edi = regs->r_edi;
2689 tp->tf_esi = regs->r_esi;
2690 tp->tf_ebp = regs->r_ebp;
2691 tp->tf_ebx = regs->r_ebx;
2692 tp->tf_edx = regs->r_edx;
2693 tp->tf_ecx = regs->r_ecx;
2694 tp->tf_eax = regs->r_eax;
2695 tp->tf_eip = regs->r_eip;
2696 tp->tf_cs = regs->r_cs;
2697 tp->tf_eflags = regs->r_eflags;
2698 tp->tf_esp = regs->r_esp;
2699 tp->tf_ss = regs->r_ss;
2700 pcb->pcb_gs = regs->r_gs;
2705 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2708 KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
2709 P_SHOULDSTOP(td->td_proc),
2710 ("not suspended thread %p", td));
2714 bzero(fpregs, sizeof(*fpregs));
2716 #ifdef CPU_ENABLE_SSE
2718 npx_fill_fpregs_xmm(&get_pcb_user_save_td(td)->sv_xmm,
2719 (struct save87 *)fpregs);
2721 #endif /* CPU_ENABLE_SSE */
2722 bcopy(&get_pcb_user_save_td(td)->sv_87, fpregs,
2728 set_fpregs(struct thread *td, struct fpreg *fpregs)
2731 #ifdef CPU_ENABLE_SSE
2733 npx_set_fpregs_xmm((struct save87 *)fpregs,
2734 &get_pcb_user_save_td(td)->sv_xmm);
2736 #endif /* CPU_ENABLE_SSE */
2737 bcopy(fpregs, &get_pcb_user_save_td(td)->sv_87,
2746 * Get machine context.
2749 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2751 struct trapframe *tp;
2752 struct segment_descriptor *sdp;
2756 PROC_LOCK(curthread->td_proc);
2757 mcp->mc_onstack = sigonstack(tp->tf_esp);
2758 PROC_UNLOCK(curthread->td_proc);
2759 mcp->mc_gs = td->td_pcb->pcb_gs;
2760 mcp->mc_fs = tp->tf_fs;
2761 mcp->mc_es = tp->tf_es;
2762 mcp->mc_ds = tp->tf_ds;
2763 mcp->mc_edi = tp->tf_edi;
2764 mcp->mc_esi = tp->tf_esi;
2765 mcp->mc_ebp = tp->tf_ebp;
2766 mcp->mc_isp = tp->tf_isp;
2767 mcp->mc_eflags = tp->tf_eflags;
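/*
 * When the caller asks for the return state to be cleared, report a
 * successful syscall: the carry flag, which signals an error return to
 * userland, is cleared here.
 */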
2768 if (flags & GET_MC_CLEAR_RET) {
2771 mcp->mc_eflags &= ~PSL_C;
2773 mcp->mc_eax = tp->tf_eax;
2774 mcp->mc_edx = tp->tf_edx;
2776 mcp->mc_ebx = tp->tf_ebx;
2777 mcp->mc_ecx = tp->tf_ecx;
2778 mcp->mc_eip = tp->tf_eip;
2779 mcp->mc_cs = tp->tf_cs;
2780 mcp->mc_esp = tp->tf_esp;
2781 mcp->mc_ss = tp->tf_ss;
2782 mcp->mc_len = sizeof(*mcp);
2783 get_fpcontext(td, mcp, NULL, 0);
2784 sdp = &td->td_pcb->pcb_fsd;
2785 mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
2786 sdp = &td->td_pcb->pcb_gsd;
2787 mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
2789 mcp->mc_xfpustate = 0;
2790 mcp->mc_xfpustate_len = 0;
2791 bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
2796 * Set machine context.
2798 * However, we don't set any but the user modifiable flags, and we won't
2799 * touch the cs selector.
2802 set_mcontext(struct thread *td, mcontext_t *mcp)
2804 struct trapframe *tp;
2809 if (mcp->mc_len != sizeof(*mcp) ||
2810 (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
2812 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
2813 (tp->tf_eflags & ~PSL_USERCHANGE);
2814 if (mcp->mc_flags & _MC_HASFPXSTATE) {
2815 if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
2816 sizeof(union savefpu))
2818 xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
2819 ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
2820 mcp->mc_xfpustate_len);
2825 ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
2828 tp->tf_fs = mcp->mc_fs;
2829 tp->tf_es = mcp->mc_es;
2830 tp->tf_ds = mcp->mc_ds;
2831 tp->tf_edi = mcp->mc_edi;
2832 tp->tf_esi = mcp->mc_esi;
2833 tp->tf_ebp = mcp->mc_ebp;
2834 tp->tf_ebx = mcp->mc_ebx;
2835 tp->tf_edx = mcp->mc_edx;
2836 tp->tf_ecx = mcp->mc_ecx;
2837 tp->tf_eax = mcp->mc_eax;
2838 tp->tf_eip = mcp->mc_eip;
2839 tp->tf_eflags = eflags;
2840 tp->tf_esp = mcp->mc_esp;
2841 tp->tf_ss = mcp->mc_ss;
2842 td->td_pcb->pcb_gs = mcp->mc_gs;
2847 get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
2848 size_t xfpusave_len)
2850 #ifdef CPU_ENABLE_SSE
2851 size_t max_len, len;
2855 mcp->mc_fpformat = _MC_FPFMT_NODEV;
2856 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
2857 bzero(mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
2859 mcp->mc_ownedfp = npxgetregs(td);
2860 bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
2861 sizeof(mcp->mc_fpstate));
2862 mcp->mc_fpformat = npxformat();
2863 #ifdef CPU_ENABLE_SSE
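/*
 * With XSAVE in use and a caller-supplied buffer, also copy out the
 * extended state that follows the legacy save area.
 */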
2864 if (!use_xsave || xfpusave_len == 0)
2866 max_len = cpu_max_ext_state_size - sizeof(union savefpu);
2868 if (len > max_len) {
2870 bzero(xfpusave + max_len, len - max_len);
2872 mcp->mc_flags |= _MC_HASFPXSTATE;
2873 mcp->mc_xfpustate_len = len;
2874 bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
2880 set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate,
2881 size_t xfpustate_len)
2883 union savefpu *fpstate;
2886 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2888 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
2889 mcp->mc_fpformat != _MC_FPFMT_XMM)
2891 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
2892 /* We don't care what state is left in the FPU or PCB. */
2895 } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2896 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2898 fpstate = (union savefpu *)&mcp->mc_fpstate;
2899 #ifdef CPU_ENABLE_SSE
2901 fpstate->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
2903 error = npxsetregs(td, fpstate, xfpustate, xfpustate_len);
2913 fpstate_drop(struct thread *td)
2916 KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
2919 if (PCPU_GET(fpcurthread) == td)
2923 * XXX force a full drop of the npx. The above only drops it if we
2924 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
2926 * XXX I don't much like npxgetregs()'s semantics of doing a full
2927 * drop. Dropping only to the pcb matches fnsave's behaviour.
2928 * We only need to drop to !PCB_INITDONE in sendsig(). But
2929 * sendsig() is the only caller of npxgetregs()... perhaps we just
2930 * have too many layers.
2932 curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
2933 PCB_NPXUSERINITDONE);
2938 fill_dbregs(struct thread *td, struct dbreg *dbregs)
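/*
 * Report either the live debug registers or the copies saved in the
 * thread's PCB.
 */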
2943 dbregs->dr[0] = rdr0();
2944 dbregs->dr[1] = rdr1();
2945 dbregs->dr[2] = rdr2();
2946 dbregs->dr[3] = rdr3();
2947 dbregs->dr[4] = rdr4();
2948 dbregs->dr[5] = rdr5();
2949 dbregs->dr[6] = rdr6();
2950 dbregs->dr[7] = rdr7();
2953 dbregs->dr[0] = pcb->pcb_dr0;
2954 dbregs->dr[1] = pcb->pcb_dr1;
2955 dbregs->dr[2] = pcb->pcb_dr2;
2956 dbregs->dr[3] = pcb->pcb_dr3;
2959 dbregs->dr[6] = pcb->pcb_dr6;
2960 dbregs->dr[7] = pcb->pcb_dr7;
2966 set_dbregs(struct thread *td, struct dbreg *dbregs)
2972 load_dr0(dbregs->dr[0]);
2973 load_dr1(dbregs->dr[1]);
2974 load_dr2(dbregs->dr[2]);
2975 load_dr3(dbregs->dr[3]);
2976 load_dr4(dbregs->dr[4]);
2977 load_dr5(dbregs->dr[5]);
2978 load_dr6(dbregs->dr[6]);
2979 load_dr7(dbregs->dr[7]);
2982 * Don't let an illegal value for dr7 get set. Specifically,
2983 * check for undefined settings. Setting these bit patterns
2984 * results in undefined behaviour and can lead to an unexpected
2987 for (i = 0; i < 4; i++) {
2988 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
2990 if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
2997 * Don't let a process set a breakpoint that is not within the
2998 * process's address space. If a process could do this, it
2999 * could halt the system by setting a breakpoint in the kernel
3000 * (if ddb was enabled). Thus, we need to check to make sure
3001 * that no breakpoints are being enabled for addresses outside
3002 * the process's address space.
3004 * XXX - what about when the watched area of the user's
3005 * address space is written into from within the kernel
3006 * ... wouldn't that still cause a breakpoint to be generated
3007 * from within kernel mode?
3010 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
3011 /* dr0 is enabled */
3012 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
3016 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
3017 /* dr1 is enabled */
3018 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
3022 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
3023 /* dr2 is enabled */
3024 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
3028 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
3029 /* dr3 is enabled */
3030 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
3034 pcb->pcb_dr0 = dbregs->dr[0];
3035 pcb->pcb_dr1 = dbregs->dr[1];
3036 pcb->pcb_dr2 = dbregs->dr[2];
3037 pcb->pcb_dr3 = dbregs->dr[3];
3038 pcb->pcb_dr6 = dbregs->dr[6];
3039 pcb->pcb_dr7 = dbregs->dr[7];
3041 pcb->pcb_flags |= PCB_DBREGS;
3048 * Return > 0 if a hardware breakpoint has been hit, and the
3049 * breakpoint was in user space. Return 0 otherwise.
3052 user_dbreg_trap(void)
3054 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
3055 u_int32_t bp; /* breakpoint bits extracted from dr6 */
3056 int nbp; /* number of breakpoints that triggered */
3057 caddr_t addr[4]; /* breakpoint addresses */
3061 if ((dr7 & 0x000000ff) == 0) {
3063 * all of the L0-L3/G0-G3 enable bits in the dr7 register are zero,
3064 * thus the trap couldn't have been caused by the
3065 * hardware debug registers
3072 bp = dr6 & 0x0000000f;
3076 * None of the breakpoint bits are set, meaning this
3077 * trap was not caused by any of the debug registers
3083 * at least one of the breakpoints was hit; check to see
3084 * which ones and if any of them are user space addresses
3088 addr[nbp++] = (caddr_t)rdr0();
3091 addr[nbp++] = (caddr_t)rdr1();
3094 addr[nbp++] = (caddr_t)rdr2();
3097 addr[nbp++] = (caddr_t)rdr3();
3100 for (i = 0; i < nbp; i++) {
3101 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
3103 * addr[i] is in user space
3110 * None of the breakpoints are in user space.
3118 * Provide inb() and outb() as functions. They are normally only available as
3119 * inline functions, and thus cannot be called from the debugger.
3122 /* silence compiler warnings */
3123 u_char inb_(u_short);
3124 void outb_(u_short, u_char);
3133 outb_(u_short port, u_char data)