/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atalk.h"
#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_kdtrace.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>
#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif
#include <pc98/pc98/pc98_machdep.h>

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#include <machine/perfmon.h>
#include <machine/smp.h>
#include <machine/apicvar.h>

#include <x86/isa/icu.h>
/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
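/*
 * Illustrative note (added commentary, not from the original source):
 * __curthread() on i386 fetches the current thread pointer with a single
 * read through %fs, which points at this CPU's struct pcpu.  A sketch of
 * the idea, assuming the usual inline-asm form:
 *
 *	static __inline struct thread *
 *	__curthread(void)
 *	{
 *		struct thread *td;
 *
 *		__asm("movl %%fs:0,%0" : "=r" (td));
 *		return (td);
 *	}
 *
 * That "%fs:0" is why the CTASSERT above insists that pc_curthread sit
 * at offset 0 of struct pcpu.
 */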
extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);	/* XXX header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);

#define CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
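/*
 * Descriptive note (added commentary): CS_SECURE() accepts a code-selector
 * value only if its requested privilege level is user (SEL_UPL), so a
 * signal handler cannot hand sigreturn() a kernel %cs.  EFL_SECURE()
 * accepts a new eflags value only if every bit outside PSL_USERCHANGE
 * (the set of flags user mode may legitimately modify) is identical to
 * the old value.  For example, an attempt to raise PSL_IOPL through a
 * doctored signal context differs from the old eflags in a bit outside
 * PSL_USERCHANGE and is rejected.
 */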
#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int  set_fpcontext(struct thread *td, const mcontext_t *mcp);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int need_pre_dma_flush;		/* If 1, use wbinvd before DMA transfer. */
int need_post_dma_flush;	/* If 1, use invd after DMA transfer. */

extern vm_offset_t ksym_start, ksym_end;

int _udatasel, _ucodesel;

static int ispc98 = 1;
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");

static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
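/*
 * Descriptive example (added commentary): phys_avail[] holds start/end
 * pairs of usable physical memory, terminated by a 0,0 pair, e.g.
 *
 *	phys_avail[0] = 0x1000;		start of first chunk
 *	phys_avail[1] = 0x9f000;	end of first chunk
 *	phys_avail[2] = 0x100000;	start of second chunk
 *	phys_avail[3] = 0x1f00000;	end of second chunk
 *	phys_avail[4] = 0;		terminator
 *	phys_avail[5] = 0;
 *
 * The *_ARRAY_END macros stop the scan two slots early so the terminating
 * pair always fits; the addresses above are made up for illustration.
 */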
struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mem_range_softc mem_range_softc;

/*
 * Good {morning,afternoon,evening,night}.
 */
	panicifcpuunsupported();

	/*
	 * Display physical memory.
	 */
	memsize = ptoa((uintmax_t)Maxmem);
	printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	printf("Physical memory chunk(s):\n");
	for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
		size = phys_avail[indx + 1] - phys_avail[indx];
		printf(
		    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
		    (uintmax_t)phys_avail[indx],
		    (uintmax_t)phys_avail[indx + 1] - 1,
		    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	vm_pager_bufferinit();

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
#ifdef COMPAT_43
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
	struct osigframe sf, *fp;
	struct trapframe *regs;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
	}

	regs->tf_esp = (int)fp;
	if (p->p_sysent->sv_sigcode_base != 0) {
		regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
		    szosigcode;
	} else {
		/* a.out sysentvec does not use shared page */
		regs->tf_eip = p->p_sysent->sv_psstrings - szosigcode;
	}
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	mtx_lock(&psp->ps_mtx);
#endif /* COMPAT_43 */

#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
	struct sigframe4 sf, *sfp;
	struct trapframe *regs;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
	    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	bzero(sf.sf_uc.uc_mcontext.__spare__,
	    sizeof(sf.sf_uc.uc_mcontext.__spare__));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
	    szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	mtx_lock(&psp->ps_mtx);
#endif /* COMPAT_FREEBSD4 */

void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
	struct sigframe sf, *sfp;
	struct trapframe *regs;
	struct segment_descriptor *sdp;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);

	/*
	 * Unconditionally fill the fsbase and gsbase into the mcontext.
	 */
	sdp = &td->td_pcb->pcb_fsd;
	sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
	    sdp->sd_lobase;
	sf.sf_uc.uc_mcontext.mc_flags = 0;
	bzero(sf.sf_uc.uc_mcontext.mc_spare2,
	    sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
	bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	bzero(&sf.sf_si, sizeof(sf.sf_si));
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = p->p_sysent->sv_sigcode_base;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	mtx_lock(&psp->ps_mtx);
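/*
 * Illustrative note (added commentary, not from the original source):
 * a handler installed with SA_SIGINFO and delivered through the frame
 * built above is entered as if called
 *
 *	handler(sig, &sfp->sf_si, &sfp->sf_uc);
 *
 * i.e. the POSIX prototype void handler(int, siginfo_t *, ucontext_t *).
 * When the handler returns, the sigcode trampoline at sv_sigcode_base
 * invokes sigreturn() with that ucontext to restore the interrupted
 * state.
 */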
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	ksiginfo_t ksi;
	int eflags, error;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF))
			return (EINVAL);

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
	    SIGPROCMASK_OLD);
	return (EJUSTRETURN);
#endif /* COMPAT_43 */
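/*
 * Descriptive note (added commentary): returning EJUSTRETURN tells the
 * syscall return path not to rewrite the trapframe (no %eax/%edx or
 * eflags fixup), since sigreturn has already rebuilt the entire register
 * state the thread should resume with.
 */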
#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
	struct ucontext4 uc;
	struct trapframe *regs;
	struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);
#endif /* COMPAT_FREEBSD4 */

int
sys_sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
	ucontext_t uc, *ucp;
	struct trapframe *regs;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
			    td->td_proc->p_pid, td->td_name, cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
	return (EJUSTRETURN);

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* Not applicable */
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
	register_t reg;
	uint64_t tsc1, tsc2;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	tsc1 = rdtsc();
	DELAY(1000);
	tsc2 = rdtsc();
	intr_restore(reg);
	*rate = (tsc2 - tsc1) * 1000;

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
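/*
 * Descriptive note (added commentary): the calibration above reads the
 * TSC on either side of a fixed 1 ms busy-wait, so tsc2 - tsc1 counts
 * TSC ticks per millisecond; multiplying by 1000 converts that to ticks
 * per second, i.e. an estimated CPU clock rate in Hz.
 */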
/*
 * Shutdown the CPU as much as possible
 */

static int idle_mwait = 1;	/* Use MONITOR/MWAIT for short idle. */
TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
    0, "Use MONITOR/MWAIT for short idle");

#define STATE_RUNNING	0x0
#define STATE_MWAIT	0x1
#define STATE_SLEEPING	0x2
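/*
 * Descriptive note (added commentary): each CPU publishes one of these
 * three values in its monitorbuf.  The idle loop writes STATE_MWAIT or
 * STATE_SLEEPING before parking and STATE_RUNNING when it resumes;
 * cpu_idle_wakeup() reads the value to decide whether flipping it back
 * to STATE_RUNNING suffices (mwait monitors the write) or whether the
 * caller must send an IPI to break the CPU out of hlt.
 */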
static void
cpu_idle_hlt(sbintime_t sbt)
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_SLEEPING;

	/*
	 * Since we may be in a critical section from cpu_idle(), if
	 * an interrupt fires during that critical section we may have
	 * a pending preemption.  If the CPU halts, then that thread
	 * may not execute until a later interrupt awakens the CPU.
	 * To handle this race, check for a runnable thread after
	 * disabling interrupts and immediately return if one is
	 * found.  Also, we must absolutely guarantee that hlt is
	 * the next instruction after sti.  This ensures that any
	 * interrupt that fires after the call to disable_intr() will
	 * immediately awaken the CPU from hlt.  Finally, note that on
	 * x86 this works because sti enables interrupts only after the
	 * instruction that follows it executes, while IF is set to 1
	 * immediately, so the hlt itself can still acknowledge the
	 * interrupt.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
	*state = STATE_RUNNING;

/*
 * MWAIT cpu power states.  Lower 4 bits are sub-states.
 */
#define MWAIT_C0	0xf0
#define MWAIT_C1	0x00
#define MWAIT_C2	0x10
#define MWAIT_C3	0x20
#define MWAIT_C4	0x30
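/*
 * Descriptive note (added commentary): the hint passed to mwait in %eax
 * encodes the target C-state in bits 7:4, biased by one, with the
 * sub-state in bits 3:0 -- hence MWAIT_C1 = 0x00, MWAIT_C2 = 0x10, and
 * MWAIT_C0 = 0xf0 (the bias wraps).  cpu_idle_mwait() below always
 * requests MWAIT_C1, the lightest sleep.
 */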
static void
cpu_idle_mwait(sbintime_t sbt)
	int *state;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_MWAIT;

	/* See comments in cpu_idle_hlt(). */
	disable_intr();
	if (sched_runnable()) {
		enable_intr();
		*state = STATE_RUNNING;
		return;
	}
	cpu_monitor(state, 0, 0);
	if (*state == STATE_MWAIT)
		__asm __volatile("sti; mwait" : : "a" (MWAIT_C1), "c" (0));
	else
		enable_intr();
	*state = STATE_RUNNING;

static void
cpu_idle_spin(sbintime_t sbt)
	int *state;
	int i;

	state = (int *)PCPU_PTR(monitorbuf);
	*state = STATE_RUNNING;

	/*
	 * The sched_runnable() check is racy, but since the loop
	 * re-checks it on every iteration, missing it once has little
	 * impact if any (and it is much better than not checking at all).
	 */
	for (i = 0; i < 1000; i++) {
		if (sched_runnable())
			return;
		cpu_spinwait();
	}

void (*cpu_idle_fn)(sbintime_t) = cpu_idle_hlt;

void
cpu_idle(int busy)
	sbintime_t sbt = -1;

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
	    busy, curcpu);
#if defined(MP_WATCHDOG)
	ap_watchdog(PCPU_GET(cpuid));
#endif
	/* If we are busy - try to use fast methods. */
	if (busy) {
		if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
			cpu_idle_mwait(busy);
			goto out;
		}
	}

	/* If we have time - switch timers into idle mode. */
	if (!busy) {
		critical_enter();
		sbt = cpu_idleclock();
	}

	/* Call main idle method. */
	cpu_idle_fn(sbt);

	/* Switch timers back into active mode. */
	if (!busy) {
		cpu_activeclock();
		critical_exit();
	}
out:
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
	    busy, curcpu);

int
cpu_idle_wakeup(int cpu)
	struct pcpu *pcpu;
	int *state;

	pcpu = pcpu_find(cpu);
	state = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (*state == STATE_SLEEPING)
		return (0);
	if (*state == STATE_MWAIT)
		*state = STATE_RUNNING;
	return (1);

/*
 * Ordered by speed/power consumption.
 */
struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_hlt, "hlt" },
	{ NULL, NULL }
};

static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
	char *avail, *p;
	int error, i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		p += sprintf(p, "%s%s", p != avail ? ", " : "",
		    idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
	char buf[16];
	char *p;
	int error, i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");
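/*
 * Usage note (added commentary): these two sysctls let an administrator
 * inspect and switch the idle method at runtime, e.g.
 *
 *	sysctl machdep.idle_available
 *	sysctl machdep.idle=hlt
 *
 * idle_sysctl() skips "mwait" when the CPU lacks the MONITOR/MWAIT
 * feature (CPUID2_MON), so an unsupported choice falls through to
 * return EINVAL.
 */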
/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = imgp->entry_addr;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = imgp->ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		if (pcb == curpcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;
	pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;

/*
 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
 *
 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
 * instructions.  We must set the CR0_MP bit and use the CR0_TS
 * bit to control the trap, because setting the CR0_EM bit does
 * not cause WAIT instructions to trap.  It's important to trap
 * WAIT instructions - otherwise the "wait" variants of no-wait
 * control instructions would degenerate to the "no-wait" variants
 * after FP context switches but work correctly otherwise.  It's
 * particularly important to trap WAITs when there is no NPX -
 * otherwise the "wait" variants would always degenerate.
 *
 * Try setting CR0_NE to get correct error reporting on 486DX's.
 * Setting it should fail or do nothing on lesser processors.
 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;

u_long bootdev;		/* not a struct cdev * - encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the boot device (not in struct cdev * format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GUFS_SEL	2 %fs Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GUGS_SEL	3 %gs Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GCODE_SEL	4 Code Descriptor for kernel */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GDATA_SEL	5 Data Descriptor for kernel */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GUCODE_SEL	6 Code Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GUDATA_SEL	7 Data Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	.ssd_base = 0x400,
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GLDT_SEL	10 LDT Descriptor */
{	.ssd_base = (int) ldt,
	.ssd_limit = sizeof(ldt)-1,
	.ssd_type = SDT_SYSLDT,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	.ssd_base = (int) ldt,
	.ssd_limit = (512 * sizeof(union descriptor)-1),
	.ssd_type = SDT_SYSLDT,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	.ssd_base = (int) &dblfault_tss,
	.ssd_limit = sizeof(struct i386tss)-1,
	.ssd_type = SDT_SYS386TSS,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* GNDIS_SEL	18 NDIS Descriptor */
	.ssd_xx = 0, .ssd_xx1 = 0,

static struct soft_segment_descriptor ldt_segs[] = {
/* Null Descriptor - overwritten by call gate */
	.ssd_xx = 0, .ssd_xx1 = 0,
/* Null Descriptor - overwritten by call gate */
	.ssd_xx = 0, .ssd_xx1 = 0,
/* Null Descriptor - overwritten by call gate */
	.ssd_xx = 0, .ssd_xx1 = 0,
/* Code Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMERA,
	.ssd_xx = 0, .ssd_xx1 = 0,
/* Null Descriptor - overwritten by call gate */
	.ssd_xx = 0, .ssd_xx1 = 0,
/* Data Descriptor for user */
	.ssd_limit = 0xfffff,
	.ssd_type = SDT_MEMRWA,
	.ssd_xx = 0, .ssd_xx1 = 0,

void
setidt(idx, func, typ, dpl, selec)
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_hioffset = ((int)func) >> 16;
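/*
 * Descriptive example (added commentary): a 386 gate descriptor stores
 * the 32-bit handler address split in two halves around the selector
 * and type fields.  For func = 0xc0a1b2c3 the assignments above yield
 * gd_looffset = 0xb2c3 and gd_hioffset = 0xc0a1; the CPU glues the
 * halves back together when it dispatches through the IDT entry.
 */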
extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm),
#ifdef KDTRACE_HOOKS
	IDTVEC(dtrace_ret),
#endif
	IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}

/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
#endif /* DDB */

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;

	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
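/*
 * Descriptive example (added commentary): sdtossd() unpacks the split
 * fields of a hardware segment descriptor back into flat values; e.g.
 * a descriptor with sd_hibase = 0xc0 and sd_lobase = 0x123456 yields
 * ssd_base = 0xc0123456, and sd_hilimit/sd_lolimit recombine the same
 * way into the 20-bit ssd_limit.
 */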
1833 if (basemem > 640) {
1834 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
1840 * XXX if biosbasemem is now < 640, there is a `hole'
1841 * between the end of base memory and the start of
1842 * ISA memory. The hole may be empty or it may
1843 * contain BIOS code or data. Map it read/write so
1844 * that the BIOS can write to it. (Memory from 0 to
1845 * the physical end of the kernel is mapped read-only
1846 * to begin with and then parts of it are remapped.
1847 * The parts that aren't remapped form holes that
1848 * remain read-only and are unused by the kernel.
1849 * The base memory area is below the physical end of
1850 * the kernel and right now forms a read-only hole.
1851 * The part of it from PAGE_SIZE to
1852 * (trunc_page(biosbasemem * 1024) - 1) will be
1853 * remapped and used by the kernel later.)
1855 * This code is similar to the code used in
1856 * pmap_mapdev, but since no memory needs to be
1857 * allocated we simply change the mapping.
1859 for (pa = trunc_page(basemem * 1024);
1860 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1861 pmap_kenter(KERNBASE + pa, pa);
1864 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
1865 * the vm86 page table so that vm86 can scribble on them using
1866 * the vm86 map too. XXX: why 2 ways for this and only 1 way for
1867 * page 0, at least as initialized here?
1869 pte = (pt_entry_t *)vm86paddr;
1870 for (i = basemem / 4; i < 160; i++)
1871 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1875 * Populate the (physmap) array with base/bound pairs describing the
1876 * available physical memory in the system, then test this memory and
1877 * build the phys_avail array describing the actually-available memory.
1879 * If we cannot accurately determine the physical memory map, then use
1880 * value from the 0xE801 call, and failing that, the RTC.
1882 * Total memory size may be set by the kernel environment variable
1883 * hw.physmem or the compile-time define MAXMEM.
1885 * XXX first should be vm_paddr_t.
1888 getmemsize(int first)
1890 int off, physmap_idx, pa_indx, da_indx;
1891 u_long physmem_tunable, memtest;
1892 vm_paddr_t physmap[PHYSMAP_SIZE];
1894 quad_t dcons_addr, dcons_size;
1901 bzero(physmap, sizeof(physmap));
1903 /* XXX - some of EPSON machines can't use PG_N */
1905 if (pc98_machine_type & M_EPSON_PC98) {
1906 switch (epson_machine_id) {
1910 case EPSON_PC486_HX:
1911 case EPSON_PC486_HG:
1912 case EPSON_PC486_HA:
1918 under16 = pc98_getmemsize(&basemem, &extmem);
1922 physmap[1] = basemem * 1024;
1924 physmap[physmap_idx] = 0x100000;
1925 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
1928 * Now, physmap contains a map of physical memory.
1932 /* make hole for AP bootstrap code */
1933 physmap[1] = mp_bootaddress(physmap[1]);
1937 * Maxmem isn't the "maximum memory", it's one larger than the
1938 * highest page of the physical address space. It should be
1939 * called something like "Maxphyspage". We may adjust this
1940 * based on ``hw.physmem'' and the results of the memory test.
1942 Maxmem = atop(physmap[physmap_idx + 1]);
1945 Maxmem = MAXMEM / 4;
1948 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1949 Maxmem = atop(physmem_tunable);
1952 * By default keep the memtest enabled. Use a general name so that
1953 * one could eventually do more with the code than just disable it.
1956 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
1958 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1959 (boothowto & RB_VERBOSE))
1960 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1963 * If Maxmem has been increased beyond what the system has detected,
1964 * extend the last memory segment to the new limit.
1966 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1967 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1970 * We need to divide chunk if Maxmem is larger than 16MB and
1971 * under 16MB area is not full of memory.
1972 * (1) system area (15-16MB region) is cut off
1973 * (2) extended memory is only over 16MB area (ex. Melco "HYPERMEMORY")
1975 if ((under16 != 16 * 1024) && (extmem > 15 * 1024)) {
1976 /* 15M - 16M region is cut off, so need to divide chunk */
1977 physmap[physmap_idx + 1] = under16 * 1024;
1979 physmap[physmap_idx] = 0x1000000;
1980 physmap[physmap_idx + 1] = physmap[2] + extmem * 1024;
1983 /* call pmap initialization to make new kernel address space */
1984 pmap_bootstrap(first);
1987 * Size up each available chunk of physical memory.
1989 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1992 phys_avail[pa_indx++] = physmap[0];
1993 phys_avail[pa_indx] = physmap[0];
1994 dump_avail[da_indx] = physmap[0];
1998 * Get dcons buffer address
2000 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
2001 getenv_quad("dcons.size", &dcons_size) == 0)
2005 * physmap is in bytes, so when converting to page boundaries,
2006 * round up the start address and round down the end address.
2008 for (i = 0; i <= physmap_idx; i += 2) {
2011 end = ptoa((vm_paddr_t)Maxmem);
2012 if (physmap[i + 1] < end)
2013 end = trunc_page(physmap[i + 1]);
2014 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
2015 int tmp, page_bad, full;
2016 int *ptr = (int *)CADDR1;
2020 * block out kernel memory as not available.
2022 if (pa >= KERNLOAD && pa < first)
2026 * block out dcons buffer
2029 && pa >= trunc_page(dcons_addr)
2030 && pa < dcons_addr + dcons_size)
2038 * map page into kernel: valid, read/write,non-cacheable
2040 *pte = pa | PG_V | PG_RW | pg_n;
2045 * Test for alternating 1's and 0's
2047 *(volatile int *)ptr = 0xaaaaaaaa;
2048 if (*(volatile int *)ptr != 0xaaaaaaaa)
2051 * Test for alternating 0's and 1's
2053 *(volatile int *)ptr = 0x55555555;
2054 if (*(volatile int *)ptr != 0x55555555)
2059 *(volatile int *)ptr = 0xffffffff;
2060 if (*(volatile int *)ptr != 0xffffffff)
2065 *(volatile int *)ptr = 0x0;
2066 if (*(volatile int *)ptr != 0x0)
2069 * Restore original value.
2075 * Adjust array of valid/good pages.
2077 if (page_bad == TRUE)
2080 * If this good page is a continuation of the
2081 * previous set of good pages, then just increase
2082 * the end pointer. Otherwise start a new chunk.
2083 * Note that "end" points one higher than end,
2084 * making the range >= start and < end.
2085 * If we're also doing a speculative memory
2086 * test and we at or past the end, bump up Maxmem
2087 * so that we keep going. The first bad page
2088 * will terminate the loop.
2090 if (phys_avail[pa_indx] == pa) {
2091 phys_avail[pa_indx] += PAGE_SIZE;
2094 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2096 "Too many holes in the physical address space, giving up\n");
2101 phys_avail[pa_indx++] = pa; /* start */
2102 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2106 if (dump_avail[da_indx] == pa) {
2107 dump_avail[da_indx] += PAGE_SIZE;
2110 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2114 dump_avail[da_indx++] = pa; /* start */
2115 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2127 * The last chunk must contain at least one page plus the message
2128 * buffer to avoid complicating other code (message buffer address
2129 * calculation, etc.).
2131 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2132 round_page(msgbufsize) >= phys_avail[pa_indx]) {
2133 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2134 phys_avail[pa_indx--] = 0;
2135 phys_avail[pa_indx--] = 0;
2138 Maxmem = atop(phys_avail[pa_indx]);
2140 /* Trim off space for the message buffer. */
2141 phys_avail[pa_indx] -= round_page(msgbufsize);
2143 /* Map the message buffer. */
2144 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
2145 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2155 struct gate_descriptor *gdp;
2156 int gsel_tss, metadata_missing, x, pa;
2160 thread0.td_kstack = proc0kstack;
2161 thread0.td_kstack_pages = KSTACK_PAGES;
2162 kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
2163 thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;
2166 * This may be done better later if it gets more high level
2167 * components in it. If so just link td->td_proc here.
2169 proc_linkup0(&proc0, &thread0);
2176 metadata_missing = 0;
2177 if (bootinfo.bi_modulep) {
2178 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
2179 preload_bootstrap_relocate(KERNBASE);
2181 metadata_missing = 1;
2184 kern_envp = static_env;
2185 else if (bootinfo.bi_envp)
2186 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
2188 /* Init basic tunables, hz etc */
2192 * Make gdt memory segments. All segments cover the full 4GB
2193 * of address space and permissions are enforced at page level.
2195 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
2196 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
2197 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
2198 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
2199 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
2200 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
2203 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
2204 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2205 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2207 for (x = 0; x < NGDT; x++)
2208 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2210 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
2211 r_gdt.rd_base = (int) gdt;
2212 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2215 pcpu_init(pc, 0, sizeof(struct pcpu));
2216 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
2217 pmap_kenter(pa + KERNBASE, pa);
2218 dpcpu_init((void *)(first + KERNBASE), 0);
2219 first += DPCPU_SIZE;
2220 PCPU_SET(prvspace, pc);
2221 PCPU_SET(curthread, &thread0);
2222 PCPU_SET(curpcb, thread0.td_pcb);
2225 * Initialize mutexes.
2227 * icu_lock: in order to allow an interrupt to occur in a critical
2228 * section, to set pcpu->ipending (etc...) properly, we
2229 * must be able to get the icu lock, so it can't be
2233 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2235 /* make ldt memory segments */
2236 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2237 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2238 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2239 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2241 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2243 PCPU_SET(currentldt, _default_ldt);
2246 for (x = 0; x < NIDT; x++)
2247 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2248 GSEL(GCODE_SEL, SEL_KPL));
2249 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2250 GSEL(GCODE_SEL, SEL_KPL));
2251 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2252 GSEL(GCODE_SEL, SEL_KPL));
2253 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2254 GSEL(GCODE_SEL, SEL_KPL));
2255 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2256 GSEL(GCODE_SEL, SEL_KPL));
2257 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2258 GSEL(GCODE_SEL, SEL_KPL));
2259 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2260 GSEL(GCODE_SEL, SEL_KPL));
2261 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2262 GSEL(GCODE_SEL, SEL_KPL));
2263 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL
2264 , GSEL(GCODE_SEL, SEL_KPL));
2265 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2266 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2267 GSEL(GCODE_SEL, SEL_KPL));
2268 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2269 GSEL(GCODE_SEL, SEL_KPL));
2270 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2271 GSEL(GCODE_SEL, SEL_KPL));
2272 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2273 GSEL(GCODE_SEL, SEL_KPL));
2274 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2275 GSEL(GCODE_SEL, SEL_KPL));
2276 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2277 GSEL(GCODE_SEL, SEL_KPL));
2278 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2279 GSEL(GCODE_SEL, SEL_KPL));
2280 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2281 GSEL(GCODE_SEL, SEL_KPL));
2282 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2283 GSEL(GCODE_SEL, SEL_KPL));
2284 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2285 GSEL(GCODE_SEL, SEL_KPL));
2286 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2287 GSEL(GCODE_SEL, SEL_KPL));
2288 #ifdef KDTRACE_HOOKS
2289 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL,
2290 GSEL(GCODE_SEL, SEL_KPL));
2293 r_idt.rd_limit = sizeof(idt0) - 1;
2294 r_idt.rd_base = (int) idt;
2298 * Initialize the i8254 before the console so that console
2299 * initialization can use DELAY().
2304 * Initialize the console before we print anything out.
2308 if (metadata_missing)
2309 printf("WARNING: loader(8) metadata is missing!\n");
2315 /* Reset and mask the atpics and leave them shut down. */
2319 * Point the ICU spurious interrupt vectors at the APIC spurious
2320 * interrupt handler.
2322 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
2323 GSEL(GCODE_SEL, SEL_KPL));
2324 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
2325 GSEL(GCODE_SEL, SEL_KPL));
2330 ksym_start = bootinfo.bi_symtab;
2331 ksym_end = bootinfo.bi_esymtab;
2337 if (boothowto & RB_KDB)
2338 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/*
	 * Make an initial TSS so the CPU can get an interrupt stack on a
	 * syscall.  The -16 leaves room to grow the trapframe if we came
	 * from vm86 mode.
	 */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    kstack0_sz - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);
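	/*
	 * Added note (not in the original source): the 16 spare bytes
	 * reserved above correspond to the four extra 32-bit segment
	 * register images (es/ds/fs/gs) that a vm86 trapframe carries
	 * beyond a plain trapframe, so the frame can be grown in place
	 * on return from vm86 mode.  A compile-time sketch of that
	 * assumption:
	 */
	CTASSERT(sizeof(struct trapframe_vm86) - sizeof(struct trapframe) == 16);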
	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
	init_param2(physmem);

	/* Now running on new page tables, configured, and u/iom is accessible. */
	msgbufinit(msgbufp, msgbufsize);
	/* Make a call gate to reenter kernel with. */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;

	/* XXX does this work? */
	/* XXX yes! */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
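	/*
	 * Added usage sketch (hypothetical, not part of this file): these
	 * LDT call gates back the historical lcall system-call convention,
	 * in which a binary enters the kernel with a far call through LDT
	 * selector 7 (entry 0, table-indicator bit set, RPL 3), e.g.:
	 *
	 *	movl	$SYS_getpid, %eax
	 *	lcall	$7, $0
	 */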
	/* Transfer to user mode. */
	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

	/* Set up proc 0's pcb. */
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

}
void
spinlock_enter(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		flags = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_flags = flags;
	} else
		td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t flags;

	td = curthread;
	critical_exit();
	flags = td->td_md.md_saved_flags;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(flags);
}
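/*
 * Added usage note (illustrative): the per-thread count makes the pair
 * recursive, so only the outermost enter/exit actually touches the
 * interrupt state:
 *
 *	spinlock_enter();	-- count 0 -> 1, interrupts disabled
 *	spinlock_enter();	-- count 1 -> 2, flags untouched
 *	spinlock_exit();	-- count 2 -> 1
 *	spinlock_exit();	-- count 1 -> 0, interrupts restored
 */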
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	tmp = kmem_malloc(kernel_arena, PAGE_SIZE * 2, M_WAITOK | M_ZERO);
	if (tmp == 0)
		panic("kmem_malloc returned 0");

	/* Put the problematic entry (#6) at the end of the lower page. */
	new_idt = (struct gate_descriptor *)
	    (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (u_int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */
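/*
 * Added note (not in the original): gate descriptors are 8 bytes, so the
 * seven entries 0 through #UD (vector 6) occupy the last 56 bytes of the
 * read-only lower page.  Vector 6 is the one the F00F lockup sequence
 * (an illegal "lock cmpxchg8b" encoding) exercises; every later vector
 * lands on the still-writable upper page.
 */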
/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_edi = tf->tf_edi;
	pcb->pcb_esi = tf->tf_esi;
	pcb->pcb_ebp = tf->tf_ebp;
	pcb->pcb_ebx = tf->tf_ebx;
	pcb->pcb_eip = tf->tf_eip;
	/*
	 * For user-mode traps the frame holds the real %esp; for traps
	 * taken in the kernel the CPU pushed no esp/ss words, so the
	 * pre-trap stack pointer is the end of the frame minus those two
	 * missing 4-byte slots.
	 */
	pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
}
void
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_eip = addr;
}

int
ptrace_single_step(struct thread *td)
{

	td->td_frame->tf_eflags |= PSL_T;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

	td->td_frame->tf_eflags &= ~PSL_T;
	return (0);
}
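/*
 * Added note (illustrative): PSL_T is the x86 trap flag; with it set the
 * CPU raises a debug trap after each instruction, which is how a
 * ptrace(2) PT_STEP request single-steps the target thread.
 */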
int
fill_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	pcb = td->td_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (fill_frame_regs(tp, regs));
}

int
fill_frame_regs(struct trapframe *tp, struct reg *regs)
{

	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	return (0);
}
int
set_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	/*
	 * Refuse eflags or %cs values that would elevate privilege, e.g.
	 * a changed IOPL or a non-user code selector.
	 */
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	pcb = td->td_pcb;
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}
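/*
 * Added example (hypothetical): a debugger that tries to grant its
 * target I/O privilege via PT_SETREGS -- regs.r_eflags |= PSL_IOPL --
 * fails the EFL_SECURE() check above and gets EINVAL instead.
 */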
#ifdef CPU_ENABLE_SSE
static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	bzero(sv_87, sizeof(*sv_87));

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
}

static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
}
#endif /* CPU_ENABLE_SSE */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

	KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
	    P_SHOULDSTOP(td->td_proc),
	    ("not suspended thread %p", td));
#ifdef DEV_NPX
	npxgetregs(td);
#else
	bzero(fpregs, sizeof(*fpregs));
#endif
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fill_fpregs_xmm(&td->td_pcb->pcb_user_save.sv_xmm,
		    (struct save87 *)fpregs);
	else
#endif /* CPU_ENABLE_SSE */
		bcopy(&td->td_pcb->pcb_user_save.sv_87, fpregs,
		    sizeof(*fpregs));
	return (0);
}
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		set_fpregs_xmm((struct save87 *)fpregs,
		    &td->td_pcb->pcb_user_save.sv_xmm);
	else
#endif /* CPU_ENABLE_SSE */
		bcopy(fpregs, &td->td_pcb->pcb_user_save.sv_87,
		    sizeof(*fpregs));
	npxuserinited(td);
	return (0);
}
/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct trapframe *tp;
	struct segment_descriptor *sdp;

	tp = td->td_frame;
	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_esp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_gs = td->td_pcb->pcb_gs;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_es = tp->tf_es;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_edi = tp->tf_edi;
	mcp->mc_esi = tp->tf_esi;
	mcp->mc_ebp = tp->tf_ebp;
	mcp->mc_isp = tp->tf_isp;
	mcp->mc_eflags = tp->tf_eflags;
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_eax = 0;
		mcp->mc_edx = 0;
		mcp->mc_eflags &= ~PSL_C;
	} else {
		mcp->mc_eax = tp->tf_eax;
		mcp->mc_edx = tp->tf_edx;
	}
	mcp->mc_ebx = tp->tf_ebx;
	mcp->mc_ecx = tp->tf_ecx;
	mcp->mc_eip = tp->tf_eip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_esp = tp->tf_esp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp);
	sdp = &td->td_pcb->pcb_fsd;
	mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
	return (0);
}
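/*
 * Added note: an i386 segment descriptor keeps its 32-bit base address
 * split into a 24-bit low field and an 8-bit high field; the
 * "sd_hibase << 24 | sd_lobase" expressions above simply reassemble the
 * full linear base for mc_fsbase/mc_gsbase.
 */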
/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tp;
	int eflags, ret;

	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp))
		return (EINVAL);
	eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
	    (tp->tf_eflags & ~PSL_USERCHANGE);
	if ((ret = set_fpcontext(td, mcp)) == 0) {
		tp->tf_fs = mcp->mc_fs;
		tp->tf_es = mcp->mc_es;
		tp->tf_ds = mcp->mc_ds;
		tp->tf_edi = mcp->mc_edi;
		tp->tf_esi = mcp->mc_esi;
		tp->tf_ebp = mcp->mc_ebp;
		tp->tf_ebx = mcp->mc_ebx;
		tp->tf_edx = mcp->mc_edx;
		tp->tf_ecx = mcp->mc_ecx;
		tp->tf_eax = mcp->mc_eax;
		tp->tf_eip = mcp->mc_eip;
		tp->tf_eflags = eflags;
		tp->tf_esp = mcp->mc_esp;
		tp->tf_ss = mcp->mc_ss;
		td->td_pcb->pcb_gs = mcp->mc_gs;
	}
	return (ret);
}
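/*
 * Added masking sketch: with PSL_USERCHANGE naming the user-settable
 * eflags bits,
 *
 *	eflags = (new & PSL_USERCHANGE) | (old & ~PSL_USERCHANGE)
 *
 * takes condition codes and the like from the caller while privileged
 * bits such as IOPL and IF keep their current kernel-visible values.
 */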
static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{

#ifndef DEV_NPX
	mcp->mc_fpformat = _MC_FPFMT_NODEV;
	mcp->mc_ownedfp = _MC_FPOWNED_NONE;
	bzero(mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
#else
	mcp->mc_ownedfp = npxgetregs(td);
	bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate[0],
	    sizeof(mcp->mc_fpstate));
	mcp->mc_fpformat = npxformat();
#endif
}
static int
set_fpcontext(struct thread *td, const mcontext_t *mcp)
{

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
	    mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
#ifdef CPU_ENABLE_SSE
		if (cpu_fxsr)
			((union savefpu *)&mcp->mc_fpstate)->sv_xmm.sv_env.
			    en_mxcsr &= cpu_mxcsr_mask;
#endif
		npxsetregs(td, (union savefpu *)&mcp->mc_fpstate);
	} else
		return (EINVAL);
	return (0);
}
static void
fpstate_drop(struct thread *td)
{

	KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
	critical_enter();
	if (PCPU_GET(fpcurthread) == td)
		npxdrop();
	/*
	 * XXX force a full drop of the npx.  The above only drops it if we
	 * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
	 *
	 * XXX I don't much like npxgetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of npxgetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
	    PCB_NPXUSERINITDONE);
	critical_exit();
}
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[4] = rdr4();
		dbregs->dr[5] = rdr5();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[4] = 0;
		dbregs->dr[5] = 0;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	return (0);
}
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr4(dbregs->dr[4]);
		load_dr5(dbregs->dr[5]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected or unreproducible hang.
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
		}
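		/*
		 * Added note (factual background): in DR7 each breakpoint
		 * has a 2-bit R/W field and a 2-bit LEN field.  R/W 10b
		 * selects I/O breakpoints, which require CR4.DE and are
		 * not supported here, and LEN 10b is an undefined length
		 * encoding on 32-bit processors -- hence both patterns are
		 * rejected above.
		 */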
		/*
		 * Don't let a process set a breakpoint that is not within
		 * the process's address space.  If a process could do this,
		 * it could halt the system by setting a breakpoint in the
		 * kernel (if ddb was enabled).  Thus, we need to check to
		 * make sure that no breakpoints are being enabled for
		 * addresses outside the process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb = td->td_pcb;
		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}
/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * None of the per-breakpoint enable bits (L0-L3/G0-G3,
		 * the low byte of dr7) are set, so the trap couldn't
		 * have been caused by the hardware debug registers.
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers.
		 */
		return 0;
	}

	/*
	 * At least one of the breakpoints was hit; check to see
	 * which ones and whether any of them are user space addresses.
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}
/*
 * Provide inb() and outb() as functions.  They are normally only available
 * as inline functions, thus cannot be called from the debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

u_char
inb_(u_short port)
{
	return inb(port);
}

void
outb_(u_short port, u_char data)
{
	outb(port, data);
}
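/*
 * Added usage note (illustrative): from the DDB prompt these wrappers can
 * be invoked directly, e.g. "call outb_(0x80, 0)" to poke an I/O port,
 * which would not work with the inline inb()/outb() forms.
 */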