/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_perfmon.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/eventhandler.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>
#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/db_sym.h>
#endif
#include <pc98/pc98/pc98_machdep.h>

#include <net/netisr.h>

#include <machine/bootinfo.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#include <machine/vm86.h>
#include <machine/perfmon.h>
#include <machine/smp.h>

#include <i386/isa/icu.h>
/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
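/*
 * (Illustrative note, not from the original sources) On i386, __curthread()
 * fetches the current thread with a single %fs-relative load of offset 0,
 * so the assertion above turns any reordering of struct pcpu's first member
 * into a compile-time error rather than silent per-CPU data corruption.
 */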
extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);	/* XXX header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
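/*
 * (Illustrative usage, following the sigreturn() paths below) These two
 * predicates vet user-supplied context before it is reloaded: CS_SECURE()
 * rejects a %cs selector whose privilege level is not user (SEL_UPL), and
 * EFL_SECURE() rejects an eflags image that differs from the current one
 * in any bit outside PSL_USERCHANGE, e.g.:
 *
 *	if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF) ||
 *	    !CS_SECURE(scp->sc_cs))
 *		return (EINVAL);	(refuse privilege escalation)
 */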
#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define	CPU_ENABLE_SSE
#endif
static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int  set_fpcontext(struct thread *td, const mcontext_t *mcp);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
int	need_pre_dma_flush;	/* If 1, use wbinvd before DMA transfer. */
int	need_post_dma_flush;	/* If 1, use invd after DMA transfer. */
extern vm_offset_t ksym_start, ksym_end;

int	_udatasel, _ucodesel;

static int ispc98 = 1;
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");

static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif
/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define	PHYS_AVAIL_ARRAY_END	((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define	DUMP_AVAIL_ARRAY_END	((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
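/*
 * (Illustrative layout; the addresses are made up) phys_avail[] holds
 * start/end pairs terminated by a pair of zeroes, so a machine with two
 * usable chunks might populate it as:
 *
 *	phys_avail[0] = 0x00001000;	phys_avail[1] = 0x0009f000;
 *	phys_avail[2] = 0x00100000;	phys_avail[3] = 0x0fe00000;
 *	phys_avail[4] = 0;		phys_avail[5] = 0;
 *
 * The cpu_startup() loop below walks the array two entries at a time
 * until it reaches the zero terminator.
 */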
struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct pcpu __pcpu[MAXCPU];

struct mem_range_softc mem_range_softc;
static void
cpu_startup(dummy)
	void *dummy;
{
	int indx;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	panicifcpuunsupported();

	printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
	    ptoa((uintmax_t)Maxmem) / 1048576);

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	printf("Physical memory chunk(s):\n");
	for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
		vm_paddr_t size;

		size = phys_avail[indx + 1] - phys_avail[indx];
		printf(
		    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
		    (uintmax_t)phys_avail[indx],
		    (uintmax_t)phys_avail[indx + 1] - 1,
		    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();
}
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
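/*
 * (Illustrative sketch of the frame osendsig() builds; struct osigframe in
 * <machine/sigframe.h> is authoritative, the field order here is schematic.)
 *
 *	higher addresses | saved machine state (si_sc)          |
 *	                 | siginfo bits (sf_siginfo)            |
 *	                 | handler args: signal number, code or |
 *	fp, new %esp  -> |  siginfo ptr, sigcontext ptr sf_scp  |
 *
 * The signal trampoline at PS_STRINGS - szosigcode invokes the handler with
 * these arguments and then calls osigreturn() on the saved context.
 */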
static void
osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = ksi->ksi_code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;
	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}
	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	mtx_lock(&psp->ps_mtx);
}
#endif /* COMPAT_43 */
#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;
	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = ksi->ksi_addr;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}
	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	mtx_lock(&psp->ps_mtx);
}
#endif	/* COMPAT_FREEBSD4 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, ksi, mask);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, ksi, mask);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_esp - sizeof(struct sigframe);
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
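	/*
	 * (Illustrative note) Masking the low four bits keeps the frame,
	 * and therefore the handler's entry %esp, 16-byte aligned; that is
	 * the alignment SSE memory operands and contemporary i386 compiler
	 * output generally assume for the stack.
	 */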
	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = ksi->ksi_code;
		sf.sf_addr = (register_t)ksi->ksi_addr;
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}
	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_eflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	mtx_lock(&psp->ps_mtx);
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
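/*
 * (Illustrative round trip, pieced together from this file) A caught
 * signal flows roughly as:
 *
 *	sendsig()    - kernel builds the sigframe and points %eip at the
 *	               trampoline copied out near PS_STRINGS
 *	trampoline   - calls handler(sig, code/siginfo, &ucontext)
 *	trampoline   - invokes the sigreturn(&ucontext) syscall
 *	sigreturn()  - validates %cs/eflags with CS_SECURE/EFL_SECURE,
 *	               reloads the trapframe and signal mask, and returns
 *	               via EJUSTRETURN
 */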
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	struct proc *p = td->td_proc;
	int eflags, error;
	ksiginfo_t ksi;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 *
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF))
			return (EINVAL);

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

#if defined(COMPAT_43)
	if (scp->sc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	SIGSETOLD(td->td_sigmask, scp->sc_mask);
	SIG_CANTMASK(td->td_sigmask);

	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */
#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const struct ucontext4 *ucp;
	int cs, eflags, error;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 *
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	td->td_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);

	return (EJUSTRETURN);
}
#endif	/* COMPAT_FREEBSD4 */
int
sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const struct __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const ucontext_t *ucp;
	int cs, eflags, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
		}

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 *
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			ksiginfo_init_trap(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_code = BUS_OBJERR;
			ksi.ksi_trapno = T_PROTFLT;
			ksi.ksi_addr = (void *)regs->tf_eip;
			trapsignal(td, &ksi);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	td->td_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);

	return (EJUSTRETURN);
}
/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}
/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	register_t reg;
	uint64_t tsc1, tsc2;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);
	if ((cpu_feature & CPUID_TSC) == 0)
		return (EOPNOTSUPP);

	/* If we're booting, trust the rate calibrated moments ago. */
	if (cold) {
		*rate = tsc_freq;
		return (0);
	}

#ifdef SMP
	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	tsc1 = rdtsc();
	DELAY(1000);
	tsc2 = rdtsc();
	intr_restore(reg);

#ifdef SMP
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
#endif
	/*
	 * Calculate the difference in readings, convert to Hz, and
	 * subtract 0.5% of the total.  Empirical testing has shown that
	 * overhead in DELAY() works out to approximately this value.
	 */
	tsc2 -= tsc1;
	*rate = tsc2 * 1000 - tsc2 * 5;
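	/*
	 * (Illustrative arithmetic) tsc2 * 1000 - tsc2 * 5 == tsc2 * 995:
	 * assuming the calibration above measured ticks across a 1 ms
	 * delay, this scales the count to Hz and then deducts the 0.5%
	 * attributed to DELAY() overhead.
	 */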
	return (0);
}

/*
 * Shut down the CPU as much as possible.
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}
/*
 * Hook to idle the CPU when possible.  In the SMP case we default to
 * off because a halted cpu will not currently pick up a new thread in the
 * run queue until the next timer tick.  If turned on this will result in
 * approximately a 4.2% loss in real time performance in buildworld tests
 * (but improves user and sys times oddly enough), and saves approximately
 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
 *
 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
 * Then we can have our cake and eat it too.
 *
 * XXX I'm turning it on for SMP as well by default for now.  It seems to
 * help lock contention somewhat, and this is critical for HTT. -Peter
 */
static int	cpu_idle_hlt = 1;
TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt);
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");
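/*
 * (Usage note) Being both a loader tunable and a CTLFLAG_RW sysctl, the
 * knob can be set at boot or at runtime, e.g.:
 *
 *	machdep.cpu_idle_hlt="0"	(in /boot/loader.conf)
 *	sysctl machdep.cpu_idle_hlt=0	(on a running system)
 */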
static void
cpu_idle_default(void)
{
	/*
	 * we must absolutely guarantee that hlt is the
	 * absolute next instruction after sti or we
	 * introduce a timing window.
	 */
	__asm __volatile("sti; hlt");
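	/*
	 * (Note) STI enables interrupts only after the following
	 * instruction retires, so an interrupt arriving here is held off
	 * until HLT is entered and then wakes the CPU.  Any instruction
	 * between the two would reopen the lost-wakeup window that the
	 * comment above describes.
	 */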
}

/*
 * Note that we have to be careful here to avoid a race between checking
 * sched_runnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{

#ifdef SMP
	if (mp_grab_cpu_hlt())
		return;
#endif
	if (cpu_idle_hlt) {
		disable_intr();
		if (sched_runnable())
			enable_intr();
		else
			(*cpu_idle_hook)();
	}
}

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

/* Other subsystems (e.g., ACPI) can hook this later. */
void (*cpu_idle_hook)(void) = cpu_idle_default;
/*
 * Clear registers on exec
 */
void
exec_setregs(td, entry, stack, ps_strings)
	struct thread *td;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pc->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	mtx_lock_spin(&dt_lock);
	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);
	else
		mtx_unlock_spin(&dt_lock);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry. Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}
	/*
	 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
	 *
	 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
	 * instructions.  We must set the CR0_MP bit and use the CR0_TS
	 * bit to control the trap, because setting the CR0_EM bit does
	 * not cause WAIT instructions to trap.  It's important to trap
	 * WAIT instructions - otherwise the "wait" variants of no-wait
	 * control instructions would degenerate to the "no-wait" variants
	 * after FP context switches but work correctly otherwise.  It's
	 * particularly important to trap WAITs when there is no NPX -
	 * otherwise the "wait" variants would always degenerate.
	 *
	 * Try setting CR0_NE to get correct error reporting on 486DX's.
	 * Setting it should fail or do nothing on lesser processors.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
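	/*
	 * (Illustrative gloss on the bits the comment above doesn't cover)
	 * CR0_WP makes ring 0 honor page-level write protection, which
	 * copy-on-write relies on, and CR0_AM permits alignment checking
	 * to be armed later through EFLAGS.AC.
	 */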
u_long bootdev;		/* not a struct cdev * - encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev * format)");
/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
struct region_descriptor r_gdt, r_idt;	/* table descriptors */
struct mtx dt_lock;			/* lock for GDT and LDT */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern vm_offset_t proc0kstack;

/*
 * software prototypes -- in more palatable form.
 *
 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GPRIV_SEL	1 SMP Per-Processor Private Data Descriptor */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUFS_SEL	2 %fs Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUGS_SEL	3 %gs Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	4 Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	5 Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUCODE_SEL	6 Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUDATA_SEL	7 Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	0x400,			/* segment base address */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	9 Proc 0 Tss Descriptor */
{
	0x0,			/* segment base address */
	sizeof(struct i386tss)-1,/* length  */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	10 LDT Descriptor */
{	(int) ldt,		/* segment base address  */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	11 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address  */
	(512 * sizeof(union descriptor)-1),	/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	12 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address  */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten)  */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten)  */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GNDIS_SEL	18 NDIS Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};
void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16 ;
}

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
	struct gate_descriptor *ip;
	int idx;
	uintptr_t func;

	ip = idt;
	for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
		func = (ip->gd_hioffset << 16 | ip->gd_looffset);
		if (func != (uintptr_t)&IDTVEC(rsvd)) {
			db_printf("%3d\t", idx);
			db_printsym(func, DB_STGY_PROC);
			db_printf("\n");
		}
		ip++;
	}
}
/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
	uint64_t idtr, gdtr;

	idtr = ridt();
	db_printf("idtr\t0x%08x/%04x\n",
	    (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
	gdtr = rgdt();
	db_printf("gdtr\t0x%08x/%04x\n",
	    (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
	db_printf("ldtr\t0x%04x\n", rldt());
	db_printf("tr\t0x%04x\n", rtr());
	db_printf("cr0\t0x%08x\n", rcr0());
	db_printf("cr2\t0x%08x\n", rcr2());
	db_printf("cr3\t0x%08x\n", rcr3());
	db_printf("cr4\t0x%08x\n", rcr4());
}
void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{

	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}
/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(int first)
{
	int i, off, physmap_idx, pa_indx, da_indx;
	u_long physmem_tunable;
	u_int extmem, under16;
	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	quad_t dcons_addr, dcons_size;

	bzero(physmap, sizeof(physmap));

	/* XXX - some of EPSON machines can't use PG_N */
	if (pc98_machine_type & M_EPSON_PC98) {
		switch (epson_machine_id) {
		case EPSON_PC486_HX:
		case EPSON_PC486_HG:
		case EPSON_PC486_HA:
			pg_n = 0;
			break;
		}
	}
	/*
	 * Perform "base memory" related probes & setup
	 */
	under16 = pc98_getmemsize(&basemem, &extmem);
	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
		    basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE)
		pmap_kenter(KERNBASE + pa, pa);

	/*
	 * if basemem != 640, map pages r/w into vm86 page table so
	 * that the bios can scribble on it.
	 */
	pte = (pt_entry_t *)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1]);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
	/*
	 * We need to divide the chunk if Maxmem is larger than 16MB and
	 * the area under 16MB is not completely filled with memory.
	 * (1) system area (15-16MB region) is cut off
	 * (2) extended memory is only over 16MB area (ex. Melco "HYPERMEMORY")
	 */
	if ((under16 != 16 * 1024) && (extmem > 15 * 1024)) {
		/* 15M - 16M region is cut off, so need to divide chunk */
		physmap[physmap_idx + 1] = under16 * 1024;
		physmap_idx += 2;
		physmap[physmap_idx] = 0x1000000;
		physmap[physmap_idx + 1] = physmap[2] + extmem * 1024;
	}
	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];
	pte = CMAP1;

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR1;

			full = FALSE;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= KERNLOAD && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,
			 * non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | pg_n;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one higher than end,
			 * making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	*pte = 0;
	invltlb();
	/*
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);
}
void
init386(first)
	int first;
{
	struct gate_descriptor *gdp;
	int gsel_tss, metadata_missing, x;
	struct pcpu *pc;

	thread0.td_kstack = proc0kstack;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

	/*
	 * This may be done better later if it gets more high level
	 * components in it.  If so, just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	metadata_missing = 0;
	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}

	if (envmode == 1)
		kern_envp = static_env;
	else if (bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * Make gdt memory segments.  All segments cover the full 4GB
	 * of address space and permissions are enforced at page level.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);

	pc = &__pcpu[0];
	gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

	for (x = 0; x < NGDT; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) gdt;
	mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
	lgdt(&r_gdt);

	pcpu_init(pc, 0, sizeof(struct pcpu));
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(curpcb, thread0.td_pcb);
	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	      section, to set pcpu->ipending (etc...) properly, we
	 *	      must be able to get the icu lock, so it can't be
	 *	      under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2004 /* make ldt memory segments */
2005 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2006 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2007 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2008 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2010 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2012 PCPU_SET(currentldt, _default_ldt);
2015 for (x = 0; x < NIDT; x++)
2016 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2017 GSEL(GCODE_SEL, SEL_KPL));
2018 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2019 GSEL(GCODE_SEL, SEL_KPL));
2020 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2021 GSEL(GCODE_SEL, SEL_KPL));
2022 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2023 GSEL(GCODE_SEL, SEL_KPL));
2024 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2025 GSEL(GCODE_SEL, SEL_KPL));
2026 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2027 GSEL(GCODE_SEL, SEL_KPL));
2028 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2029 GSEL(GCODE_SEL, SEL_KPL));
2030 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2031 GSEL(GCODE_SEL, SEL_KPL));
2032 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL
2033 , GSEL(GCODE_SEL, SEL_KPL));
2034 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2035 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2036 GSEL(GCODE_SEL, SEL_KPL));
2037 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2038 GSEL(GCODE_SEL, SEL_KPL));
2039 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2040 GSEL(GCODE_SEL, SEL_KPL));
2041 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2042 GSEL(GCODE_SEL, SEL_KPL));
2043 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2044 GSEL(GCODE_SEL, SEL_KPL));
2045 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2046 GSEL(GCODE_SEL, SEL_KPL));
2047 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2048 GSEL(GCODE_SEL, SEL_KPL));
2049 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2050 GSEL(GCODE_SEL, SEL_KPL));
2051 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2052 GSEL(GCODE_SEL, SEL_KPL));
2053 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2054 GSEL(GCODE_SEL, SEL_KPL));
2055 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2056 GSEL(GCODE_SEL, SEL_KPL));
2058 r_idt.rd_limit = sizeof(idt0) - 1;
2059 r_idt.rd_base = (int) idt;
2063 * Initialize the i8254 before the console so that console
2064 * initialization can use DELAY().
2069 * Initialize the console before we print anything out.
2073 if (metadata_missing)
2074 printf("WARNING: loader(8) metadata is missing!\n");
2081 ksym_start = bootinfo.bi_symtab;
2082 ksym_end = bootinfo.bi_esymtab;
2088 if (boothowto & RB_KDB)
2089 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
2092 finishidentcpu(); /* Final stage of CPU initialization */
2093 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2094 GSEL(GCODE_SEL, SEL_KPL));
2095 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2096 GSEL(GCODE_SEL, SEL_KPL));
2097 initializecpu(); /* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);
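
	/*
	 * For reference: on a user-to-kernel transition the CPU loads its
	 * stack from tss_ss0:tss_esp0 of the active TSS, so esp0 must point
	 * at the top of the thread's kernel stack, just below the pcb, for
	 * traps and interrupts to land on a valid stack.
	 */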

	/* pointer to selector slot for %fs/%gs */
	PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
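
	/*
	 * For reference: IDT_DF was installed above as a task gate
	 * (SDT_SYSTASKGT) through GPANIC_SEL, so a double fault performs a
	 * hardware task switch onto this statically initialized TSS and its
	 * private dblfault_stack, even if the faulting context's kernel
	 * stack is no longer usable.
	 */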

	init_param2(physmem);

	/* now running on new page tables, configured, and u/iom is accessible */

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;
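
	/*
	 * For reference: since the gate's DPL is SEL_UPL, user code can
	 * enter the kernel with a far call.  The classic SysV convention
	 * "lcall $7,$0" selects LDT entry 0 (LSYS5CALLS_SEL) with RPL 3
	 * and is serviced by the lcall_syscall entry installed here.
	 */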

	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	/* Nothing to do on this platform. */
}

void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_flags = intr_disable();
	td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(td->td_md.md_saved_flags);
}
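
/*
 * For reference: these two functions are the machine-dependent hooks behind
 * spin mutexes.  The first spinlock_enter() by a thread disables interrupts
 * and saves the previous flags; nested acquisitions only bump the per-thread
 * count, and the saved flags are restored once the count returns to zero.
 */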

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");

	/* Put the problematic entry (#6) at the end of the lower page. */
	new_idt = (struct gate_descriptor *)
	    (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (u_int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
}
#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
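
/*
 * The F00F workaround above is usually described this way: the erratum
 * leaves the bus locked while the CPU fetches the IDT descriptor for the
 * #UD fault, so placing the first seven descriptors at the end of a
 * read-only page converts that locked access into a recoverable page fault
 * instead of a hard CPU lockup.
 */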

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_edi = tf->tf_edi;
	pcb->pcb_esi = tf->tf_esi;
	pcb->pcb_ebp = tf->tf_ebp;
	pcb->pcb_ebx = tf->tf_ebx;
	pcb->pcb_eip = tf->tf_eip;
	pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
}
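
/*
 * For reference: when the trap came from kernel mode (ISPL() == 0) the CPU
 * pushed no esp/ss words, so the interrupted stack pointer is recovered as
 * the address just past the frame, minus the 8 bytes those two absent
 * members of struct trapframe would otherwise occupy.
 */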

int
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{

	td->td_frame->tf_eflags |= PSL_T;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

	td->td_frame->tf_eflags &= ~PSL_T;
	return (0);
}
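
/*
 * For reference: PSL_T is the x86 trap flag; while it is set the CPU raises
 * a debug exception after each instruction, which is what implements
 * single-stepping for ptrace(PT_STEP, ...) above.
 */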

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	pcb = td->td_pcb;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	pcb = td->td_pcb;
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}
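
/*
 * For reference: the EFL_SECURE()/CS_SECURE() checks reject register sets
 * that would escalate privilege, e.g. kernel-only eflags bits such as IOPL,
 * or a code selector whose RPL is below user level.
 */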

#ifdef CPU_ENABLE_SSE
static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	bzero(sv_87, sizeof(*sv_87));

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
}

static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
}
#endif /* CPU_ENABLE_SSE */
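
/*
 * For reference: the two helpers above translate between the legacy fnsave
 * layout (struct save87), which struct fpreg exports, and the fxsave layout
 * (struct savexmm) the kernel keeps when SSE is enabled; only the fields
 * common to both formats survive the conversion.
 */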

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
		    (struct save87 *)fpregs);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
		    &td->td_pcb->pcb_save.sv_xmm);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
	return (0);
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct trapframe *tp;

	tp = td->td_frame;

	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_esp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_gs = td->td_pcb->pcb_gs;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_es = tp->tf_es;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_edi = tp->tf_edi;
	mcp->mc_esi = tp->tf_esi;
	mcp->mc_ebp = tp->tf_ebp;
	mcp->mc_isp = tp->tf_isp;
	mcp->mc_eflags = tp->tf_eflags;
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_eax = 0;
		mcp->mc_edx = 0;
		mcp->mc_eflags &= ~PSL_C;
	} else {
		mcp->mc_eax = tp->tf_eax;
		mcp->mc_edx = tp->tf_edx;
	}
	mcp->mc_ebx = tp->tf_ebx;
	mcp->mc_ecx = tp->tf_ecx;
	mcp->mc_eip = tp->tf_eip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_esp = tp->tf_esp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp);
	return (0);
}
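
/*
 * For reference: GET_MC_CLEAR_RET is used when the context should resume as
 * if an interrupted system call had returned cleanly: the return registers
 * %eax and %edx are zeroed and PSL_C, the syscall error indicator in this
 * ABI, is cleared.
 */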

/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tp;
	int eflags, ret;

	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp))
		return (EINVAL);
	eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
	    (tp->tf_eflags & ~PSL_USERCHANGE);
	if ((ret = set_fpcontext(td, mcp)) == 0) {
		tp->tf_fs = mcp->mc_fs;
		tp->tf_es = mcp->mc_es;
		tp->tf_ds = mcp->mc_ds;
		tp->tf_edi = mcp->mc_edi;
		tp->tf_esi = mcp->mc_esi;
		tp->tf_ebp = mcp->mc_ebp;
		tp->tf_ebx = mcp->mc_ebx;
		tp->tf_edx = mcp->mc_edx;
		tp->tf_ecx = mcp->mc_ecx;
		tp->tf_eax = mcp->mc_eax;
		tp->tf_eip = mcp->mc_eip;
		tp->tf_eflags = eflags;
		tp->tf_esp = mcp->mc_esp;
		tp->tf_ss = mcp->mc_ss;
		td->td_pcb->pcb_gs = mcp->mc_gs;
	}
	return (ret);
}
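
/*
 * For reference: PSL_USERCHANGE is the mask of eflags bits a process may
 * legitimately modify; everything outside it (e.g. IOPL) is taken from the
 * live trapframe instead of the supplied context, which is how the promise
 * in the comment above is kept.
 */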

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifndef DEV_NPX
	mcp->mc_fpformat = _MC_FPFMT_NODEV;
	mcp->mc_ownedfp = _MC_FPOWNED_NONE;
#else
	union savefpu *addr;

	/*
	 * XXX mc_fpstate might be misaligned, since its declaration is not
	 * unportabilized using __attribute__((aligned(16))) like the
	 * declaration of struct savemm, and anyway, alignment doesn't work
	 * for auto variables since we don't use gcc's pessimal stack
	 * alignment.  Work around this by abusing the spare fields after
	 * mcp->mc_fpstate.
	 *
	 * XXX unpessimize most cases by only aligning when fxsave might be
	 * called, although this requires knowing too much about
	 * npxgetregs()'s internals.
	 */
	addr = (union savefpu *)&mcp->mc_fpstate;
	if (td == PCPU_GET(fpcurthread) &&
#ifdef CPU_ENABLE_SSE
	    cpu_fxsr &&
#endif
	    ((uintptr_t)(void *)addr & 0xF)) {
		do
			addr = (void *)((char *)addr + 4);
		while ((uintptr_t)(void *)addr & 0xF);
	}
	mcp->mc_ownedfp = npxgetregs(td, addr);
	if (addr != (union savefpu *)&mcp->mc_fpstate) {
		bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
		bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
	}
	mcp->mc_fpformat = npxformat();
#endif
}
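
/*
 * For reference: fxsave requires a 16-byte aligned save area, hence the
 * bump-by-4 realignment loop above; the aligned copy is then moved back
 * into mc_fpstate so the exported layout is unchanged.
 */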

static int
set_fpcontext(struct thread *td, const mcontext_t *mcp)
{
	union savefpu *addr;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
	    mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		/* XXX align as above. */
		addr = (union savefpu *)&mcp->mc_fpstate;
		if (td == PCPU_GET(fpcurthread) &&
#ifdef CPU_ENABLE_SSE
		    cpu_fxsr &&
#endif
		    ((uintptr_t)(void *)addr & 0xF)) {
			do
				addr = (void *)((char *)addr + 4);
			while ((uintptr_t)(void *)addr & 0xF);
			bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
		}
#ifdef CPU_ENABLE_SSE
		if (cpu_fxsr)
			addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
#endif
		/*
		 * XXX we violate the dubious requirement that npxsetregs()
		 * be called with interrupts disabled.
		 */
		npxsetregs(td, addr);
		/*
		 * Don't bother putting things back where they were in the
		 * misaligned case, since we know that the caller won't use
		 * them again.
		 */
	} else
		return (EINVAL);
	return (0);
}
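
/*
 * For reference: masking with cpu_mxcsr_mask matters because fxrstor raises
 * #GP if any reserved MXCSR bit is set, so restoring an unchecked
 * user-supplied value could otherwise panic the kernel.
 */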

static void
fpstate_drop(struct thread *td)
{
	register_t s;

	s = intr_disable();
	if (PCPU_GET(fpcurthread) == td)
		npxdrop();
	/*
	 * XXX force a full drop of the npx.  The above only drops it if we
	 * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
	 *
	 * XXX I don't much like npxgetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of npxgetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
	intr_restore(s);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[4] = rdr4();
		dbregs->dr[5] = rdr5();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[4] = 0;
		dbregs->dr[5] = 0;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr4(dbregs->dr[4]);
		load_dr5(dbregs->dr[5]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected or unreproducible system crash.
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
		}

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within
		 * the process's address space.  If a process could do this,
		 * it could halt the system by setting a breakpoint in the
		 * kernel (if ddb was enabled).  Thus, we need to check to
		 * make sure that no breakpoints are being enabled for
		 * addresses outside the process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}
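
/*
 * For reference: in DR7 the per-breakpoint access type 0x02 and length 0x02
 * are reserved encodings on these processors (the defined types are execute,
 * write and read/write; the defined lengths are 1, 2 and 4 bytes), which is
 * why both are rejected above.
 */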

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all local and global enable bits in the dr7 register are
		 * zero, thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * at least one of the breakpoints was hit, check to see
	 * which ones and if any of them are user space addresses
	 */

	if (bp & 0x01)
		addr[nbp++] = (caddr_t)rdr0();
	if (bp & 0x02)
		addr[nbp++] = (caddr_t)rdr1();
	if (bp & 0x04)
		addr[nbp++] = (caddr_t)rdr2();
	if (bp & 0x08)
		addr[nbp++] = (caddr_t)rdr3();

	for (i = 0; i < nbp; i++)
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS)
			/* addr[i] is in user space */
			return nbp;

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, thus cannot be
 * called from the debugger.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

void
outb(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}
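
/*
 * Illustrative use (hypothetical session): with these out-of-line copies
 * the debugger can evaluate, say, "call inb(0x60)" from the ddb(4) prompt,
 * which the inline macro versions in <machine/cpufunc.h> do not allow.
 */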