2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
6 * This code is derived from software contributed to Berkeley by
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
43 #include "opt_atalk.h"
44 #include "opt_compat.h"
50 #include "opt_kstack_pages.h"
51 #include "opt_maxmem.h"
52 #include "opt_msgbuf.h"
54 #include "opt_perfmon.h"
56 #include <sys/param.h>
58 #include <sys/systm.h>
62 #include <sys/callout.h>
65 #include <sys/eventhandler.h>
67 #include <sys/imgact.h>
69 #include <sys/kernel.h>
71 #include <sys/linker.h>
73 #include <sys/malloc.h>
74 #include <sys/memrange.h>
75 #include <sys/msgbuf.h>
76 #include <sys/mutex.h>
78 #include <sys/ptrace.h>
79 #include <sys/reboot.h>
80 #include <sys/sched.h>
81 #include <sys/signalvar.h>
82 #include <sys/sysctl.h>
83 #include <sys/sysent.h>
84 #include <sys/sysproto.h>
85 #include <sys/ucontext.h>
86 #include <sys/vmmeter.h>
89 #include <vm/vm_extern.h>
90 #include <vm/vm_kern.h>
91 #include <vm/vm_page.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_pager.h>
95 #include <vm/vm_param.h>
99 #error KDB must be enabled in order for DDB to work!
102 #include <ddb/db_sym.h>
105 #include <pc98/pc98/pc98_machdep.h>
107 #include <net/netisr.h>
109 #include <machine/bootinfo.h>
110 #include <machine/clock.h>
111 #include <machine/cpu.h>
112 #include <machine/cputypes.h>
113 #include <machine/intr_machdep.h>
114 #include <machine/mca.h>
115 #include <machine/md_var.h>
116 #include <machine/pc/bios.h>
117 #include <machine/pcb.h>
118 #include <machine/pcb_ext.h>
119 #include <machine/proc.h>
120 #include <machine/reg.h>
121 #include <machine/sigframe.h>
122 #include <machine/specialreg.h>
123 #include <machine/vm86.h>
125 #include <machine/perfmon.h>
128 #include <machine/smp.h>
132 #include <i386/isa/icu.h>
135 /* Sanity check for __curthread() */
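/* __curthread() fetches pc_curthread with a single movl from %fs:0, so the field must stay the first member of struct pcpu. */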
136 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
138 extern void init386(int first);
139 extern void dblfault_handler(void);
141 extern void printcpuinfo(void); /* XXX header file */
142 extern void finishidentcpu(void);
143 extern void panicifcpuunsupported(void);
144 extern void initializecpu(void);
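/*
 * CS_SECURE() checks that a user-supplied %cs selector carries user (ring 3)
 * privilege; EFL_SECURE() checks that only the user-changeable bits in
 * PSL_USERCHANGE differ between the new and the old eflags.
 */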
146 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
147 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
149 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
150 #define CPU_ENABLE_SSE
153 static void cpu_startup(void *);
154 static void fpstate_drop(struct thread *td);
155 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
156 static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
157 #ifdef CPU_ENABLE_SSE
158 static void set_fpregs_xmm(struct save87 *, struct savexmm *);
159 static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
160 #endif /* CPU_ENABLE_SSE */
161 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
163 int need_pre_dma_flush; /* If 1, use wbinvd before DMA transfer. */
164 int need_post_dma_flush; /* If 1, use invd after DMA transfer. */
167 extern vm_offset_t ksym_start, ksym_end;
170 int _udatasel, _ucodesel;
173 static int ispc98 = 1;
174 SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");
179 static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
181 #ifdef COMPAT_FREEBSD4
182 static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
189 * The number of PHYSMAP entries must be one less than the number of
190 * PHYSSEG entries because the PHYSMAP entry that spans the largest
191 * physical address that is accessible by ISA DMA is split into two PHYSSEG entries.
194 #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
196 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
197 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
199 /* must be 2 less so 0 0 can signal end of chunks */
200 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
201 #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
203 struct kva_md_info kmi;
205 static struct trapframe proc0_tf;
206 struct pcpu __pcpu[MAXCPU];
210 struct mem_range_softc mem_range_softc;
217 * Good {morning,afternoon,evening,night}.
221 panicifcpuunsupported();
225 printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
226 ptoa((uintmax_t)Maxmem) / 1048576);
229 * Display any holes after the first chunk of extended memory.
234 printf("Physical memory chunk(s):\n");
235 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
238 size = phys_avail[indx + 1] - phys_avail[indx];
240 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
241 (uintmax_t)phys_avail[indx],
242 (uintmax_t)phys_avail[indx + 1] - 1,
243 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
247 vm_ksubmap_init(&kmi);
249 printf("avail memory = %ju (%ju MB)\n",
250 ptoa((uintmax_t)cnt.v_free_count),
251 ptoa((uintmax_t)cnt.v_free_count) / 1048576);
254 * Set up buffers, so they can be used to read disk labels.
257 vm_pager_bufferinit();
265 * Send an interrupt to process.
267 * Stack is set up to allow sigcode stored
268 * at top to call routine, followed by kcall
269 * to sigreturn routine below. After sigreturn
270 * resets the signal mask, the stack, and the
271 * frame pointer, it returns to the user-specified pc and psl.
276 osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
278 struct osigframe sf, *fp;
282 struct trapframe *regs;
288 PROC_LOCK_ASSERT(p, MA_OWNED);
289 sig = ksi->ksi_signo;
291 mtx_assert(&psp->ps_mtx, MA_OWNED);
293 oonstack = sigonstack(regs->tf_esp);
295 /* Allocate space for the signal handler context. */
296 if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
297 SIGISMEMBER(psp->ps_sigonstack, sig)) {
298 fp = (struct osigframe *)(td->td_sigstk.ss_sp +
299 td->td_sigstk.ss_size - sizeof(struct osigframe));
300 #if defined(COMPAT_43)
301 td->td_sigstk.ss_flags |= SS_ONSTACK;
304 fp = (struct osigframe *)regs->tf_esp - 1;
306 /* Translate the signal if appropriate. */
307 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
308 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
310 /* Build the argument list for the signal handler. */
312 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
313 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
314 /* Signal handler installed with SA_SIGINFO. */
315 sf.sf_arg2 = (register_t)&fp->sf_siginfo;
316 sf.sf_siginfo.si_signo = sig;
317 sf.sf_siginfo.si_code = ksi->ksi_code;
318 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
320 /* Old FreeBSD-style arguments. */
321 sf.sf_arg2 = ksi->ksi_code;
322 sf.sf_addr = (register_t)ksi->ksi_addr;
323 sf.sf_ahu.sf_handler = catcher;
325 mtx_unlock(&psp->ps_mtx);
328 /* Save most if not all of trap frame. */
329 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
330 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
331 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
332 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
333 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
334 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
335 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
336 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
337 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
338 sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
339 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
340 sf.sf_siginfo.si_sc.sc_gs = rgs();
341 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;
343 /* Build the signal context to be used by osigreturn(). */
344 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
345 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
346 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
347 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
348 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
349 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
350 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
351 sf.sf_siginfo.si_sc.sc_err = regs->tf_err;
354 * If we're a vm86 process, we want to save the segment registers.
355 * We also change eflags to be our emulated eflags, not the actual eflags.
358 if (regs->tf_eflags & PSL_VM) {
359 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
360 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
361 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
363 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
364 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
365 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
366 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;
368 if (vm86->vm86_has_vme == 0)
369 sf.sf_siginfo.si_sc.sc_ps =
370 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
371 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
373 /* See sendsig() for comments. */
374 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
378 * Copy the sigframe out to the user's stack.
380 if (copyout(&sf, fp, sizeof(*fp)) != 0) {
382 printf("process %ld has trashed its stack\n", (long)p->p_pid);
388 regs->tf_esp = (int)fp;
389 regs->tf_eip = PS_STRINGS - szosigcode;
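/* The signal trampoline (osigcode) is copied just below PS_STRINGS at exec time, so placing %eip szosigcode bytes below PS_STRINGS starts the handler via the trampoline, which later enters osigreturn(). */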
390 regs->tf_eflags &= ~(PSL_T | PSL_D);
391 regs->tf_cs = _ucodesel;
392 regs->tf_ds = _udatasel;
393 regs->tf_es = _udatasel;
394 regs->tf_fs = _udatasel;
396 regs->tf_ss = _udatasel;
398 mtx_lock(&psp->ps_mtx);
400 #endif /* COMPAT_43 */
402 #ifdef COMPAT_FREEBSD4
404 freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
406 struct sigframe4 sf, *sfp;
410 struct trapframe *regs;
416 PROC_LOCK_ASSERT(p, MA_OWNED);
417 sig = ksi->ksi_signo;
419 mtx_assert(&psp->ps_mtx, MA_OWNED);
421 oonstack = sigonstack(regs->tf_esp);
423 /* Save user context. */
424 bzero(&sf, sizeof(sf));
425 sf.sf_uc.uc_sigmask = *mask;
426 sf.sf_uc.uc_stack = td->td_sigstk;
427 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
428 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
429 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
430 sf.sf_uc.uc_mcontext.mc_gs = rgs();
431 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
433 /* Allocate space for the signal handler context. */
434 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
435 SIGISMEMBER(psp->ps_sigonstack, sig)) {
436 sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
437 td->td_sigstk.ss_size - sizeof(struct sigframe4));
438 #if defined(COMPAT_43)
439 td->td_sigstk.ss_flags |= SS_ONSTACK;
442 sfp = (struct sigframe4 *)regs->tf_esp - 1;
444 /* Translate the signal if appropriate. */
445 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
446 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
448 /* Build the argument list for the signal handler. */
450 sf.sf_ucontext = (register_t)&sfp->sf_uc;
451 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
452 /* Signal handler installed with SA_SIGINFO. */
453 sf.sf_siginfo = (register_t)&sfp->sf_si;
454 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
456 /* Fill in POSIX parts */
457 sf.sf_si.si_signo = sig;
458 sf.sf_si.si_code = ksi->ksi_code;
459 sf.sf_si.si_addr = ksi->ksi_addr;
461 /* Old FreeBSD-style arguments. */
462 sf.sf_siginfo = ksi->ksi_code;
463 sf.sf_addr = (register_t)ksi->ksi_addr;
464 sf.sf_ahu.sf_handler = catcher;
466 mtx_unlock(&psp->ps_mtx);
470 * If we're a vm86 process, we want to save the segment registers.
471 * We also change eflags to be our emulated eflags, not the actual eflags.
474 if (regs->tf_eflags & PSL_VM) {
475 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
476 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
478 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
479 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
480 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
481 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
483 if (vm86->vm86_has_vme == 0)
484 sf.sf_uc.uc_mcontext.mc_eflags =
485 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
486 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
489 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
490 * syscalls made by the signal handler. This just avoids
491 * wasting time for our lazy fixup of such faults. PSL_NT
492 * does nothing in vm86 mode, but vm86 programs can set it
493 * almost legitimately in probes for old cpu types.
495 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
499 * Copy the sigframe out to the user's stack.
501 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
503 printf("process %ld has trashed its stack\n", (long)p->p_pid);
509 regs->tf_esp = (int)sfp;
510 regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
511 regs->tf_eflags &= ~(PSL_T | PSL_D);
512 regs->tf_cs = _ucodesel;
513 regs->tf_ds = _udatasel;
514 regs->tf_es = _udatasel;
515 regs->tf_fs = _udatasel;
516 regs->tf_ss = _udatasel;
518 mtx_lock(&psp->ps_mtx);
520 #endif /* COMPAT_FREEBSD4 */
523 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
525 struct sigframe sf, *sfp;
530 struct trapframe *regs;
531 struct segment_descriptor *sdp;
537 PROC_LOCK_ASSERT(p, MA_OWNED);
538 sig = ksi->ksi_signo;
540 mtx_assert(&psp->ps_mtx, MA_OWNED);
541 #ifdef COMPAT_FREEBSD4
542 if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
543 freebsd4_sendsig(catcher, ksi, mask);
548 if (SIGISMEMBER(psp->ps_osigset, sig)) {
549 osendsig(catcher, ksi, mask);
554 oonstack = sigonstack(regs->tf_esp);
556 /* Save user context. */
557 bzero(&sf, sizeof(sf));
558 sf.sf_uc.uc_sigmask = *mask;
559 sf.sf_uc.uc_stack = td->td_sigstk;
560 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
561 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
562 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
563 sf.sf_uc.uc_mcontext.mc_gs = rgs();
564 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
565 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
566 get_fpcontext(td, &sf.sf_uc.uc_mcontext);
569 * Unconditionally fill the fsbase and gsbase into the mcontext.
571 sdp = &td->td_pcb->pcb_fsd;
572 sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
574 sdp = &td->td_pcb->pcb_gsd;
575 sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
578 /* Allocate space for the signal handler context. */
579 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
580 SIGISMEMBER(psp->ps_sigonstack, sig)) {
581 sp = td->td_sigstk.ss_sp +
582 td->td_sigstk.ss_size - sizeof(struct sigframe);
583 #if defined(COMPAT_43)
584 td->td_sigstk.ss_flags |= SS_ONSTACK;
587 sp = (char *)regs->tf_esp - sizeof(struct sigframe);
588 /* Align to 16 bytes. */
589 sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
591 /* Translate the signal if appropriate. */
592 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
593 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
595 /* Build the argument list for the signal handler. */
597 sf.sf_ucontext = (register_t)&sfp->sf_uc;
598 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
599 /* Signal handler installed with SA_SIGINFO. */
600 sf.sf_siginfo = (register_t)&sfp->sf_si;
601 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
603 /* Fill in POSIX parts */
604 sf.sf_si = ksi->ksi_info;
605 sf.sf_si.si_signo = sig; /* maybe a translated signal */
607 /* Old FreeBSD-style arguments. */
608 sf.sf_siginfo = ksi->ksi_code;
609 sf.sf_addr = (register_t)ksi->ksi_addr;
610 sf.sf_ahu.sf_handler = catcher;
612 mtx_unlock(&psp->ps_mtx);
616 * If we're a vm86 process, we want to save the segment registers.
617 * We also change eflags to be our emulated eflags, not the actual eflags.
620 if (regs->tf_eflags & PSL_VM) {
621 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
622 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
624 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
625 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
626 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
627 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
629 if (vm86->vm86_has_vme == 0)
630 sf.sf_uc.uc_mcontext.mc_eflags =
631 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
632 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
635 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
636 * syscalls made by the signal handler. This just avoids
637 * wasting time for our lazy fixup of such faults. PSL_NT
638 * does nothing in vm86 mode, but vm86 programs can set it
639 * almost legitimately in probes for old cpu types.
641 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
645 * Copy the sigframe out to the user's stack.
647 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
649 printf("process %ld has trashed its stack\n", (long)p->p_pid);
655 regs->tf_esp = (int)sfp;
656 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
657 regs->tf_eflags &= ~(PSL_T | PSL_D);
658 regs->tf_cs = _ucodesel;
659 regs->tf_ds = _udatasel;
660 regs->tf_es = _udatasel;
661 regs->tf_fs = _udatasel;
662 regs->tf_ss = _udatasel;
664 mtx_lock(&psp->ps_mtx);
668 * System call to clean up state after a signal
669 * has been taken. Reset signal mask and
670 * stack state from context left by sendsig (above).
671 * Return to previous pc and psl as specified by
672 * context left by sendsig. Check carefully to
673 * make sure that the user has not modified the
674 * state to gain improper privileges.
682 struct osigreturn_args /* {
683 struct osigcontext *sigcntxp;
686 struct osigcontext sc;
687 struct trapframe *regs;
688 struct osigcontext *scp;
693 error = copyin(uap->sigcntxp, &sc, sizeof(sc));
698 if (eflags & PSL_VM) {
699 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
700 struct vm86_kernel *vm86;
703 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
704 * set up the vm86 area, and we can't enter vm86 mode.
706 if (td->td_pcb->pcb_ext == 0)
708 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
709 if (vm86->vm86_inited == 0)
712 /* Go back to user mode if both flags are set. */
713 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
714 ksiginfo_init_trap(&ksi);
715 ksi.ksi_signo = SIGBUS;
716 ksi.ksi_code = BUS_OBJERR;
717 ksi.ksi_addr = (void *)regs->tf_eip;
718 trapsignal(td, &ksi);
721 if (vm86->vm86_has_vme) {
722 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
723 (eflags & VME_USERCHANGE) | PSL_VM;
725 vm86->vm86_eflags = eflags; /* save VIF, VIP */
726 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
727 (eflags & VM_USERCHANGE) | PSL_VM;
729 tf->tf_vm86_ds = scp->sc_ds;
730 tf->tf_vm86_es = scp->sc_es;
731 tf->tf_vm86_fs = scp->sc_fs;
732 tf->tf_vm86_gs = scp->sc_gs;
733 tf->tf_ds = _udatasel;
734 tf->tf_es = _udatasel;
735 tf->tf_fs = _udatasel;
738 * Don't allow users to change privileged or reserved flags.
741 * XXX do allow users to change the privileged flag PSL_RF.
742 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
743 * should sometimes set it there too. tf_eflags is kept in
744 * the signal context during signal handling and there is no
745 * other place to remember it, so the PSL_RF bit may be
746 * corrupted by the signal handler without us knowing.
747 * Corruption of the PSL_RF bit at worst causes one more or
748 * one less debugger trap, so allowing it is fairly harmless.
750 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
755 * Don't allow users to load a valid privileged %cs. Let the
756 * hardware check for invalid selectors, excess privilege in
757 * other selectors, invalid %eip's and invalid %esp's.
759 if (!CS_SECURE(scp->sc_cs)) {
760 ksiginfo_init_trap(&ksi);
761 ksi.ksi_signo = SIGBUS;
762 ksi.ksi_code = BUS_OBJERR;
763 ksi.ksi_trapno = T_PROTFLT;
764 ksi.ksi_addr = (void *)regs->tf_eip;
765 trapsignal(td, &ksi);
768 regs->tf_ds = scp->sc_ds;
769 regs->tf_es = scp->sc_es;
770 regs->tf_fs = scp->sc_fs;
773 /* Restore remaining registers. */
774 regs->tf_eax = scp->sc_eax;
775 regs->tf_ebx = scp->sc_ebx;
776 regs->tf_ecx = scp->sc_ecx;
777 regs->tf_edx = scp->sc_edx;
778 regs->tf_esi = scp->sc_esi;
779 regs->tf_edi = scp->sc_edi;
780 regs->tf_cs = scp->sc_cs;
781 regs->tf_ss = scp->sc_ss;
782 regs->tf_isp = scp->sc_isp;
783 regs->tf_ebp = scp->sc_fp;
784 regs->tf_esp = scp->sc_sp;
785 regs->tf_eip = scp->sc_pc;
786 regs->tf_eflags = eflags;
788 #if defined(COMPAT_43)
789 if (scp->sc_onstack & 1)
790 td->td_sigstk.ss_flags |= SS_ONSTACK;
792 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
794 kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
796 return (EJUSTRETURN);
798 #endif /* COMPAT_43 */
800 #ifdef COMPAT_FREEBSD4
805 freebsd4_sigreturn(td, uap)
807 struct freebsd4_sigreturn_args /* {
808 const ucontext4 *sigcntxp;
812 struct trapframe *regs;
813 struct ucontext4 *ucp;
814 int cs, eflags, error;
817 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
822 eflags = ucp->uc_mcontext.mc_eflags;
823 if (eflags & PSL_VM) {
824 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
825 struct vm86_kernel *vm86;
828 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
829 * set up the vm86 area, and we can't enter vm86 mode.
831 if (td->td_pcb->pcb_ext == 0)
833 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
834 if (vm86->vm86_inited == 0)
837 /* Go back to user mode if both flags are set. */
838 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
839 ksiginfo_init_trap(&ksi);
840 ksi.ksi_signo = SIGBUS;
841 ksi.ksi_code = BUS_OBJERR;
842 ksi.ksi_addr = (void *)regs->tf_eip;
843 trapsignal(td, &ksi);
845 if (vm86->vm86_has_vme) {
846 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
847 (eflags & VME_USERCHANGE) | PSL_VM;
849 vm86->vm86_eflags = eflags; /* save VIF, VIP */
850 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
851 (eflags & VM_USERCHANGE) | PSL_VM;
853 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
854 tf->tf_eflags = eflags;
855 tf->tf_vm86_ds = tf->tf_ds;
856 tf->tf_vm86_es = tf->tf_es;
857 tf->tf_vm86_fs = tf->tf_fs;
858 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
859 tf->tf_ds = _udatasel;
860 tf->tf_es = _udatasel;
861 tf->tf_fs = _udatasel;
864 * Don't allow users to change privileged or reserved flags.
867 * XXX do allow users to change the privileged flag PSL_RF.
868 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
869 * should sometimes set it there too. tf_eflags is kept in
870 * the signal context during signal handling and there is no
871 * other place to remember it, so the PSL_RF bit may be
872 * corrupted by the signal handler without us knowing.
873 * Corruption of the PSL_RF bit at worst causes one more or
874 * one less debugger trap, so allowing it is fairly harmless.
876 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
877 printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
882 * Don't allow users to load a valid privileged %cs. Let the
883 * hardware check for invalid selectors, excess privilege in
884 * other selectors, invalid %eip's and invalid %esp's.
886 cs = ucp->uc_mcontext.mc_cs;
887 if (!CS_SECURE(cs)) {
888 printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
889 ksiginfo_init_trap(&ksi);
890 ksi.ksi_signo = SIGBUS;
891 ksi.ksi_code = BUS_OBJERR;
892 ksi.ksi_trapno = T_PROTFLT;
893 ksi.ksi_addr = (void *)regs->tf_eip;
894 trapsignal(td, &ksi);
898 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
901 #if defined(COMPAT_43)
902 if (ucp->uc_mcontext.mc_onstack & 1)
903 td->td_sigstk.ss_flags |= SS_ONSTACK;
905 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
907 kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
908 return (EJUSTRETURN);
910 #endif /* COMPAT_FREEBSD4 */
918 struct sigreturn_args /* {
919 const struct __ucontext *sigcntxp;
923 struct trapframe *regs;
925 int cs, eflags, error, ret;
928 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
933 eflags = ucp->uc_mcontext.mc_eflags;
934 if (eflags & PSL_VM) {
935 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
936 struct vm86_kernel *vm86;
939 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
940 * set up the vm86 area, and we can't enter vm86 mode.
942 if (td->td_pcb->pcb_ext == 0)
944 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
945 if (vm86->vm86_inited == 0)
948 /* Go back to user mode if both flags are set. */
949 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
950 ksiginfo_init_trap(&ksi);
951 ksi.ksi_signo = SIGBUS;
952 ksi.ksi_code = BUS_OBJERR;
953 ksi.ksi_addr = (void *)regs->tf_eip;
954 trapsignal(td, &ksi);
957 if (vm86->vm86_has_vme) {
958 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
959 (eflags & VME_USERCHANGE) | PSL_VM;
961 vm86->vm86_eflags = eflags; /* save VIF, VIP */
962 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
963 (eflags & VM_USERCHANGE) | PSL_VM;
965 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
966 tf->tf_eflags = eflags;
967 tf->tf_vm86_ds = tf->tf_ds;
968 tf->tf_vm86_es = tf->tf_es;
969 tf->tf_vm86_fs = tf->tf_fs;
970 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
971 tf->tf_ds = _udatasel;
972 tf->tf_es = _udatasel;
973 tf->tf_fs = _udatasel;
976 * Don't allow users to change privileged or reserved flags.
979 * XXX do allow users to change the privileged flag PSL_RF.
980 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
981 * should sometimes set it there too. tf_eflags is kept in
982 * the signal context during signal handling and there is no
983 * other place to remember it, so the PSL_RF bit may be
984 * corrupted by the signal handler without us knowing.
985 * Corruption of the PSL_RF bit at worst causes one more or
986 * one less debugger trap, so allowing it is fairly harmless.
988 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
989 printf("sigreturn: eflags = 0x%x\n", eflags);
994 * Don't allow users to load a valid privileged %cs. Let the
995 * hardware check for invalid selectors, excess privilege in
996 * other selectors, invalid %eip's and invalid %esp's.
998 cs = ucp->uc_mcontext.mc_cs;
999 if (!CS_SECURE(cs)) {
1000 printf("sigreturn: cs = 0x%x\n", cs);
1001 ksiginfo_init_trap(&ksi);
1002 ksi.ksi_signo = SIGBUS;
1003 ksi.ksi_code = BUS_OBJERR;
1004 ksi.ksi_trapno = T_PROTFLT;
1005 ksi.ksi_addr = (void *)regs->tf_eip;
1006 trapsignal(td, &ksi);
1010 ret = set_fpcontext(td, &ucp->uc_mcontext);
1013 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
1016 #if defined(COMPAT_43)
1017 if (ucp->uc_mcontext.mc_onstack & 1)
1018 td->td_sigstk.ss_flags |= SS_ONSTACK;
1020 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
1022 kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
1024 return (EJUSTRETURN);
1028 * Machine dependent boot() routine
1030 * I haven't seen anything to put here yet
1031 * Possibly some stuff might be grafted back here from boot()
1039 * Flush the D-cache for non-DMA I/O so that the I-cache can
1040 * be made coherent later.
1043 cpu_flush_dcache(void *ptr, size_t len)
1045 /* Not applicable */
1048 /* Get current clock frequency for the given cpu id. */
1050 cpu_est_clockrate(int cpu_id, uint64_t *rate)
1053 uint64_t tsc1, tsc2;
1055 if (pcpu_find(cpu_id) == NULL || rate == NULL)
1058 return (EOPNOTSUPP);
1060 /* If we're booting, trust the rate calibrated moments ago. */
1067 /* Schedule ourselves on the indicated cpu. */
1068 thread_lock(curthread);
1069 sched_bind(curthread, cpu_id);
1070 thread_unlock(curthread);
1073 /* Calibrate by measuring a short delay. */
1074 reg = intr_disable();
1081 thread_lock(curthread);
1082 sched_unbind(curthread);
1083 thread_unlock(curthread);
1087 * Calculate the difference in readings, convert to a frequency in Hz, and
1088 * subtract 0.5% of the total. Empirical testing has shown that
1089 * overhead in DELAY() works out to approximately this value.
1092 *rate = tsc2 * 1000 - tsc2 * 5;
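/* Assuming the usual DELAY(1000) calibration window used by this routine (elided above), tsc2 holds the TSC ticks elapsed over ~1 ms; multiplying by 1000 scales that to ticks per second, and tsc2 * 5 is the 0.5% DELAY() overhead correction described above. */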
1097 * Shutdown the CPU as much as possible
1107 * Hook to idle the CPU when possible. In the SMP case we default to
1108 * off because a halted cpu will not currently pick up a new thread in the
1109 * run queue until the next timer tick. If turned on this will result in
1110 * approximately a 4.2% loss in real time performance in buildworld tests
1111 * (but improves user and sys times oddly enough), and saves approximately
1112 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
1114 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
1115 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
1116 * Then we can have our cake and eat it too.
1118 * XXX I'm turning it on for SMP as well by default for now. It seems to
1119 * help lock contention somewhat, and this is critical for HTT. -Peter
1121 static int cpu_idle_hlt = 1;
1122 TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt);
1123 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
1124 &cpu_idle_hlt, 0, "Idle loop HLT enable");
1127 cpu_idle_default(void)
1130 * we must absolutely guarantee that hlt is the
1131 * absolute next instruction after sti or we
1132 * introduce a timing window.
1134 __asm __volatile("sti; hlt");
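/* On x86, the instruction immediately after sti executes before pending interrupts are delivered (the STI interrupt shadow), so pairing sti and hlt back to back guarantees no interrupt can slip in between them and leave hlt waiting for the next one. */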
1138 * Note that we have to be careful here to avoid a race between checking
1139 * sched_runnable() and actually halting. If we don't do this, we may waste
1140 * the time between calling hlt and the next interrupt even though there
1141 * is a runnable process.
1148 if (mp_grab_cpu_hlt())
1154 if (sched_runnable())
1162 cpu_idle_wakeup(int cpu)
1168 /* Other subsystems (e.g., ACPI) can hook this later. */
1169 void (*cpu_idle_hook)(void) = cpu_idle_default;
1172 * Reset registers to default values on exec.
1175 exec_setregs(td, entry, stack, ps_strings)
1181 struct trapframe *regs = td->td_frame;
1182 struct pcb *pcb = td->td_pcb;
1184 /* Reset pc->pcb_gs and %gs before possibly invalidating it. */
1185 pcb->pcb_gs = _udatasel;
1188 mtx_lock_spin(&dt_lock);
1189 if (td->td_proc->p_md.md_ldt)
1192 mtx_unlock_spin(&dt_lock);
1194 bzero((char *)regs, sizeof(struct trapframe));
1195 regs->tf_eip = entry;
1196 regs->tf_esp = stack;
1197 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
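/* PSL_T (the trace flag) is the only bit carried over from the old eflags, presumably so a debugger single-stepping across execve() keeps control; everything else is reset to the standard user PSL_USER value. */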
1198 regs->tf_ss = _udatasel;
1199 regs->tf_ds = _udatasel;
1200 regs->tf_es = _udatasel;
1201 regs->tf_fs = _udatasel;
1202 regs->tf_cs = _ucodesel;
1204 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
1205 regs->tf_ebx = ps_strings;
1208 * Reset the hardware debug registers if they were in use.
1209 * They won't have any meaning for the newly exec'd process.
1211 if (pcb->pcb_flags & PCB_DBREGS) {
1218 if (pcb == PCPU_GET(curpcb)) {
1220 * Clear the debug registers on the running
1221 * CPU, otherwise they will end up affecting
1222 * the next process we switch to.
1226 pcb->pcb_flags &= ~PCB_DBREGS;
1230 * Initialize the math emulator (if any) for the current process.
1231 * Actually, just clear the bit that says that the emulator has
1232 * been initialized. Initialization is delayed until the process
1233 * traps to the emulator (if it is done at all) mainly because
1234 * emulators don't provide an entry point for initialization.
1236 td->td_pcb->pcb_flags &= ~FP_SOFTFP;
1237 pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;
1240 * Drop the FP state if we hold it, so that the process gets a
1241 * clean FP state if it uses the FPU again.
1246 * XXX - Linux emulator
1247 * Make sure edx is 0x0 on entry. Linux binaries depend on it.
1250 td->td_retval[1] = 0;
1261 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
1263 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
1264 * instructions. We must set the CR0_MP bit and use the CR0_TS
1265 * bit to control the trap, because setting the CR0_EM bit does
1266 * not cause WAIT instructions to trap. It's important to trap
1267 * WAIT instructions - otherwise the "wait" variants of no-wait
1268 * control instructions would degenerate to the "no-wait" variants
1269 * after FP context switches but work correctly otherwise. It's
1270 * particularly important to trap WAITs when there is no NPX -
1271 * otherwise the "wait" variants would always degenerate.
1273 * Try setting CR0_NE to get correct error reporting on 486DX's.
1274 * Setting it should fail or do nothing on lesser processors.
1276 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
1281 u_long bootdev; /* not a struct cdev *- encoding is different */
1282 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
1283 CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");
1286 * Initialize 386 and configure to run kernel
1290 * Initialize segments & interrupt table
1294 union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */
1295 static struct gate_descriptor idt0[NIDT];
1296 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
1297 union descriptor ldt[NLDT]; /* local descriptor table */
1298 struct region_descriptor r_gdt, r_idt; /* table descriptors */
1299 struct mtx dt_lock; /* lock for GDT and LDT */
1301 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
1302 extern int has_f00f_bug;
1305 static struct i386tss dblfault_tss;
1306 static char dblfault_stack[PAGE_SIZE];
1308 extern vm_offset_t proc0kstack;
1312 * software prototypes -- in more palatable form.
1314 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
1315 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
1317 struct soft_segment_descriptor gdt_segs[] = {
1318 /* GNULL_SEL 0 Null Descriptor */
1324 .ssd_xx = 0, .ssd_xx1 = 0,
1327 /* GPRIV_SEL 1 SMP Per-Processor Private Data Descriptor */
1329 .ssd_limit = 0xfffff,
1330 .ssd_type = SDT_MEMRWA,
1333 .ssd_xx = 0, .ssd_xx1 = 0,
1336 /* GUFS_SEL 2 %fs Descriptor for user */
1338 .ssd_limit = 0xfffff,
1339 .ssd_type = SDT_MEMRWA,
1342 .ssd_xx = 0, .ssd_xx1 = 0,
1345 /* GUGS_SEL 3 %gs Descriptor for user */
1347 .ssd_limit = 0xfffff,
1348 .ssd_type = SDT_MEMRWA,
1351 .ssd_xx = 0, .ssd_xx1 = 0,
1354 /* GCODE_SEL 4 Code Descriptor for kernel */
1356 .ssd_limit = 0xfffff,
1357 .ssd_type = SDT_MEMERA,
1360 .ssd_xx = 0, .ssd_xx1 = 0,
1363 /* GDATA_SEL 5 Data Descriptor for kernel */
1365 .ssd_limit = 0xfffff,
1366 .ssd_type = SDT_MEMRWA,
1369 .ssd_xx = 0, .ssd_xx1 = 0,
1372 /* GUCODE_SEL 6 Code Descriptor for user */
1374 .ssd_limit = 0xfffff,
1375 .ssd_type = SDT_MEMERA,
1378 .ssd_xx = 0, .ssd_xx1 = 0,
1381 /* GUDATA_SEL 7 Data Descriptor for user */
1383 .ssd_limit = 0xfffff,
1384 .ssd_type = SDT_MEMRWA,
1387 .ssd_xx = 0, .ssd_xx1 = 0,
1390 /* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
1391 { .ssd_base = 0x400,
1392 .ssd_limit = 0xfffff,
1393 .ssd_type = SDT_MEMRWA,
1396 .ssd_xx = 0, .ssd_xx1 = 0,
1399 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */
1402 .ssd_limit = sizeof(struct i386tss)-1,
1403 .ssd_type = SDT_SYS386TSS,
1406 .ssd_xx = 0, .ssd_xx1 = 0,
1409 /* GLDT_SEL 10 LDT Descriptor */
1410 { .ssd_base = (int) ldt,
1411 .ssd_limit = sizeof(ldt)-1,
1412 .ssd_type = SDT_SYSLDT,
1415 .ssd_xx = 0, .ssd_xx1 = 0,
1418 /* GUSERLDT_SEL 11 User LDT Descriptor per process */
1419 { .ssd_base = (int) ldt,
1420 .ssd_limit = (512 * sizeof(union descriptor)-1),
1421 .ssd_type = SDT_SYSLDT,
1424 .ssd_xx = 0, .ssd_xx1 = 0,
1427 /* GPANIC_SEL 12 Panic Tss Descriptor */
1428 { .ssd_base = (int) &dblfault_tss,
1429 .ssd_limit = sizeof(struct i386tss)-1,
1430 .ssd_type = SDT_SYS386TSS,
1433 .ssd_xx = 0, .ssd_xx1 = 0,
1436 /* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
1438 .ssd_limit = 0xfffff,
1439 .ssd_type = SDT_MEMERA,
1442 .ssd_xx = 0, .ssd_xx1 = 0,
1445 /* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
1447 .ssd_limit = 0xfffff,
1448 .ssd_type = SDT_MEMERA,
1451 .ssd_xx = 0, .ssd_xx1 = 0,
1454 /* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
1456 .ssd_limit = 0xfffff,
1457 .ssd_type = SDT_MEMRWA,
1460 .ssd_xx = 0, .ssd_xx1 = 0,
1463 /* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
1465 .ssd_limit = 0xfffff,
1466 .ssd_type = SDT_MEMRWA,
1469 .ssd_xx = 0, .ssd_xx1 = 0,
1472 /* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
1474 .ssd_limit = 0xfffff,
1475 .ssd_type = SDT_MEMRWA,
1478 .ssd_xx = 0, .ssd_xx1 = 0,
1481 /* GNDIS_SEL 18 NDIS Descriptor */
1487 .ssd_xx = 0, .ssd_xx1 = 0,
1492 static struct soft_segment_descriptor ldt_segs[] = {
1493 /* Null Descriptor - overwritten by call gate */
1499 .ssd_xx = 0, .ssd_xx1 = 0,
1502 /* Null Descriptor - overwritten by call gate */
1508 .ssd_xx = 0, .ssd_xx1 = 0,
1511 /* Null Descriptor - overwritten by call gate */
1517 .ssd_xx = 0, .ssd_xx1 = 0,
1520 /* Code Descriptor for user */
1522 .ssd_limit = 0xfffff,
1523 .ssd_type = SDT_MEMERA,
1526 .ssd_xx = 0, .ssd_xx1 = 0,
1529 /* Null Descriptor - overwritten by call gate */
1535 .ssd_xx = 0, .ssd_xx1 = 0,
1538 /* Data Descriptor for user */
1540 .ssd_limit = 0xfffff,
1541 .ssd_type = SDT_MEMRWA,
1544 .ssd_xx = 0, .ssd_xx1 = 0,
1550 setidt(idx, func, typ, dpl, selec)
1557 struct gate_descriptor *ip;
1560 ip->gd_looffset = (int)func;
1561 ip->gd_selector = selec;
1567 ip->gd_hioffset = ((int)func)>>16 ;
1571 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1572 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1573 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1574 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1575 IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
1579 * Display the index and function name of any IDT entries that don't use
1580 * the default 'rsvd' entry point.
1582 DB_SHOW_COMMAND(idt, db_show_idt)
1584 struct gate_descriptor *ip;
1589 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
1590 func = (ip->gd_hioffset << 16 | ip->gd_looffset);
1591 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1592 db_printf("%3d\t", idx);
1593 db_printsym(func, DB_STGY_PROC);
1600 /* Show privileged registers. */
1601 DB_SHOW_COMMAND(sysregs, db_show_sysregs)
1603 uint64_t idtr, gdtr;
1606 db_printf("idtr\t0x%08x/%04x\n",
1607 (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
1609 db_printf("gdtr\t0x%08x/%04x\n",
1610 (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
1611 db_printf("ldtr\t0x%04x\n", rldt());
1612 db_printf("tr\t0x%04x\n", rtr());
1613 db_printf("cr0\t0x%08x\n", rcr0());
1614 db_printf("cr2\t0x%08x\n", rcr2());
1615 db_printf("cr3\t0x%08x\n", rcr3());
1616 db_printf("cr4\t0x%08x\n", rcr4());
1622 struct segment_descriptor *sd;
1623 struct soft_segment_descriptor *ssd;
1625 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1626 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1627 ssd->ssd_type = sd->sd_type;
1628 ssd->ssd_dpl = sd->sd_dpl;
1629 ssd->ssd_p = sd->sd_p;
1630 ssd->ssd_def32 = sd->sd_def32;
1631 ssd->ssd_gran = sd->sd_gran;
1635 * Populate the (physmap) array with base/bound pairs describing the
1636 * available physical memory in the system, then test this memory and
1637 * build the phys_avail array describing the actually-available memory.
1639 * If we cannot accurately determine the physical memory map, then use
1640 * the value from the 0xE801 call, and failing that, the RTC.
1642 * Total memory size may be set by the kernel environment variable
1643 * hw.physmem or the compile-time define MAXMEM.
1645 * XXX first should be vm_paddr_t.
1648 getmemsize(int first)
1650 int i, off, physmap_idx, pa_indx, da_indx;
1652 u_long physmem_tunable;
1653 u_int extmem, under16;
1654 vm_paddr_t pa, physmap[PHYSMAP_SIZE];
1656 quad_t dcons_addr, dcons_size;
1658 bzero(physmap, sizeof(physmap));
1660 /* XXX - some EPSON machines can't use PG_N */
1662 if (pc98_machine_type & M_EPSON_PC98) {
1663 switch (epson_machine_id) {
1667 case EPSON_PC486_HX:
1668 case EPSON_PC486_HG:
1669 case EPSON_PC486_HA:
1676 * Perform "base memory" related probes & setup
1678 under16 = pc98_getmemsize(&basemem, &extmem);
1679 if (basemem > 640) {
1680 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
1686 * XXX if biosbasemem is now < 640, there is a `hole'
1687 * between the end of base memory and the start of
1688 * ISA memory. The hole may be empty or it may
1689 * contain BIOS code or data. Map it read/write so
1690 * that the BIOS can write to it. (Memory from 0 to
1691 * the physical end of the kernel is mapped read-only
1692 * to begin with and then parts of it are remapped.
1693 * The parts that aren't remapped form holes that
1694 * remain read-only and are unused by the kernel.
1695 * The base memory area is below the physical end of
1696 * the kernel and right now forms a read-only hole.
1697 * The part of it from PAGE_SIZE to
1698 * (trunc_page(biosbasemem * 1024) - 1) will be
1699 * remapped and used by the kernel later.)
1701 * This code is similar to the code used in
1702 * pmap_mapdev, but since no memory needs to be
1703 * allocated we simply change the mapping.
1705 for (pa = trunc_page(basemem * 1024);
1706 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1707 pmap_kenter(KERNBASE + pa, pa);
1710 * if basemem != 640, map pages r/w into vm86 page table so
1711 * that the bios can scribble on it.
1713 pte = (pt_entry_t *)vm86paddr;
1714 for (i = basemem / 4; i < 160; i++)
1715 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1718 physmap[1] = basemem * 1024;
1720 physmap[physmap_idx] = 0x100000;
1721 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
1724 * Now, physmap contains a map of physical memory.
1728 /* make hole for AP bootstrap code */
1729 physmap[1] = mp_bootaddress(physmap[1]);
1733 * Maxmem isn't the "maximum memory", it's one larger than the
1734 * highest page of the physical address space. It should be
1735 * called something like "Maxphyspage". We may adjust this
1736 * based on ``hw.physmem'' and the results of the memory test.
1738 Maxmem = atop(physmap[physmap_idx + 1]);
1741 Maxmem = MAXMEM / 4;
1744 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1745 Maxmem = atop(physmem_tunable);
1747 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1748 (boothowto & RB_VERBOSE))
1749 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1752 * If Maxmem has been increased beyond what the system has detected,
1753 * extend the last memory segment to the new limit.
1755 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1756 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1759 * We need to split the chunk if Maxmem is larger than 16MB and the
1760 * area under 16MB is not entirely populated with memory, e.g. when:
1761 * (1) system area (15-16MB region) is cut off
1762 * (2) extended memory is only over 16MB area (ex. Melco "HYPERMEMORY")
1764 if ((under16 != 16 * 1024) && (extmem > 15 * 1024)) {
1765 /* 15M - 16M region is cut off, so need to divide chunk */
1766 physmap[physmap_idx + 1] = under16 * 1024;
1768 physmap[physmap_idx] = 0x1000000;
1769 physmap[physmap_idx + 1] = physmap[2] + extmem * 1024;
1772 /* call pmap initialization to make new kernel address space */
1773 pmap_bootstrap(first);
1776 * Size up each available chunk of physical memory.
1778 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1781 phys_avail[pa_indx++] = physmap[0];
1782 phys_avail[pa_indx] = physmap[0];
1783 dump_avail[da_indx] = physmap[0];
1787 * Get dcons buffer address
1789 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1790 getenv_quad("dcons.size", &dcons_size) == 0)
1794 * physmap is in bytes, so when converting to page boundaries,
1795 * round up the start address and round down the end address.
1797 for (i = 0; i <= physmap_idx; i += 2) {
1800 end = ptoa((vm_paddr_t)Maxmem);
1801 if (physmap[i + 1] < end)
1802 end = trunc_page(physmap[i + 1]);
1803 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1804 int tmp, page_bad, full;
1805 int *ptr = (int *)CADDR1;
1809 * block out kernel memory as not available.
1811 if (pa >= KERNLOAD && pa < first)
1815 * block out dcons buffer
1818 && pa >= trunc_page(dcons_addr)
1819 && pa < dcons_addr + dcons_size)
1825 * map page into kernel: valid, read/write, non-cacheable
1827 *pte = pa | PG_V | PG_RW | pg_n;
1832 * Test for alternating 1's and 0's
1834 *(volatile int *)ptr = 0xaaaaaaaa;
1835 if (*(volatile int *)ptr != 0xaaaaaaaa)
1838 * Test for alternating 0's and 1's
1840 *(volatile int *)ptr = 0x55555555;
1841 if (*(volatile int *)ptr != 0x55555555)
1846 *(volatile int *)ptr = 0xffffffff;
1847 if (*(volatile int *)ptr != 0xffffffff)
1852 *(volatile int *)ptr = 0x0;
1853 if (*(volatile int *)ptr != 0x0)
1856 * Restore original value.
1861 * Adjust array of valid/good pages.
1863 if (page_bad == TRUE)
1866 * If this good page is a continuation of the
1867 * previous set of good pages, then just increase
1868 * the end pointer. Otherwise start a new chunk.
1869 * Note that "end" points one page past the last good page,
1870 * making the range >= start and < end.
1871 * If we're also doing a speculative memory
1872 * test and we are at or past the end, bump up Maxmem
1873 * so that we keep going. The first bad page
1874 * will terminate the loop.
1876 if (phys_avail[pa_indx] == pa) {
1877 phys_avail[pa_indx] += PAGE_SIZE;
1880 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1882 "Too many holes in the physical address space, giving up\n");
1887 phys_avail[pa_indx++] = pa; /* start */
1888 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1892 if (dump_avail[da_indx] == pa) {
1893 dump_avail[da_indx] += PAGE_SIZE;
1896 if (da_indx == DUMP_AVAIL_ARRAY_END) {
1900 dump_avail[da_indx++] = pa; /* start */
1901 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
1913 * The last chunk must contain at least one page plus the message
1914 * buffer to avoid complicating other code (message buffer address
1915 * calculation, etc.).
1917 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1918 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
1919 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1920 phys_avail[pa_indx--] = 0;
1921 phys_avail[pa_indx--] = 0;
1924 Maxmem = atop(phys_avail[pa_indx]);
1926 /* Trim off space for the message buffer. */
1927 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
1929 /* Map the message buffer. */
1930 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
1931 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
1939 struct gate_descriptor *gdp;
1940 int gsel_tss, metadata_missing, x;
1944 thread0.td_kstack = proc0kstack;
1945 thread0.td_pcb = (struct pcb *)
1946 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
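/* The pcb occupies the very top of proc0's kernel stack; the usable stack grows down from just below it. */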
1949 * This may be done better later if it gets more high level
1950 * components in it. If so just link td->td_proc here.
1952 proc_linkup0(&proc0, &thread0);
1959 metadata_missing = 0;
1960 if (bootinfo.bi_modulep) {
1961 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
1962 preload_bootstrap_relocate(KERNBASE);
1964 metadata_missing = 1;
1967 kern_envp = static_env;
1968 else if (bootinfo.bi_envp)
1969 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
1971 /* Init basic tunables, hz etc */
1975 * Make gdt memory segments. All segments cover the full 4GB
1976 * of address space and permissions are enforced at page level.
1978 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
1979 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
1980 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
1981 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
1982 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
1983 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
1986 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
1987 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
1988 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
1990 for (x = 0; x < NGDT; x++)
1991 ssdtosd(&gdt_segs[x], &gdt[x].sd);
1993 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1994 r_gdt.rd_base = (int) gdt;
1995 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
1998 pcpu_init(pc, 0, sizeof(struct pcpu));
1999 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
2000 pmap_kenter(pa + KERNBASE, pa);
2001 dpcpu_init((void *)(first + KERNBASE), 0);
2002 first += DPCPU_SIZE;
2004 PCPU_SET(prvspace, pc);
2005 PCPU_SET(curthread, &thread0);
2006 PCPU_SET(curpcb, thread0.td_pcb);
2009 * Initialize mutexes.
2011 * icu_lock: in order to allow an interrupt to occur in a critical
2012 * section, to set pcpu->ipending (etc...) properly, we
2013 * must be able to get the icu lock, so it can't be
2017 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2019 /* make ldt memory segments */
2020 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2021 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2022 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2023 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2025 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2027 PCPU_SET(currentldt, _default_ldt);
2030 for (x = 0; x < NIDT; x++)
2031 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2032 GSEL(GCODE_SEL, SEL_KPL));
2033 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2034 GSEL(GCODE_SEL, SEL_KPL));
2035 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2036 GSEL(GCODE_SEL, SEL_KPL));
2037 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2038 GSEL(GCODE_SEL, SEL_KPL));
2039 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2040 GSEL(GCODE_SEL, SEL_KPL));
2041 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2042 GSEL(GCODE_SEL, SEL_KPL));
2043 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2044 GSEL(GCODE_SEL, SEL_KPL));
2045 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2046 GSEL(GCODE_SEL, SEL_KPL));
2047 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL
2048 , GSEL(GCODE_SEL, SEL_KPL));
2049 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
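/* IDT_DF is a task gate rather than a trap gate: a double fault forces a hardware task switch to the TSS selected by GPANIC_SEL (dblfault_tss, initialized below), giving dblfault_handler() a known-good stack even if the kernel stack is corrupt. */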
2050 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2051 GSEL(GCODE_SEL, SEL_KPL));
2052 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2053 GSEL(GCODE_SEL, SEL_KPL));
2054 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2055 GSEL(GCODE_SEL, SEL_KPL));
2056 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2057 GSEL(GCODE_SEL, SEL_KPL));
2058 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2059 GSEL(GCODE_SEL, SEL_KPL));
2060 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2061 GSEL(GCODE_SEL, SEL_KPL));
2062 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2063 GSEL(GCODE_SEL, SEL_KPL));
2064 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2065 GSEL(GCODE_SEL, SEL_KPL));
2066 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2067 GSEL(GCODE_SEL, SEL_KPL));
2068 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2069 GSEL(GCODE_SEL, SEL_KPL));
2070 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2071 GSEL(GCODE_SEL, SEL_KPL));
2073 r_idt.rd_limit = sizeof(idt0) - 1;
2074 r_idt.rd_base = (int) idt;
2078 * Initialize the i8254 before the console so that console
2079 * initialization can use DELAY().
2084 * Initialize the console before we print anything out.
2088 if (metadata_missing)
2089 printf("WARNING: loader(8) metadata is missing!\n");
2096 ksym_start = bootinfo.bi_symtab;
2097 ksym_end = bootinfo.bi_esymtab;
2103 if (boothowto & RB_KDB)
2104 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
2107 finishidentcpu(); /* Final stage of CPU initialization */
2108 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2109 GSEL(GCODE_SEL, SEL_KPL));
2110 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2111 GSEL(GCODE_SEL, SEL_KPL));
2112 initializecpu(); /* Initialize CPU registers */
2114 /* make an initial tss so cpu can get interrupt stack on syscall! */
2115 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2116 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2117 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
2118 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2119 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2120 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2121 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2122 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
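/* The upper 16 bits of tss_ioopt hold the I/O permission bitmap base; setting it to sizeof(struct i386tss) places it past the TSS limit, so no I/O bitmap is present and user-mode port access faults by default. */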
2125 /* pointer to selector slot for %fs/%gs */
2126 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
2128 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2129 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2130 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2131 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2132 dblfault_tss.tss_cr3 = (int)IdlePTD;
2133 dblfault_tss.tss_eip = (int)dblfault_handler;
2134 dblfault_tss.tss_eflags = PSL_KERNEL;
2135 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2136 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2137 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2138 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2139 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2143 init_param2(physmem);
2145 /* now running on new page tables, configured, and u/iom is accessible */
2147 msgbufinit(msgbufp, MSGBUF_SIZE);
2149 /* make a call gate to reenter kernel with */
2150 gdp = &ldt[LSYS5CALLS_SEL].gd;
2152 x = (int) &IDTVEC(lcall_syscall);
2153 gdp->gd_looffset = x;
2154 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2156 gdp->gd_type = SDT_SYS386CGT;
2157 gdp->gd_dpl = SEL_UPL;
2159 gdp->gd_hioffset = x >> 16;
2161 /* XXX does this work? */
2163 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2164 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2166 /* transfer to user mode */
2168 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2169 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
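/* _ucodesel/_udatasel are the ring-3 code and data selectors that exec_setregs() and the sendsig() routines above load into user trapframes. */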
2171 /* setup proc 0's pcb */
2172 thread0.td_pcb->pcb_flags = 0;
2173 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2174 thread0.td_pcb->pcb_ext = 0;
2175 thread0.td_frame = &proc0_tf;
2179 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
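/* spinlock_enter()/spinlock_exit() disable interrupts only at the outermost nesting level and restore the saved flags when the per-thread count returns to zero. */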
2185 spinlock_enter(void)
2190 if (td->td_md.md_spinlock_count == 0)
2191 td->td_md.md_saved_flags = intr_disable();
2192 td->td_md.md_spinlock_count++;
2203 td->td_md.md_spinlock_count--;
2204 if (td->td_md.md_spinlock_count == 0)
2205 intr_restore(td->td_md.md_saved_flags);
2208 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2209 static void f00f_hack(void *unused);
2210 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
2213 f00f_hack(void *unused)
2215 struct gate_descriptor *new_idt;
2223 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2225 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
2227 panic("kmem_alloc returned 0");
2229 /* Put the problematic entry (#6) at the end of the lower page. */
2230 new_idt = (struct gate_descriptor*)
2231 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
2232 bcopy(idt, new_idt, sizeof(idt0));
2233 r_idt.rd_base = (u_int)new_idt;
2236 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
2237 VM_PROT_READ, FALSE) != KERN_SUCCESS)
2238 panic("vm_map_protect failed");
2240 #endif /* defined(I586_CPU) && !NO_F00F_HACK */
2243 * Construct a PCB from a trapframe. This is called from kdb_trap() where
2244 * we want to start a backtrace from the function that caused us to enter
2245 * the debugger. We have the context in the trapframe, but base the trace
2246 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
2247 * enough for a backtrace.
2250 makectx(struct trapframe *tf, struct pcb *pcb)
2253 pcb->pcb_edi = tf->tf_edi;
2254 pcb->pcb_esi = tf->tf_esi;
2255 pcb->pcb_ebp = tf->tf_ebp;
2256 pcb->pcb_ebx = tf->tf_ebx;
2257 pcb->pcb_eip = tf->tf_eip;
2258 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
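/* For traps taken in kernel mode the hardware does not push %esp/%ss, so the kernel stack pointer at trap time is the address just past the frame minus those two missing words. */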
2262 ptrace_set_pc(struct thread *td, u_long addr)
2265 td->td_frame->tf_eip = addr;
2270 ptrace_single_step(struct thread *td)
2272 td->td_frame->tf_eflags |= PSL_T;
2277 ptrace_clear_single_step(struct thread *td)
2279 td->td_frame->tf_eflags &= ~PSL_T;
fill_regs(struct thread *td, struct reg *regs)

	struct trapframe *tp;

	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	regs->r_gs = pcb->pcb_gs;

set_regs(struct thread *td, struct reg *regs)

	struct trapframe *tp;

	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb->pcb_gs = regs->r_gs;
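/*
 * Illustrative sketch, not part of the original file: fill_regs() and
 * set_regs() above are the kernel side of the ptrace(2) PT_GETREGS and
 * PT_SETREGS requests.  A debugger could read a stopped child's program
 * counter roughly as follows.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <machine/reg.h>
#include <err.h>
#include <stdio.h>

static void
print_pc(pid_t pid)
{
	struct reg r;

	if (ptrace(PT_GETREGS, pid, (caddr_t)&r, 0) == -1)
		err(1, "PT_GETREGS");
	printf("eip = 0x%x\n", r.r_eip);
}
#endif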
#ifdef CPU_ENABLE_SSE
fill_fpregs_xmm(sv_xmm, sv_87)
	struct savexmm *sv_xmm;
	struct save87 *sv_87;

	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;

	bzero(sv_87, sizeof(*sv_87));

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;

set_fpregs_xmm(sv_87, sv_xmm)
	struct save87 *sv_87;
	struct savexmm *sv_xmm;

	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];

#endif /* CPU_ENABLE_SSE */
fill_fpregs(struct thread *td, struct fpreg *fpregs)

#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
		    (struct save87 *)fpregs);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);

set_fpregs(struct thread *td, struct fpreg *fpregs)

#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
		    &td->td_pcb->pcb_save.sv_xmm);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
/*
 * Get machine context.
 */
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)

	struct trapframe *tp;
	struct segment_descriptor *sdp;

	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_esp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_gs = td->td_pcb->pcb_gs;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_es = tp->tf_es;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_edi = tp->tf_edi;
	mcp->mc_esi = tp->tf_esi;
	mcp->mc_ebp = tp->tf_ebp;
	mcp->mc_isp = tp->tf_isp;
	mcp->mc_eflags = tp->tf_eflags;
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_eax = 0;
		mcp->mc_edx = 0;
		mcp->mc_eflags &= ~PSL_C;
	} else {
		mcp->mc_eax = tp->tf_eax;
		mcp->mc_edx = tp->tf_edx;
	}
	mcp->mc_ebx = tp->tf_ebx;
	mcp->mc_ecx = tp->tf_ecx;
	mcp->mc_eip = tp->tf_eip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_esp = tp->tf_esp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp);
	sdp = &td->td_pcb->pcb_fsd;
	mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
	sdp = &td->td_pcb->pcb_gsd;
	mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
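/*
 * Illustrative sketch, not part of the original file: get_mcontext() is what
 * ultimately fills in the mcontext_t that userland sees through interfaces
 * such as getcontext() and the uc_mcontext of a SA_SIGINFO signal handler.
 */
#if 0
#include <ucontext.h>
#include <stdio.h>

static void
show_pc(void)
{
	ucontext_t uc;

	if (getcontext(&uc) == 0)
		printf("eip = 0x%x\n", (unsigned)uc.uc_mcontext.mc_eip);
}
#endif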
/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
set_mcontext(struct thread *td, const mcontext_t *mcp)

	struct trapframe *tp;

	if (mcp->mc_len != sizeof(*mcp))
		return (EINVAL);
	eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
	    (tp->tf_eflags & ~PSL_USERCHANGE);
	if ((ret = set_fpcontext(td, mcp)) == 0) {
		tp->tf_fs = mcp->mc_fs;
		tp->tf_es = mcp->mc_es;
		tp->tf_ds = mcp->mc_ds;
		tp->tf_edi = mcp->mc_edi;
		tp->tf_esi = mcp->mc_esi;
		tp->tf_ebp = mcp->mc_ebp;
		tp->tf_ebx = mcp->mc_ebx;
		tp->tf_edx = mcp->mc_edx;
		tp->tf_ecx = mcp->mc_ecx;
		tp->tf_eax = mcp->mc_eax;
		tp->tf_eip = mcp->mc_eip;
		tp->tf_eflags = eflags;
		tp->tf_esp = mcp->mc_esp;
		tp->tf_ss = mcp->mc_ss;
		td->td_pcb->pcb_gs = mcp->mc_gs;
	}
get_fpcontext(struct thread *td, mcontext_t *mcp)

#ifndef DEV_NPX
	mcp->mc_fpformat = _MC_FPFMT_NODEV;
	mcp->mc_ownedfp = _MC_FPOWNED_NONE;
#else
	union savefpu *addr;
	/*
	 * XXX mc_fpstate might be misaligned, since its declaration is not
	 * unportabilized using __attribute__((aligned(16))) like the
	 * declaration of struct savexmm, and anyway, alignment doesn't work
	 * for auto variables since we don't use gcc's pessimal stack
	 * alignment.  Work around this by abusing the spare fields after
	 * mcp->mc_fpstate.
	 *
	 * XXX unpessimize most cases by only aligning when fxsave might be
	 * called, although this requires knowing too much about
	 * npxgetregs()'s internals.
	 */
	addr = (union savefpu *)&mcp->mc_fpstate;
	if (td == PCPU_GET(fpcurthread) &&
#ifdef CPU_ENABLE_SSE
	    cpu_fxsr &&
#endif
	    ((uintptr_t)(void *)addr & 0xF)) {
		do
			addr = (void *)((char *)addr + 4);
		while ((uintptr_t)(void *)addr & 0xF);
	}
	mcp->mc_ownedfp = npxgetregs(td, addr);
	if (addr != (union savefpu *)&mcp->mc_fpstate) {
		bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
		bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
	}
	mcp->mc_fpformat = npxformat();
set_fpcontext(struct thread *td, const mcontext_t *mcp)

	union savefpu *addr;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
	    mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		/* XXX align as above. */
		addr = (union savefpu *)&mcp->mc_fpstate;
		if (td == PCPU_GET(fpcurthread) &&
#ifdef CPU_ENABLE_SSE
		    cpu_fxsr &&
#endif
		    ((uintptr_t)(void *)addr & 0xF)) {
			do
				addr = (void *)((char *)addr + 4);
			while ((uintptr_t)(void *)addr & 0xF);
			bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
		}
#ifdef CPU_ENABLE_SSE
		if (cpu_fxsr)
			addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
#endif
		/*
		 * XXX we violate the dubious requirement that npxsetregs()
		 * be called with interrupts disabled.
		 */
		npxsetregs(td, addr);
		/*
		 * Don't bother putting things back where they were in the
		 * misaligned case, since we know that the caller won't use
		 * them again.
		 */
	}
fpstate_drop(struct thread *td)

	if (PCPU_GET(fpcurthread) == td)
		npxdrop();
	/*
	 * XXX force a full drop of the npx.  The above only drops it if we
	 * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
	 *
	 * XXX I don't much like npxgetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of npxgetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
fill_dbregs(struct thread *td, struct dbreg *dbregs)

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[4] = rdr4();
		dbregs->dr[5] = rdr5();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[4] = 0;
		dbregs->dr[5] = 0;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
set_dbregs(struct thread *td, struct dbreg *dbregs)

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr4(dbregs->dr[4]);
		load_dr5(dbregs->dr[5]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected reboot.
		 */
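		/*
		 * For reference: in dr7 the per-breakpoint access type 10b
		 * selects I/O breakpoints, which require CR4.DE and are
		 * otherwise undefined, and the length value 10b selects an
		 * 8-byte breakpoint, which is likewise undefined on these
		 * processors; that is why 0x02 is rejected for both fields
		 * below.
		 */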
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
		}
		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * the process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}
/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
user_dbreg_trap(void)

	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all GE and LE bits in the dr7 register are zero,
		 * thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}
	bp = dr6 & 0x0000000f;
	/*
	 * None of the breakpoint bits are set meaning this
	 * trap was not caused by any of the debug registers
	 */
	/*
	 * at least one of the breakpoints was hit, check to see
	 * which ones and if any of them are user space addresses
	 */
	addr[nbp++] = (caddr_t)rdr0();
	addr[nbp++] = (caddr_t)rdr1();
	addr[nbp++] = (caddr_t)rdr2();
	addr[nbp++] = (caddr_t)rdr3();

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */

	/*
	 * None of the breakpoints are in user space.
	 */
/*
 * Provide inb() and outb() as functions.  They are normally only available
 * as inline functions, and thus cannot be called from the debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);
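/*
 * Illustrative note, not part of the original file: with these out-of-line
 * copies present, I/O ports can be poked at from the ddb(4) prompt with its
 * call command, along the lines of "call inb_(0x84)" or
 * "call outb_(0x80, 0)".  The port numbers are only illustrations.
 */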
outb_(u_short port, u_char data)