2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
6 * This code is derived from software contributed to Berkeley by
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
43 #include "opt_atalk.h"
44 #include "opt_compat.h"
50 #include "opt_kstack_pages.h"
51 #include "opt_maxmem.h"
52 #include "opt_mp_watchdog.h"
54 #include "opt_perfmon.h"
56 #include <sys/param.h>
58 #include <sys/systm.h>
62 #include <sys/callout.h>
65 #include <sys/eventhandler.h>
67 #include <sys/imgact.h>
69 #include <sys/kernel.h>
71 #include <sys/linker.h>
73 #include <sys/malloc.h>
74 #include <sys/msgbuf.h>
75 #include <sys/mutex.h>
77 #include <sys/ptrace.h>
78 #include <sys/reboot.h>
79 #include <sys/sched.h>
80 #include <sys/signalvar.h>
84 #include <sys/syscallsubr.h>
85 #include <sys/sysctl.h>
86 #include <sys/sysent.h>
87 #include <sys/sysproto.h>
88 #include <sys/ucontext.h>
89 #include <sys/vmmeter.h>
92 #include <vm/vm_extern.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_pager.h>
98 #include <vm/vm_param.h>
102 #error KDB must be enabled in order for DDB to work!
105 #include <ddb/db_sym.h>
108 #include <pc98/pc98/pc98_machdep.h>
110 #include <net/netisr.h>
112 #include <machine/bootinfo.h>
113 #include <machine/clock.h>
114 #include <machine/cpu.h>
115 #include <machine/cputypes.h>
116 #include <machine/intr_machdep.h>
118 #include <machine/md_var.h>
119 #include <machine/mp_watchdog.h>
120 #include <machine/pc/bios.h>
121 #include <machine/pcb.h>
122 #include <machine/pcb_ext.h>
123 #include <machine/proc.h>
124 #include <machine/reg.h>
125 #include <machine/sigframe.h>
126 #include <machine/specialreg.h>
127 #include <machine/vm86.h>
129 #include <machine/perfmon.h>
132 #include <machine/smp.h>
136 #include <x86/isa/icu.h>
139 /* Sanity check for __curthread() */
140 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
142 extern void init386(int first);
143 extern void dblfault_handler(void);
145 extern void printcpuinfo(void); /* XXX header file */
146 extern void finishidentcpu(void);
147 extern void panicifcpuunsupported(void);
148 extern void initializecpu(void);
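/*
 * CS_SECURE() checks that a code selector taken from user-supplied context
 * selects ring 3, and EFL_SECURE() checks that only the user-changeable
 * bits of %eflags differ from the saved value; both guard the sigreturn
 * and set_regs() paths below.
 */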
150 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
151 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
153 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
154 #define CPU_ENABLE_SSE
157 static void cpu_startup(void *);
158 static void fpstate_drop(struct thread *td);
159 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
160 static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
161 #ifdef CPU_ENABLE_SSE
162 static void set_fpregs_xmm(struct save87 *, struct savexmm *);
163 static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
164 #endif /* CPU_ENABLE_SSE */
165 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
167 int need_pre_dma_flush; /* If 1, use wbinvd before DMA transfer. */
168 int need_post_dma_flush; /* If 1, use invd after DMA transfer. */
171 extern vm_offset_t ksym_start, ksym_end;
174 int _udatasel, _ucodesel;
177 static int ispc98 = 1;
178 SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");
183 static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
185 #ifdef COMPAT_FREEBSD4
186 static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
193 * The number of PHYSMAP entries must be one less than the number of
194 * PHYSSEG entries because the PHYSMAP entry that spans the largest
195 * physical address that is accessible by ISA DMA is split into two
198 #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
200 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
201 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
203 /* must be 2 less so 0 0 can signal end of chunks */
204 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
205 #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
207 struct kva_md_info kmi;
209 static struct trapframe proc0_tf;
210 struct pcpu __pcpu[MAXCPU];
221 * Good {morning,afternoon,evening,night}.
225 panicifcpuunsupported();
232 * Display physical memory.
234 memsize = ptoa((uintmax_t)Maxmem);
235 printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20);
238 * Display any holes after the first chunk of extended memory.
243 printf("Physical memory chunk(s):\n");
244 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
247 size = phys_avail[indx + 1] - phys_avail[indx];
249 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
250 (uintmax_t)phys_avail[indx],
251 (uintmax_t)phys_avail[indx + 1] - 1,
252 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
256 vm_ksubmap_init(&kmi);
258 printf("avail memory = %ju (%ju MB)\n",
259 ptoa((uintmax_t)cnt.v_free_count),
260 ptoa((uintmax_t)cnt.v_free_count) / 1048576);
263 * Set up buffers, so they can be used to read disk labels.
266 vm_pager_bufferinit();
271 * Send an interrupt to process.
273 * Stack is set up to allow sigcode stored
274 * at top to call routine, followed by kcall
275 * to sigreturn routine below. After sigreturn
276 * resets the signal mask, the stack, and the
277 * frame pointer, it returns to the user-specified pc and psl.
282 osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
284 struct osigframe sf, *fp;
288 struct trapframe *regs;
294 PROC_LOCK_ASSERT(p, MA_OWNED);
295 sig = ksi->ksi_signo;
297 mtx_assert(&psp->ps_mtx, MA_OWNED);
299 oonstack = sigonstack(regs->tf_esp);
301 /* Allocate space for the signal handler context. */
302 if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
303 SIGISMEMBER(psp->ps_sigonstack, sig)) {
304 fp = (struct osigframe *)(td->td_sigstk.ss_sp +
305 td->td_sigstk.ss_size - sizeof(struct osigframe));
306 #if defined(COMPAT_43)
307 td->td_sigstk.ss_flags |= SS_ONSTACK;
310 fp = (struct osigframe *)regs->tf_esp - 1;
312 /* Translate the signal if appropriate. */
313 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
314 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
316 /* Build the argument list for the signal handler. */
318 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
319 bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
320 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
321 /* Signal handler installed with SA_SIGINFO. */
322 sf.sf_arg2 = (register_t)&fp->sf_siginfo;
323 sf.sf_siginfo.si_signo = sig;
324 sf.sf_siginfo.si_code = ksi->ksi_code;
325 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
328 /* Old FreeBSD-style arguments. */
329 sf.sf_arg2 = ksi->ksi_code;
330 sf.sf_addr = (register_t)ksi->ksi_addr;
331 sf.sf_ahu.sf_handler = catcher;
333 mtx_unlock(&psp->ps_mtx);
336 /* Save most if not all of trap frame. */
337 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
338 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
339 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
340 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
341 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
342 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
343 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
344 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
345 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
346 sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
347 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
348 sf.sf_siginfo.si_sc.sc_gs = rgs();
349 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;
351 /* Build the signal context to be used by osigreturn(). */
352 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
353 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
354 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
355 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
356 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
357 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
358 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
359 sf.sf_siginfo.si_sc.sc_err = regs->tf_err;
362 * If we're a vm86 process, we want to save the segment registers.
363 * We also change eflags to be our emulated eflags, not the actual eflags.
366 if (regs->tf_eflags & PSL_VM) {
367 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
368 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
369 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
371 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
372 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
373 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
374 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;
376 if (vm86->vm86_has_vme == 0)
377 sf.sf_siginfo.si_sc.sc_ps =
378 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
379 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
381 /* See sendsig() for comments. */
382 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
386 * Copy the sigframe out to the user's stack.
388 if (copyout(&sf, fp, sizeof(*fp)) != 0) {
390 printf("process %ld has trashed its stack\n", (long)p->p_pid);
396 regs->tf_esp = (int)fp;
397 regs->tf_eip = PS_STRINGS - szosigcode;
398 regs->tf_eflags &= ~(PSL_T | PSL_D);
399 regs->tf_cs = _ucodesel;
400 regs->tf_ds = _udatasel;
401 regs->tf_es = _udatasel;
402 regs->tf_fs = _udatasel;
404 regs->tf_ss = _udatasel;
406 mtx_lock(&psp->ps_mtx);
408 #endif /* COMPAT_43 */
410 #ifdef COMPAT_FREEBSD4
412 freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
414 struct sigframe4 sf, *sfp;
418 struct trapframe *regs;
424 PROC_LOCK_ASSERT(p, MA_OWNED);
425 sig = ksi->ksi_signo;
427 mtx_assert(&psp->ps_mtx, MA_OWNED);
429 oonstack = sigonstack(regs->tf_esp);
431 /* Save user context. */
432 bzero(&sf, sizeof(sf));
433 sf.sf_uc.uc_sigmask = *mask;
434 sf.sf_uc.uc_stack = td->td_sigstk;
435 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
436 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
437 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
438 sf.sf_uc.uc_mcontext.mc_gs = rgs();
439 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
440 bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
441 sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
442 bzero(sf.sf_uc.uc_mcontext.__spare__,
443 sizeof(sf.sf_uc.uc_mcontext.__spare__));
444 bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
446 /* Allocate space for the signal handler context. */
447 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
448 SIGISMEMBER(psp->ps_sigonstack, sig)) {
449 sfp = (struct sigframe4 *)(td->td_sigstk.ss_sp +
450 td->td_sigstk.ss_size - sizeof(struct sigframe4));
451 #if defined(COMPAT_43)
452 td->td_sigstk.ss_flags |= SS_ONSTACK;
455 sfp = (struct sigframe4 *)regs->tf_esp - 1;
457 /* Translate the signal if appropriate. */
458 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
459 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
461 /* Build the argument list for the signal handler. */
463 sf.sf_ucontext = (register_t)&sfp->sf_uc;
464 bzero(&sf.sf_si, sizeof(sf.sf_si));
465 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
466 /* Signal handler installed with SA_SIGINFO. */
467 sf.sf_siginfo = (register_t)&sfp->sf_si;
468 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
470 /* Fill in POSIX parts */
471 sf.sf_si.si_signo = sig;
472 sf.sf_si.si_code = ksi->ksi_code;
473 sf.sf_si.si_addr = ksi->ksi_addr;
475 /* Old FreeBSD-style arguments. */
476 sf.sf_siginfo = ksi->ksi_code;
477 sf.sf_addr = (register_t)ksi->ksi_addr;
478 sf.sf_ahu.sf_handler = catcher;
480 mtx_unlock(&psp->ps_mtx);
484 * If we're a vm86 process, we want to save the segment registers.
485 * We also change eflags to be our emulated eflags, not the actual eflags.
488 if (regs->tf_eflags & PSL_VM) {
489 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
490 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
492 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
493 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
494 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
495 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
497 if (vm86->vm86_has_vme == 0)
498 sf.sf_uc.uc_mcontext.mc_eflags =
499 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
500 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
503 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
504 * syscalls made by the signal handler. This just avoids
505 * wasting time for our lazy fixup of such faults. PSL_NT
506 * does nothing in vm86 mode, but vm86 programs can set it
507 * almost legitimately in probes for old cpu types.
509 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
513 * Copy the sigframe out to the user's stack.
515 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
517 printf("process %ld has trashed its stack\n", (long)p->p_pid);
523 regs->tf_esp = (int)sfp;
524 regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
525 regs->tf_eflags &= ~(PSL_T | PSL_D);
526 regs->tf_cs = _ucodesel;
527 regs->tf_ds = _udatasel;
528 regs->tf_es = _udatasel;
529 regs->tf_fs = _udatasel;
530 regs->tf_ss = _udatasel;
532 mtx_lock(&psp->ps_mtx);
534 #endif /* COMPAT_FREEBSD4 */
537 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
539 struct sigframe sf, *sfp;
544 struct trapframe *regs;
545 struct segment_descriptor *sdp;
551 PROC_LOCK_ASSERT(p, MA_OWNED);
552 sig = ksi->ksi_signo;
554 mtx_assert(&psp->ps_mtx, MA_OWNED);
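/*
 * Signals for handlers registered through the FreeBSD 4 or old
 * 4.3BSD-compatible interfaces are delivered through the matching
 * legacy sendsig variants above.
 */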
555 #ifdef COMPAT_FREEBSD4
556 if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
557 freebsd4_sendsig(catcher, ksi, mask);
562 if (SIGISMEMBER(psp->ps_osigset, sig)) {
563 osendsig(catcher, ksi, mask);
568 oonstack = sigonstack(regs->tf_esp);
570 /* Save user context. */
571 bzero(&sf, sizeof(sf));
572 sf.sf_uc.uc_sigmask = *mask;
573 sf.sf_uc.uc_stack = td->td_sigstk;
574 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
575 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
576 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
577 sf.sf_uc.uc_mcontext.mc_gs = rgs();
578 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
579 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
580 get_fpcontext(td, &sf.sf_uc.uc_mcontext);
583 * Unconditionally fill the fsbase and gsbase into the mcontext.
585 sdp = &td->td_pcb->pcb_fsd;
586 sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
588 sdp = &td->td_pcb->pcb_gsd;
589 sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
591 bzero(sf.sf_uc.uc_mcontext.mc_spare1,
592 sizeof(sf.sf_uc.uc_mcontext.mc_spare1));
593 bzero(sf.sf_uc.uc_mcontext.mc_spare2,
594 sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
595 bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
597 /* Allocate space for the signal handler context. */
598 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
599 SIGISMEMBER(psp->ps_sigonstack, sig)) {
600 sp = td->td_sigstk.ss_sp +
601 td->td_sigstk.ss_size - sizeof(struct sigframe);
602 #if defined(COMPAT_43)
603 td->td_sigstk.ss_flags |= SS_ONSTACK;
606 sp = (char *)regs->tf_esp - sizeof(struct sigframe);
607 /* Align to 16 bytes. */
608 sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
610 /* Translate the signal if appropriate. */
611 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
612 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
614 /* Build the argument list for the signal handler. */
616 sf.sf_ucontext = (register_t)&sfp->sf_uc;
617 bzero(&sf.sf_si, sizeof(sf.sf_si));
618 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
619 /* Signal handler installed with SA_SIGINFO. */
620 sf.sf_siginfo = (register_t)&sfp->sf_si;
621 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
623 /* Fill in POSIX parts */
624 sf.sf_si = ksi->ksi_info;
625 sf.sf_si.si_signo = sig; /* maybe a translated signal */
627 /* Old FreeBSD-style arguments. */
628 sf.sf_siginfo = ksi->ksi_code;
629 sf.sf_addr = (register_t)ksi->ksi_addr;
630 sf.sf_ahu.sf_handler = catcher;
632 mtx_unlock(&psp->ps_mtx);
636 * If we're a vm86 process, we want to save the segment registers.
637 * We also change eflags to be our emulated eflags, not the actual eflags.
640 if (regs->tf_eflags & PSL_VM) {
641 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
642 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
644 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
645 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
646 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
647 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
649 if (vm86->vm86_has_vme == 0)
650 sf.sf_uc.uc_mcontext.mc_eflags =
651 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
652 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
655 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
656 * syscalls made by the signal handler. This just avoids
657 * wasting time for our lazy fixup of such faults. PSL_NT
658 * does nothing in vm86 mode, but vm86 programs can set it
659 * almost legitimately in probes for old cpu types.
661 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
665 * Copy the sigframe out to the user's stack.
667 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
669 printf("process %ld has trashed its stack\n", (long)p->p_pid);
675 regs->tf_esp = (int)sfp;
676 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
677 regs->tf_eflags &= ~(PSL_T | PSL_D);
678 regs->tf_cs = _ucodesel;
679 regs->tf_ds = _udatasel;
680 regs->tf_es = _udatasel;
681 regs->tf_fs = _udatasel;
682 regs->tf_ss = _udatasel;
684 mtx_lock(&psp->ps_mtx);
688 * System call to cleanup state after a signal
689 * has been taken. Reset signal mask and
690 * stack state from context left by sendsig (above).
691 * Return to previous pc and psl as specified by
692 * context left by sendsig. Check carefully to
693 * make sure that the user has not modified the
694 * state to gain improper privileges.
702 struct osigreturn_args /* {
703 struct osigcontext *sigcntxp;
706 struct osigcontext sc;
707 struct trapframe *regs;
708 struct osigcontext *scp;
713 error = copyin(uap->sigcntxp, &sc, sizeof(sc));
718 if (eflags & PSL_VM) {
719 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
720 struct vm86_kernel *vm86;
723 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
724 * set up the vm86 area, and we can't enter vm86 mode.
726 if (td->td_pcb->pcb_ext == 0)
728 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
729 if (vm86->vm86_inited == 0)
732 /* Go back to user mode if both flags are set. */
733 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
734 ksiginfo_init_trap(&ksi);
735 ksi.ksi_signo = SIGBUS;
736 ksi.ksi_code = BUS_OBJERR;
737 ksi.ksi_addr = (void *)regs->tf_eip;
738 trapsignal(td, &ksi);
741 if (vm86->vm86_has_vme) {
742 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
743 (eflags & VME_USERCHANGE) | PSL_VM;
745 vm86->vm86_eflags = eflags; /* save VIF, VIP */
746 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
747 (eflags & VM_USERCHANGE) | PSL_VM;
749 tf->tf_vm86_ds = scp->sc_ds;
750 tf->tf_vm86_es = scp->sc_es;
751 tf->tf_vm86_fs = scp->sc_fs;
752 tf->tf_vm86_gs = scp->sc_gs;
753 tf->tf_ds = _udatasel;
754 tf->tf_es = _udatasel;
755 tf->tf_fs = _udatasel;
758 * Don't allow users to change privileged or reserved flags.
761 * XXX do allow users to change the privileged flag PSL_RF.
762 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
763 * should sometimes set it there too. tf_eflags is kept in
764 * the signal context during signal handling and there is no
765 * other place to remember it, so the PSL_RF bit may be
766 * corrupted by the signal handler without us knowing.
767 * Corruption of the PSL_RF bit at worst causes one more or
768 * one less debugger trap, so allowing it is fairly harmless.
770 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
775 * Don't allow users to load a valid privileged %cs. Let the
776 * hardware check for invalid selectors, excess privilege in
777 * other selectors, invalid %eip's and invalid %esp's.
779 if (!CS_SECURE(scp->sc_cs)) {
780 ksiginfo_init_trap(&ksi);
781 ksi.ksi_signo = SIGBUS;
782 ksi.ksi_code = BUS_OBJERR;
783 ksi.ksi_trapno = T_PROTFLT;
784 ksi.ksi_addr = (void *)regs->tf_eip;
785 trapsignal(td, &ksi);
788 regs->tf_ds = scp->sc_ds;
789 regs->tf_es = scp->sc_es;
790 regs->tf_fs = scp->sc_fs;
793 /* Restore remaining registers. */
794 regs->tf_eax = scp->sc_eax;
795 regs->tf_ebx = scp->sc_ebx;
796 regs->tf_ecx = scp->sc_ecx;
797 regs->tf_edx = scp->sc_edx;
798 regs->tf_esi = scp->sc_esi;
799 regs->tf_edi = scp->sc_edi;
800 regs->tf_cs = scp->sc_cs;
801 regs->tf_ss = scp->sc_ss;
802 regs->tf_isp = scp->sc_isp;
803 regs->tf_ebp = scp->sc_fp;
804 regs->tf_esp = scp->sc_sp;
805 regs->tf_eip = scp->sc_pc;
806 regs->tf_eflags = eflags;
808 #if defined(COMPAT_43)
809 if (scp->sc_onstack & 1)
810 td->td_sigstk.ss_flags |= SS_ONSTACK;
812 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
814 kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
816 return (EJUSTRETURN);
818 #endif /* COMPAT_43 */
820 #ifdef COMPAT_FREEBSD4
825 freebsd4_sigreturn(td, uap)
827 struct freebsd4_sigreturn_args /* {
828 const ucontext4 *sigcntxp;
832 struct trapframe *regs;
833 struct ucontext4 *ucp;
834 int cs, eflags, error;
837 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
842 eflags = ucp->uc_mcontext.mc_eflags;
843 if (eflags & PSL_VM) {
844 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
845 struct vm86_kernel *vm86;
848 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
849 * set up the vm86 area, and we can't enter vm86 mode.
851 if (td->td_pcb->pcb_ext == 0)
853 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
854 if (vm86->vm86_inited == 0)
857 /* Go back to user mode if both flags are set. */
858 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
859 ksiginfo_init_trap(&ksi);
860 ksi.ksi_signo = SIGBUS;
861 ksi.ksi_code = BUS_OBJERR;
862 ksi.ksi_addr = (void *)regs->tf_eip;
863 trapsignal(td, &ksi);
865 if (vm86->vm86_has_vme) {
866 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
867 (eflags & VME_USERCHANGE) | PSL_VM;
869 vm86->vm86_eflags = eflags; /* save VIF, VIP */
870 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
871 (eflags & VM_USERCHANGE) | PSL_VM;
873 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
874 tf->tf_eflags = eflags;
875 tf->tf_vm86_ds = tf->tf_ds;
876 tf->tf_vm86_es = tf->tf_es;
877 tf->tf_vm86_fs = tf->tf_fs;
878 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
879 tf->tf_ds = _udatasel;
880 tf->tf_es = _udatasel;
881 tf->tf_fs = _udatasel;
884 * Don't allow users to change privileged or reserved flags.
887 * XXX do allow users to change the privileged flag PSL_RF.
888 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
889 * should sometimes set it there too. tf_eflags is kept in
890 * the signal context during signal handling and there is no
891 * other place to remember it, so the PSL_RF bit may be
892 * corrupted by the signal handler without us knowing.
893 * Corruption of the PSL_RF bit at worst causes one more or
894 * one less debugger trap, so allowing it is fairly harmless.
896 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
897 uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
898 td->td_proc->p_pid, td->td_name, eflags);
903 * Don't allow users to load a valid privileged %cs. Let the
904 * hardware check for invalid selectors, excess privilege in
905 * other selectors, invalid %eip's and invalid %esp's.
907 cs = ucp->uc_mcontext.mc_cs;
908 if (!CS_SECURE(cs)) {
909 uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
910 td->td_proc->p_pid, td->td_name, cs);
911 ksiginfo_init_trap(&ksi);
912 ksi.ksi_signo = SIGBUS;
913 ksi.ksi_code = BUS_OBJERR;
914 ksi.ksi_trapno = T_PROTFLT;
915 ksi.ksi_addr = (void *)regs->tf_eip;
916 trapsignal(td, &ksi);
920 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
923 #if defined(COMPAT_43)
924 if (ucp->uc_mcontext.mc_onstack & 1)
925 td->td_sigstk.ss_flags |= SS_ONSTACK;
927 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
929 kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
930 return (EJUSTRETURN);
932 #endif /* COMPAT_FREEBSD4 */
938 sys_sigreturn(td, uap)
940 struct sigreturn_args /* {
941 const struct __ucontext *sigcntxp;
945 struct trapframe *regs;
947 int cs, eflags, error, ret;
950 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
955 eflags = ucp->uc_mcontext.mc_eflags;
956 if (eflags & PSL_VM) {
957 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
958 struct vm86_kernel *vm86;
961 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
962 * set up the vm86 area, and we can't enter vm86 mode.
964 if (td->td_pcb->pcb_ext == 0)
966 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
967 if (vm86->vm86_inited == 0)
970 /* Go back to user mode if both flags are set. */
971 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
972 ksiginfo_init_trap(&ksi);
973 ksi.ksi_signo = SIGBUS;
974 ksi.ksi_code = BUS_OBJERR;
975 ksi.ksi_addr = (void *)regs->tf_eip;
976 trapsignal(td, &ksi);
979 if (vm86->vm86_has_vme) {
980 eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
981 (eflags & VME_USERCHANGE) | PSL_VM;
983 vm86->vm86_eflags = eflags; /* save VIF, VIP */
984 eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
985 (eflags & VM_USERCHANGE) | PSL_VM;
987 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
988 tf->tf_eflags = eflags;
989 tf->tf_vm86_ds = tf->tf_ds;
990 tf->tf_vm86_es = tf->tf_es;
991 tf->tf_vm86_fs = tf->tf_fs;
992 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
993 tf->tf_ds = _udatasel;
994 tf->tf_es = _udatasel;
995 tf->tf_fs = _udatasel;
998 * Don't allow users to change privileged or reserved flags.
1001 * XXX do allow users to change the privileged flag PSL_RF.
1002 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers
1003 * should sometimes set it there too. tf_eflags is kept in
1004 * the signal context during signal handling and there is no
1005 * other place to remember it, so the PSL_RF bit may be
1006 * corrupted by the signal handler without us knowing.
1007 * Corruption of the PSL_RF bit at worst causes one more or
1008 * one less debugger trap, so allowing it is fairly harmless.
1010 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
1011 uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
1012 td->td_proc->p_pid, td->td_name, eflags);
1017 * Don't allow users to load a valid privileged %cs. Let the
1018 * hardware check for invalid selectors, excess privilege in
1019 * other selectors, invalid %eip's and invalid %esp's.
1021 cs = ucp->uc_mcontext.mc_cs;
1022 if (!CS_SECURE(cs)) {
1023 uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
1024 td->td_proc->p_pid, td->td_name, cs);
1025 ksiginfo_init_trap(&ksi);
1026 ksi.ksi_signo = SIGBUS;
1027 ksi.ksi_code = BUS_OBJERR;
1028 ksi.ksi_trapno = T_PROTFLT;
1029 ksi.ksi_addr = (void *)regs->tf_eip;
1030 trapsignal(td, &ksi);
1034 ret = set_fpcontext(td, &ucp->uc_mcontext);
1037 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
1040 #if defined(COMPAT_43)
1041 if (ucp->uc_mcontext.mc_onstack & 1)
1042 td->td_sigstk.ss_flags |= SS_ONSTACK;
1044 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
1047 kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
1048 return (EJUSTRETURN);
1052 * Machine dependent boot() routine
1054 * I haven't seen anything to put here yet
1055 * Possibly some stuff might be grafted back here from boot()
1063 * Flush the D-cache for non-DMA I/O so that the I-cache can
1064 * be made coherent later.
1067 cpu_flush_dcache(void *ptr, size_t len)
1069 /* Not applicable */
1072 /* Get current clock frequency for the given cpu id. */
1074 cpu_est_clockrate(int cpu_id, uint64_t *rate)
1076 uint64_t tsc1, tsc2;
1079 if (pcpu_find(cpu_id) == NULL || rate == NULL)
1081 if ((cpu_feature & CPUID_TSC) == 0)
1082 return (EOPNOTSUPP);
1086 /* Schedule ourselves on the indicated cpu. */
1087 thread_lock(curthread);
1088 sched_bind(curthread, cpu_id);
1089 thread_unlock(curthread);
1093 /* Calibrate by measuring a short delay. */
1094 reg = intr_disable();
1099 *rate = (tsc2 - tsc1) * 1000;
1103 thread_lock(curthread);
1104 sched_unbind(curthread);
1105 thread_unlock(curthread);
1114 * Shutdown the CPU as much as possible
1123 static int idle_mwait = 1; /* Use MONITOR/MWAIT for short idle. */
1124 TUNABLE_INT("machdep.idle_mwait", &idle_mwait);
1125 SYSCTL_INT(_machdep, OID_AUTO, idle_mwait, CTLFLAG_RW, &idle_mwait,
1126 0, "Use MONITOR/MWAIT for short idle");
1128 #define STATE_RUNNING 0x0
1129 #define STATE_MWAIT 0x1
1130 #define STATE_SLEEPING 0x2
1133 cpu_idle_hlt(int busy)
1137 state = (int *)PCPU_PTR(monitorbuf);
1138 *state = STATE_SLEEPING;
1140 * We must absolutely guarantee that hlt is the next instruction
1141 * after sti or we introduce a timing window.
1144 if (sched_runnable())
1147 __asm __volatile("sti; hlt");
1148 *state = STATE_RUNNING;
1152 * MWAIT cpu power states. Lower 4 bits are sub-states.
1154 #define MWAIT_C0 0xf0
1155 #define MWAIT_C1 0x00
1156 #define MWAIT_C2 0x10
1157 #define MWAIT_C3 0x20
1158 #define MWAIT_C4 0x30
1161 cpu_idle_mwait(int busy)
1165 state = (int *)PCPU_PTR(monitorbuf);
1166 *state = STATE_MWAIT;
1167 if (!sched_runnable()) {
1168 cpu_monitor(state, 0, 0);
1169 if (*state == STATE_MWAIT)
1170 cpu_mwait(0, MWAIT_C1);
1172 *state = STATE_RUNNING;
1176 cpu_idle_spin(int busy)
1181 state = (int *)PCPU_PTR(monitorbuf);
1182 *state = STATE_RUNNING;
1183 for (i = 0; i < 1000; i++) {
1184 if (sched_runnable())
1190 void (*cpu_idle_fn)(int) = cpu_idle_hlt;
1196 CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
1199 ap_watchdog(PCPU_GET(cpuid));
1201 /* If we are busy - try to use fast methods. */
1203 if ((cpu_feature2 & CPUID2_MON) && idle_mwait) {
1204 cpu_idle_mwait(busy);
1209 /* If we have time - switch timers into idle mode. */
1215 /* Call main idle method. */
1218 /* Switch timers back into active mode. */
1224 CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done",
1229 cpu_idle_wakeup(int cpu)
1234 pcpu = pcpu_find(cpu);
1235 state = (int *)pcpu->pc_monitorbuf;
1237 * This doesn't need to be atomic since missing the race will
1238 * simply result in unnecessary IPIs.
1240 if (*state == STATE_SLEEPING)
1242 if (*state == STATE_MWAIT)
1243 *state = STATE_RUNNING;
1248 * Ordered by speed/power consumption.
1254 { cpu_idle_spin, "spin" },
1255 { cpu_idle_mwait, "mwait" },
1256 { cpu_idle_hlt, "hlt" },
1261 idle_sysctl_available(SYSCTL_HANDLER_ARGS)
1267 avail = malloc(256, M_TEMP, M_WAITOK);
1269 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
1270 if (strstr(idle_tbl[i].id_name, "mwait") &&
1271 (cpu_feature2 & CPUID2_MON) == 0)
1273 p += sprintf(p, "%s%s", p != avail ? ", " : "",
1274 idle_tbl[i].id_name);
1276 error = sysctl_handle_string(oidp, avail, 0, req);
1277 free(avail, M_TEMP);
1281 SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
1282 0, 0, idle_sysctl_available, "A", "list of available idle functions");
1285 idle_sysctl(SYSCTL_HANDLER_ARGS)
1293 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
1294 if (idle_tbl[i].id_fn == cpu_idle_fn) {
1295 p = idle_tbl[i].id_name;
1299 strncpy(buf, p, sizeof(buf));
1300 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
1301 if (error != 0 || req->newptr == NULL)
1303 for (i = 0; idle_tbl[i].id_name != NULL; i++) {
1304 if (strstr(idle_tbl[i].id_name, "mwait") &&
1305 (cpu_feature2 & CPUID2_MON) == 0)
1307 if (strcmp(idle_tbl[i].id_name, buf))
1309 cpu_idle_fn = idle_tbl[i].id_fn;
1315 SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
1316 idle_sysctl, "A", "currently selected idle function");
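/*
 * 64-bit atomic loads and stores are reached through function pointers on
 * i386: they start out pointing at the lock-based _i386 versions and are
 * switched to the cmpxchg8b-based _i586 versions by cpu_probe_cmpxchg8b()
 * once CPUID reports CX8 support.
 */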
1318 uint64_t (*atomic_load_acq_64)(volatile uint64_t *) =
1319 atomic_load_acq_64_i386;
1320 void (*atomic_store_rel_64)(volatile uint64_t *, uint64_t) =
1321 atomic_store_rel_64_i386;
1324 cpu_probe_cmpxchg8b(void)
1327 if ((cpu_feature & CPUID_CX8) != 0) {
1328 atomic_load_acq_64 = atomic_load_acq_64_i586;
1329 atomic_store_rel_64 = atomic_store_rel_64_i586;
1334 * Reset registers to default values on exec.
1337 exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
1339 struct trapframe *regs = td->td_frame;
1340 struct pcb *pcb = td->td_pcb;
1342 /* Reset pc->pcb_gs and %gs before possibly invalidating it. */
1343 pcb->pcb_gs = _udatasel;
1346 mtx_lock_spin(&dt_lock);
1347 if (td->td_proc->p_md.md_ldt)
1350 mtx_unlock_spin(&dt_lock);
1352 bzero((char *)regs, sizeof(struct trapframe));
1353 regs->tf_eip = imgp->entry_addr;
1354 regs->tf_esp = stack;
1355 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
1356 regs->tf_ss = _udatasel;
1357 regs->tf_ds = _udatasel;
1358 regs->tf_es = _udatasel;
1359 regs->tf_fs = _udatasel;
1360 regs->tf_cs = _ucodesel;
1362 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
1363 regs->tf_ebx = imgp->ps_strings;
1366 * Reset the hardware debug registers if they were in use.
1367 * They won't have any meaning for the newly exec'd process.
1369 if (pcb->pcb_flags & PCB_DBREGS) {
1376 if (pcb == PCPU_GET(curpcb)) {
1378 * Clear the debug registers on the running
1379 * CPU, otherwise they will end up affecting
1380 * the next process we switch to.
1384 pcb->pcb_flags &= ~PCB_DBREGS;
1388 * Initialize the math emulator (if any) for the current process.
1389 * Actually, just clear the bit that says that the emulator has
1390 * been initialized. Initialization is delayed until the process
1391 * traps to the emulator (if it is done at all) mainly because
1392 * emulators don't provide an entry point for initialization.
1394 td->td_pcb->pcb_flags &= ~FP_SOFTFP;
1395 pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;
1398 * Drop the FP state if we hold it, so that the process gets a
1399 * clean FP state if it uses the FPU again.
1404 * XXX - Linux emulator
1405 * Make sure edx is 0x0 on entry; Linux binaries depend on it being there.
1408 td->td_retval[1] = 0;
1419 * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
1421 * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
1422 * instructions. We must set the CR0_MP bit and use the CR0_TS
1423 * bit to control the trap, because setting the CR0_EM bit does
1424 * not cause WAIT instructions to trap. It's important to trap
1425 * WAIT instructions - otherwise the "wait" variants of no-wait
1426 * control instructions would degenerate to the "no-wait" variants
1427 * after FP context switches but work correctly otherwise. It's
1428 * particularly important to trap WAITs when there is no NPX -
1429 * otherwise the "wait" variants would always degenerate.
1431 * Try setting CR0_NE to get correct error reporting on 486DX's.
1432 * Setting it should fail or do nothing on lesser processors.
1434 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
1439 u_long bootdev; /* not a struct cdev *- encoding is different */
1440 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
1441 CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");
1444 * Initialize 386 and configure to run kernel
1448 * Initialize segments & interrupt table
1453 union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */
1454 union descriptor ldt[NLDT]; /* local descriptor table */
1455 static struct gate_descriptor idt0[NIDT];
1456 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
1457 struct region_descriptor r_gdt, r_idt; /* table descriptors */
1458 struct mtx dt_lock; /* lock for GDT and LDT */
1460 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
1461 extern int has_f00f_bug;
1464 static struct i386tss dblfault_tss;
1465 static char dblfault_stack[PAGE_SIZE];
1467 extern vm_offset_t proc0kstack;
1471 * software prototypes -- in more palatable form.
1473 * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
1474 * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
1476 struct soft_segment_descriptor gdt_segs[] = {
1477 /* GNULL_SEL 0 Null Descriptor */
1483 .ssd_xx = 0, .ssd_xx1 = 0,
1486 /* GPRIV_SEL 1 SMP Per-Processor Private Data Descriptor */
1488 .ssd_limit = 0xfffff,
1489 .ssd_type = SDT_MEMRWA,
1492 .ssd_xx = 0, .ssd_xx1 = 0,
1495 /* GUFS_SEL 2 %fs Descriptor for user */
1497 .ssd_limit = 0xfffff,
1498 .ssd_type = SDT_MEMRWA,
1501 .ssd_xx = 0, .ssd_xx1 = 0,
1504 /* GUGS_SEL 3 %gs Descriptor for user */
1506 .ssd_limit = 0xfffff,
1507 .ssd_type = SDT_MEMRWA,
1510 .ssd_xx = 0, .ssd_xx1 = 0,
1513 /* GCODE_SEL 4 Code Descriptor for kernel */
1515 .ssd_limit = 0xfffff,
1516 .ssd_type = SDT_MEMERA,
1519 .ssd_xx = 0, .ssd_xx1 = 0,
1522 /* GDATA_SEL 5 Data Descriptor for kernel */
1524 .ssd_limit = 0xfffff,
1525 .ssd_type = SDT_MEMRWA,
1528 .ssd_xx = 0, .ssd_xx1 = 0,
1531 /* GUCODE_SEL 6 Code Descriptor for user */
1533 .ssd_limit = 0xfffff,
1534 .ssd_type = SDT_MEMERA,
1537 .ssd_xx = 0, .ssd_xx1 = 0,
1540 /* GUDATA_SEL 7 Data Descriptor for user */
1542 .ssd_limit = 0xfffff,
1543 .ssd_type = SDT_MEMRWA,
1546 .ssd_xx = 0, .ssd_xx1 = 0,
1549 /* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
1550 { .ssd_base = 0x400,
1551 .ssd_limit = 0xfffff,
1552 .ssd_type = SDT_MEMRWA,
1555 .ssd_xx = 0, .ssd_xx1 = 0,
1558 /* GPROC0_SEL 9 Proc 0 Tss Descriptor */
1561 .ssd_limit = sizeof(struct i386tss)-1,
1562 .ssd_type = SDT_SYS386TSS,
1565 .ssd_xx = 0, .ssd_xx1 = 0,
1568 /* GLDT_SEL 10 LDT Descriptor */
1569 { .ssd_base = (int) ldt,
1570 .ssd_limit = sizeof(ldt)-1,
1571 .ssd_type = SDT_SYSLDT,
1574 .ssd_xx = 0, .ssd_xx1 = 0,
1577 /* GUSERLDT_SEL 11 User LDT Descriptor per process */
1578 { .ssd_base = (int) ldt,
1579 .ssd_limit = (512 * sizeof(union descriptor)-1),
1580 .ssd_type = SDT_SYSLDT,
1583 .ssd_xx = 0, .ssd_xx1 = 0,
1586 /* GPANIC_SEL 12 Panic Tss Descriptor */
1587 { .ssd_base = (int) &dblfault_tss,
1588 .ssd_limit = sizeof(struct i386tss)-1,
1589 .ssd_type = SDT_SYS386TSS,
1592 .ssd_xx = 0, .ssd_xx1 = 0,
1595 /* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
1597 .ssd_limit = 0xfffff,
1598 .ssd_type = SDT_MEMERA,
1601 .ssd_xx = 0, .ssd_xx1 = 0,
1604 /* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
1606 .ssd_limit = 0xfffff,
1607 .ssd_type = SDT_MEMERA,
1610 .ssd_xx = 0, .ssd_xx1 = 0,
1613 /* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
1615 .ssd_limit = 0xfffff,
1616 .ssd_type = SDT_MEMRWA,
1619 .ssd_xx = 0, .ssd_xx1 = 0,
1622 /* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
1624 .ssd_limit = 0xfffff,
1625 .ssd_type = SDT_MEMRWA,
1628 .ssd_xx = 0, .ssd_xx1 = 0,
1631 /* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
1633 .ssd_limit = 0xfffff,
1634 .ssd_type = SDT_MEMRWA,
1637 .ssd_xx = 0, .ssd_xx1 = 0,
1640 /* GNDIS_SEL 18 NDIS Descriptor */
1646 .ssd_xx = 0, .ssd_xx1 = 0,
1651 static struct soft_segment_descriptor ldt_segs[] = {
1652 /* Null Descriptor - overwritten by call gate */
1658 .ssd_xx = 0, .ssd_xx1 = 0,
1661 /* Null Descriptor - overwritten by call gate */
1667 .ssd_xx = 0, .ssd_xx1 = 0,
1670 /* Null Descriptor - overwritten by call gate */
1676 .ssd_xx = 0, .ssd_xx1 = 0,
1679 /* Code Descriptor for user */
1681 .ssd_limit = 0xfffff,
1682 .ssd_type = SDT_MEMERA,
1685 .ssd_xx = 0, .ssd_xx1 = 0,
1688 /* Null Descriptor - overwritten by call gate */
1694 .ssd_xx = 0, .ssd_xx1 = 0,
1697 /* Data Descriptor for user */
1699 .ssd_limit = 0xfffff,
1700 .ssd_type = SDT_MEMRWA,
1703 .ssd_xx = 0, .ssd_xx1 = 0,
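/*
 * setidt() installs a single IDT entry: it records the handler offset and
 * code selector in slot idx, and sets the gate type and descriptor
 * privilege level that control how the vector may be reached.
 */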
1709 setidt(idx, func, typ, dpl, selec)
1716 struct gate_descriptor *ip;
1719 ip->gd_looffset = (int)func;
1720 ip->gd_selector = selec;
1726 ip->gd_hioffset = ((int)func)>>16 ;
1730 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
1731 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
1732 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
1733 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
1734 IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
1738 * Display the index and function name of any IDT entries that don't use
1739 * the default 'rsvd' entry point.
1741 DB_SHOW_COMMAND(idt, db_show_idt)
1743 struct gate_descriptor *ip;
1748 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
1749 func = (ip->gd_hioffset << 16 | ip->gd_looffset);
1750 if (func != (uintptr_t)&IDTVEC(rsvd)) {
1751 db_printf("%3d\t", idx);
1752 db_printsym(func, DB_STGY_PROC);
1759 /* Show privileged registers. */
1760 DB_SHOW_COMMAND(sysregs, db_show_sysregs)
1762 uint64_t idtr, gdtr;
1765 db_printf("idtr\t0x%08x/%04x\n",
1766 (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
1768 db_printf("gdtr\t0x%08x/%04x\n",
1769 (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
1770 db_printf("ldtr\t0x%04x\n", rldt());
1771 db_printf("tr\t0x%04x\n", rtr());
1772 db_printf("cr0\t0x%08x\n", rcr0());
1773 db_printf("cr2\t0x%08x\n", rcr2());
1774 db_printf("cr3\t0x%08x\n", rcr3());
1775 db_printf("cr4\t0x%08x\n", rcr4());
1781 struct segment_descriptor *sd;
1782 struct soft_segment_descriptor *ssd;
1784 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
1785 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
1786 ssd->ssd_type = sd->sd_type;
1787 ssd->ssd_dpl = sd->sd_dpl;
1788 ssd->ssd_p = sd->sd_p;
1789 ssd->ssd_def32 = sd->sd_def32;
1790 ssd->ssd_gran = sd->sd_gran;
1800 if (basemem > 640) {
1801 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
1807 * XXX if biosbasemem is now < 640, there is a `hole'
1808 * between the end of base memory and the start of
1809 * ISA memory. The hole may be empty or it may
1810 * contain BIOS code or data. Map it read/write so
1811 * that the BIOS can write to it. (Memory from 0 to
1812 * the physical end of the kernel is mapped read-only
1813 * to begin with and then parts of it are remapped.
1814 * The parts that aren't remapped form holes that
1815 * remain read-only and are unused by the kernel.
1816 * The base memory area is below the physical end of
1817 * the kernel and right now forms a read-only hole.
1818 * The part of it from PAGE_SIZE to
1819 * (trunc_page(biosbasemem * 1024) - 1) will be
1820 * remapped and used by the kernel later.)
1822 * This code is similar to the code used in
1823 * pmap_mapdev, but since no memory needs to be
1824 * allocated we simply change the mapping.
1826 for (pa = trunc_page(basemem * 1024);
1827 pa < ISA_HOLE_START; pa += PAGE_SIZE)
1828 pmap_kenter(KERNBASE + pa, pa);
1831 * Map pages between basemem and ISA_HOLE_START, if any, r/w into
1832 * the vm86 page table so that vm86 can scribble on them using
1833 * the vm86 map too. XXX: why 2 ways for this and only 1 way for
1834 * page 0, at least as initialized here?
1836 pte = (pt_entry_t *)vm86paddr;
1837 for (i = basemem / 4; i < 160; i++)
1838 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
1842 * Populate the (physmap) array with base/bound pairs describing the
1843 * available physical memory in the system, then test this memory and
1844 * build the phys_avail array describing the actually-available memory.
1846 * If we cannot accurately determine the physical memory map, then use
1847 * the value from the 0xE801 call, and failing that, the RTC.
1849 * Total memory size may be set by the kernel environment variable
1850 * hw.physmem or the compile-time define MAXMEM.
1852 * XXX first should be vm_paddr_t.
1855 getmemsize(int first)
1857 int off, physmap_idx, pa_indx, da_indx;
1858 u_long physmem_tunable, memtest;
1859 vm_paddr_t physmap[PHYSMAP_SIZE];
1861 quad_t dcons_addr, dcons_size;
1868 bzero(physmap, sizeof(physmap));
1870 /* XXX - some EPSON machines can't use PG_N */
1872 if (pc98_machine_type & M_EPSON_PC98) {
1873 switch (epson_machine_id) {
1877 case EPSON_PC486_HX:
1878 case EPSON_PC486_HG:
1879 case EPSON_PC486_HA:
1885 under16 = pc98_getmemsize(&basemem, &extmem);
1889 physmap[1] = basemem * 1024;
1891 physmap[physmap_idx] = 0x100000;
1892 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
1895 * Now, physmap contains a map of physical memory.
1899 /* make hole for AP bootstrap code */
1900 physmap[1] = mp_bootaddress(physmap[1]);
1904 * Maxmem isn't the "maximum memory", it's one larger than the
1905 * highest page of the physical address space. It should be
1906 * called something like "Maxphyspage". We may adjust this
1907 * based on ``hw.physmem'' and the results of the memory test.
1909 Maxmem = atop(physmap[physmap_idx + 1]);
1912 Maxmem = MAXMEM / 4;
1915 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
1916 Maxmem = atop(physmem_tunable);
1919 * By default keep the memtest enabled. Use a general name so that
1920 * one could eventually do more with the code than just disable it.
1923 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
1925 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
1926 (boothowto & RB_VERBOSE))
1927 printf("Physical memory use set to %ldK\n", Maxmem * 4);
1930 * If Maxmem has been increased beyond what the system has detected,
1931 * extend the last memory segment to the new limit.
1933 if (atop(physmap[physmap_idx + 1]) < Maxmem)
1934 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
1937 * We need to divide the chunk if Maxmem is larger than 16MB and
1938 * the area under 16MB is not entirely memory.
1939 * (1) system area (15-16MB region) is cut off
1940 * (2) extended memory is only over 16MB area (ex. Melco "HYPERMEMORY")
1942 if ((under16 != 16 * 1024) && (extmem > 15 * 1024)) {
1943 /* 15M - 16M region is cut off, so need to divide chunk */
1944 physmap[physmap_idx + 1] = under16 * 1024;
1946 physmap[physmap_idx] = 0x1000000;
1947 physmap[physmap_idx + 1] = physmap[2] + extmem * 1024;
1950 /* call pmap initialization to make new kernel address space */
1951 pmap_bootstrap(first);
1954 * Size up each available chunk of physical memory.
1956 physmap[0] = PAGE_SIZE; /* mask off page 0 */
1959 phys_avail[pa_indx++] = physmap[0];
1960 phys_avail[pa_indx] = physmap[0];
1961 dump_avail[da_indx] = physmap[0];
1965 * Get dcons buffer address
1967 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
1968 getenv_quad("dcons.size", &dcons_size) == 0)
1972 * physmap is in bytes, so when converting to page boundaries,
1973 * round up the start address and round down the end address.
1975 for (i = 0; i <= physmap_idx; i += 2) {
1978 end = ptoa((vm_paddr_t)Maxmem);
1979 if (physmap[i + 1] < end)
1980 end = trunc_page(physmap[i + 1]);
1981 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
1982 int tmp, page_bad, full;
1983 int *ptr = (int *)CADDR1;
1987 * block out kernel memory as not available.
1989 if (pa >= KERNLOAD && pa < first)
1993 * block out dcons buffer
1996 && pa >= trunc_page(dcons_addr)
1997 && pa < dcons_addr + dcons_size)
2005 * map page into kernel: valid, read/write,non-cacheable
2007 *pte = pa | PG_V | PG_RW | pg_n;
2012 * Test for alternating 1's and 0's
2014 *(volatile int *)ptr = 0xaaaaaaaa;
2015 if (*(volatile int *)ptr != 0xaaaaaaaa)
2018 * Test for alternating 0's and 1's
2020 *(volatile int *)ptr = 0x55555555;
2021 if (*(volatile int *)ptr != 0x55555555)
2026 *(volatile int *)ptr = 0xffffffff;
2027 if (*(volatile int *)ptr != 0xffffffff)
2032 *(volatile int *)ptr = 0x0;
2033 if (*(volatile int *)ptr != 0x0)
2036 * Restore original value.
2042 * Adjust array of valid/good pages.
2044 if (page_bad == TRUE)
2047 * If this good page is a continuation of the
2048 * previous set of good pages, then just increase
2049 * the end pointer. Otherwise start a new chunk.
2050 * Note that the recorded "end" points just past the last good page,
2051 * making the range >= start and < end.
2052 * If we're also doing a speculative memory
2053 * test and we are at or past the end, bump up Maxmem
2054 * so that we keep going. The first bad page
2055 * will terminate the loop.
2057 if (phys_avail[pa_indx] == pa) {
2058 phys_avail[pa_indx] += PAGE_SIZE;
2061 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
2063 "Too many holes in the physical address space, giving up\n");
2068 phys_avail[pa_indx++] = pa; /* start */
2069 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
2073 if (dump_avail[da_indx] == pa) {
2074 dump_avail[da_indx] += PAGE_SIZE;
2077 if (da_indx == DUMP_AVAIL_ARRAY_END) {
2081 dump_avail[da_indx++] = pa; /* start */
2082 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
2094 * The last chunk must contain at least one page plus the message
2095 * buffer to avoid complicating other code (message buffer address
2096 * calculation, etc.).
2098 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
2099 round_page(msgbufsize) >= phys_avail[pa_indx]) {
2100 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
2101 phys_avail[pa_indx--] = 0;
2102 phys_avail[pa_indx--] = 0;
2105 Maxmem = atop(phys_avail[pa_indx]);
2107 /* Trim off space for the message buffer. */
2108 phys_avail[pa_indx] -= round_page(msgbufsize);
2110 /* Map the message buffer. */
2111 for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
2112 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
2120 struct gate_descriptor *gdp;
2121 int gsel_tss, metadata_missing, x, pa;
2125 thread0.td_kstack = proc0kstack;
2126 thread0.td_kstack_pages = KSTACK_PAGES;
2127 kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
2128 thread0.td_pcb = (struct pcb *)(thread0.td_kstack + kstack0_sz) - 1;
2131 * This may be done better later if it gets more high level
2132 * components in it. If so just link td->td_proc here.
2134 proc_linkup0(&proc0, &thread0);
2141 metadata_missing = 0;
2142 if (bootinfo.bi_modulep) {
2143 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
2144 preload_bootstrap_relocate(KERNBASE);
2146 metadata_missing = 1;
2149 kern_envp = static_env;
2150 else if (bootinfo.bi_envp)
2151 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;
2153 /* Init basic tunables, hz etc */
2157 * Make gdt memory segments. All segments cover the full 4GB
2158 * of address space and permissions are enforced at page level.
2160 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
2161 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
2162 gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
2163 gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
2164 gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
2165 gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
2168 gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
2169 gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
2170 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
2172 for (x = 0; x < NGDT; x++)
2173 ssdtosd(&gdt_segs[x], &gdt[x].sd);
2175 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
2176 r_gdt.rd_base = (int) gdt;
2177 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
2180 pcpu_init(pc, 0, sizeof(struct pcpu));
2181 for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
2182 pmap_kenter(pa + KERNBASE, pa);
2183 dpcpu_init((void *)(first + KERNBASE), 0);
2184 first += DPCPU_SIZE;
2185 PCPU_SET(prvspace, pc);
2186 PCPU_SET(curthread, &thread0);
2187 PCPU_SET(curpcb, thread0.td_pcb);
2190 * Initialize mutexes.
2192 * icu_lock: in order to allow an interrupt to occur in a critical
2193 * section, to set pcpu->ipending (etc...) properly, we
2194 * must be able to get the icu lock, so it can't be under witness.
2198 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
2200 /* make ldt memory segments */
2201 ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
2202 ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
2203 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
2204 ssdtosd(&ldt_segs[x], &ldt[x].sd);
2206 _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
2208 PCPU_SET(currentldt, _default_ldt);
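/*
 * Wire up the exception vectors: point every IDT slot at the reserved
 * trap stub first, then install the specific fault and trap handlers,
 * the task gate for double faults, and the int 0x80 syscall gate.
 */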
2211 for (x = 0; x < NIDT; x++)
2212 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
2213 GSEL(GCODE_SEL, SEL_KPL));
2214 setidt(IDT_DE, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
2215 GSEL(GCODE_SEL, SEL_KPL));
2216 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
2217 GSEL(GCODE_SEL, SEL_KPL));
2218 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYS386IGT, SEL_KPL,
2219 GSEL(GCODE_SEL, SEL_KPL));
2220 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
2221 GSEL(GCODE_SEL, SEL_KPL));
2222 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
2223 GSEL(GCODE_SEL, SEL_KPL));
2224 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
2225 GSEL(GCODE_SEL, SEL_KPL));
2226 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2227 GSEL(GCODE_SEL, SEL_KPL));
2228 setidt(IDT_NM, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL
2229 , GSEL(GCODE_SEL, SEL_KPL));
2230 setidt(IDT_DF, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
2231 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
2232 GSEL(GCODE_SEL, SEL_KPL));
2233 setidt(IDT_TS, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
2234 GSEL(GCODE_SEL, SEL_KPL));
2235 setidt(IDT_NP, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
2236 GSEL(GCODE_SEL, SEL_KPL));
2237 setidt(IDT_SS, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
2238 GSEL(GCODE_SEL, SEL_KPL));
2239 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2240 GSEL(GCODE_SEL, SEL_KPL));
2241 setidt(IDT_PF, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
2242 GSEL(GCODE_SEL, SEL_KPL));
2243 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
2244 GSEL(GCODE_SEL, SEL_KPL));
2245 setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
2246 GSEL(GCODE_SEL, SEL_KPL));
2247 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
2248 GSEL(GCODE_SEL, SEL_KPL));
2249 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
2250 GSEL(GCODE_SEL, SEL_KPL));
2251 setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
2252 GSEL(GCODE_SEL, SEL_KPL));
2254 r_idt.rd_limit = sizeof(idt0) - 1;
2255 r_idt.rd_base = (int) idt;
2259 * Initialize the i8254 before the console so that console
2260 * initialization can use DELAY().
2265 * Initialize the console before we print anything out.
2269 if (metadata_missing)
2270 printf("WARNING: loader(8) metadata is missing!\n");
2277 ksym_start = bootinfo.bi_symtab;
2278 ksym_end = bootinfo.bi_esymtab;
2284 if (boothowto & RB_KDB)
2285 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
2288 finishidentcpu(); /* Final stage of CPU initialization */
2289 setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
2290 GSEL(GCODE_SEL, SEL_KPL));
2291 setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
2292 GSEL(GCODE_SEL, SEL_KPL));
2293 initializecpu(); /* Initialize CPU registers */
2295 /* make an initial tss so cpu can get interrupt stack on syscall! */
2296 /* Note: -16 is so we can grow the trapframe if we came from vm86 */
2297 PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
2298 kstack0_sz - sizeof(struct pcb) - 16);
2299 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
2300 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
2301 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
2302 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
2303 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
2306 /* pointer to selector slot for %fs/%gs */
2307 PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
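/*
 * Initialize the double fault TSS so that a fault on the kernel stack can
 * be handled by a task switch onto the dedicated dblfault_stack with a
 * known-good register state.
 */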
2309 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
2310 dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
2311 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
2312 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
2313 dblfault_tss.tss_cr3 = (int)IdlePTD;
2314 dblfault_tss.tss_eip = (int)dblfault_handler;
2315 dblfault_tss.tss_eflags = PSL_KERNEL;
2316 dblfault_tss.tss_ds = dblfault_tss.tss_es =
2317 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
2318 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
2319 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
2320 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
2324 init_param2(physmem);
2326 /* now running on new page tables, configured, and u/iom is accessible */
2328 msgbufinit(msgbufp, msgbufsize);
2330 /* make a call gate to reenter kernel with */
2331 gdp = &ldt[LSYS5CALLS_SEL].gd;
2333 x = (int) &IDTVEC(lcall_syscall);
2334 gdp->gd_looffset = x;
2335 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
2337 gdp->gd_type = SDT_SYS386CGT;
2338 gdp->gd_dpl = SEL_UPL;
2340 gdp->gd_hioffset = x >> 16;
2342 /* XXX does this work? */
2344 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
2345 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
2347 /* transfer to user mode */
2349 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
2350 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
2352 /* setup proc 0's pcb */
2353 thread0.td_pcb->pcb_flags = 0;
2354 thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
2355 thread0.td_pcb->pcb_ext = 0;
2356 thread0.td_frame = &proc0_tf;
2358 cpu_probe_cmpxchg8b();
2362 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
2368 spinlock_enter(void)
2374 if (td->td_md.md_spinlock_count == 0) {
2375 flags = intr_disable();
2376 td->td_md.md_spinlock_count = 1;
2377 td->td_md.md_saved_flags = flags;
2379 td->td_md.md_spinlock_count++;
2391 flags = td->td_md.md_saved_flags;
2392 td->td_md.md_spinlock_count--;
2393 if (td->td_md.md_spinlock_count == 0)
2394 intr_restore(flags);
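/*
 * Illustrative usage sketch (hypothetical function, not part of the original
 * file): the per-thread spinlock count makes spinlock_enter()/spinlock_exit()
 * nestable; only the outermost enter disables interrupts and only the
 * matching outermost exit restores the saved flags.
 */
static void
spinlock_nesting_sketch(void)
{

	spinlock_enter();	/* count 0 -> 1: interrupts disabled, flags saved */
	spinlock_enter();	/* count 1 -> 2: flags left untouched */
	spinlock_exit();	/* count 2 -> 1: interrupts stay disabled */
	spinlock_exit();	/* count 1 -> 0: saved flags restored */
}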
2397 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
2398 static void f00f_hack(void *unused);
2399 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
2402 f00f_hack(void *unused)
2404 struct gate_descriptor *new_idt;
2412 printf("Intel Pentium detected, installing workaround for F00F bug\n");
2414 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
2416 if (tmp == 0) panic("kmem_alloc returned 0");
2418 /* Put the problematic entry (#6) at the end of the lower page. */
2419 new_idt = (struct gate_descriptor*)
2420 (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
2421 bcopy(idt, new_idt, sizeof(idt0));
2422 r_idt.rd_base = (u_int)new_idt;
2425 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
2426 VM_PROT_READ, FALSE) != KERN_SUCCESS)
2427 panic("vm_map_protect failed");
2429 #endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
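/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * with the placement used by f00f_hack() above, gate descriptors 0-6 occupy
 * the last seven slots of the first page, so entry #6 is the final entry on
 * the protected page and entry #7 starts exactly at the page boundary.
 */
static int
f00f_layout_sketch(unsigned int tmp, unsigned int page_size,
    unsigned int gate_size, unsigned int idx)
{
	unsigned int new_idt = tmp + page_size - 7 * gate_size;

	/* true for descriptors 0 through 6: entirely within the first page */
	return (new_idt + (idx + 1) * gate_size <= tmp + page_size);
}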
2432 * Construct a PCB from a trapframe. This is called from kdb_trap() where
2433 * we want to start a backtrace from the function that caused us to enter
2434 * the debugger. We have the context in the trapframe, but base the trace
2435 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
2436 * enough for a backtrace.
2439 makectx(struct trapframe *tf, struct pcb *pcb)
2442 pcb->pcb_edi = tf->tf_edi;
2443 pcb->pcb_esi = tf->tf_esi;
2444 pcb->pcb_ebp = tf->tf_ebp;
2445 pcb->pcb_ebx = tf->tf_ebx;
2446 pcb->pcb_eip = tf->tf_eip;
2447 pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
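/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * for a trap taken in kernel mode the CPU pushes no %esp/%ss, so the stack
 * pointer at trap time is the address just past the (shorter) trapframe,
 * i.e. the value makectx() computes above as (int)(tf + 1) - 8, the 8 bytes
 * being the two frame slots that were never pushed.
 */
static int
kernel_trap_esp_sketch(struct trapframe *tf)
{

	return ((int)(tf + 1) - 8);
}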
2451 ptrace_set_pc(struct thread *td, u_long addr)
2454 td->td_frame->tf_eip = addr;
2459 ptrace_single_step(struct thread *td)
2461 td->td_frame->tf_eflags |= PSL_T;
2466 ptrace_clear_single_step(struct thread *td)
2468 td->td_frame->tf_eflags &= ~PSL_T;
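/*
 * Illustrative usage sketch (hypothetical function, not part of the original
 * file): single stepping works by toggling the trace flag (PSL_T) in the
 * saved user %eflags, so a debugger-style caller pairs the two routines
 * around one resumed instruction.
 */
static void
single_step_once_sketch(struct thread *td)
{

	ptrace_single_step(td);		/* set PSL_T; next user insn traps */
	/* ... resume the thread and wait for the debug trap ... */
	ptrace_clear_single_step(td);	/* drop PSL_T before continuing */
}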
2473 fill_regs(struct thread *td, struct reg *regs)
2476 struct trapframe *tp;
2480 regs->r_gs = pcb->pcb_gs;
2481 return (fill_frame_regs(tp, regs));
2485 fill_frame_regs(struct trapframe *tp, struct reg *regs)
2487 regs->r_fs = tp->tf_fs;
2488 regs->r_es = tp->tf_es;
2489 regs->r_ds = tp->tf_ds;
2490 regs->r_edi = tp->tf_edi;
2491 regs->r_esi = tp->tf_esi;
2492 regs->r_ebp = tp->tf_ebp;
2493 regs->r_ebx = tp->tf_ebx;
2494 regs->r_edx = tp->tf_edx;
2495 regs->r_ecx = tp->tf_ecx;
2496 regs->r_eax = tp->tf_eax;
2497 regs->r_eip = tp->tf_eip;
2498 regs->r_cs = tp->tf_cs;
2499 regs->r_eflags = tp->tf_eflags;
2500 regs->r_esp = tp->tf_esp;
2501 regs->r_ss = tp->tf_ss;
2506 set_regs(struct thread *td, struct reg *regs)
2509 struct trapframe *tp;
2512 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
2513 !CS_SECURE(regs->r_cs))
2516 tp->tf_fs = regs->r_fs;
2517 tp->tf_es = regs->r_es;
2518 tp->tf_ds = regs->r_ds;
2519 tp->tf_edi = regs->r_edi;
2520 tp->tf_esi = regs->r_esi;
2521 tp->tf_ebp = regs->r_ebp;
2522 tp->tf_ebx = regs->r_ebx;
2523 tp->tf_edx = regs->r_edx;
2524 tp->tf_ecx = regs->r_ecx;
2525 tp->tf_eax = regs->r_eax;
2526 tp->tf_eip = regs->r_eip;
2527 tp->tf_cs = regs->r_cs;
2528 tp->tf_eflags = regs->r_eflags;
2529 tp->tf_esp = regs->r_esp;
2530 tp->tf_ss = regs->r_ss;
2531 pcb->pcb_gs = regs->r_gs;
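/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the EFL_SECURE()/CS_SECURE() guard at the top of set_regs() only accepts
 * values an unprivileged thread could have produced itself; roughly, the new
 * %cs must still be a user-privilege selector and only user-changeable
 * %eflags bits may differ from the current frame.
 */
static int
user_regs_ok_sketch(int new_eflags, int cur_eflags, int new_cs)
{

	return ((((new_eflags ^ cur_eflags) & ~PSL_USERCHANGE) == 0) &&
	    ISPL(new_cs) == SEL_UPL);
}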
2535 #ifdef CPU_ENABLE_SSE
2537 fill_fpregs_xmm(sv_xmm, sv_87)
2538 struct savexmm *sv_xmm;
2539 struct save87 *sv_87;
2541 register struct env87 *penv_87 = &sv_87->sv_env;
2542 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2545 bzero(sv_87, sizeof(*sv_87));
2547 /* FPU control/status */
2548 penv_87->en_cw = penv_xmm->en_cw;
2549 penv_87->en_sw = penv_xmm->en_sw;
2550 penv_87->en_tw = penv_xmm->en_tw;
2551 penv_87->en_fip = penv_xmm->en_fip;
2552 penv_87->en_fcs = penv_xmm->en_fcs;
2553 penv_87->en_opcode = penv_xmm->en_opcode;
2554 penv_87->en_foo = penv_xmm->en_foo;
2555 penv_87->en_fos = penv_xmm->en_fos;
2558 for (i = 0; i < 8; ++i)
2559 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
2563 set_fpregs_xmm(sv_87, sv_xmm)
2564 struct save87 *sv_87;
2565 struct savexmm *sv_xmm;
2567 register struct env87 *penv_87 = &sv_87->sv_env;
2568 register struct envxmm *penv_xmm = &sv_xmm->sv_env;
2571 /* FPU control/status */
2572 penv_xmm->en_cw = penv_87->en_cw;
2573 penv_xmm->en_sw = penv_87->en_sw;
2574 penv_xmm->en_tw = penv_87->en_tw;
2575 penv_xmm->en_fip = penv_87->en_fip;
2576 penv_xmm->en_fcs = penv_87->en_fcs;
2577 penv_xmm->en_opcode = penv_87->en_opcode;
2578 penv_xmm->en_foo = penv_87->en_foo;
2579 penv_xmm->en_fos = penv_87->en_fos;
2582 for (i = 0; i < 8; ++i)
2583 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
2585 #endif /* CPU_ENABLE_SSE */
2588 fill_fpregs(struct thread *td, struct fpreg *fpregs)
2591 KASSERT(td == curthread || TD_IS_SUSPENDED(td),
2592 ("not suspended thread %p", td));
2596 bzero(fpregs, sizeof(*fpregs));
2598 #ifdef CPU_ENABLE_SSE
2600 fill_fpregs_xmm(&td->td_pcb->pcb_user_save.sv_xmm,
2601 (struct save87 *)fpregs);
2603 #endif /* CPU_ENABLE_SSE */
2604 bcopy(&td->td_pcb->pcb_user_save.sv_87, fpregs,
2610 set_fpregs(struct thread *td, struct fpreg *fpregs)
2613 #ifdef CPU_ENABLE_SSE
2615 set_fpregs_xmm((struct save87 *)fpregs,
2616 &td->td_pcb->pcb_user_save.sv_xmm);
2618 #endif /* CPU_ENABLE_SSE */
2619 bcopy(fpregs, &td->td_pcb->pcb_user_save.sv_87,
2628 * Get machine context.
2631 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
2633 struct trapframe *tp;
2634 struct segment_descriptor *sdp;
2638 PROC_LOCK(curthread->td_proc);
2639 mcp->mc_onstack = sigonstack(tp->tf_esp);
2640 PROC_UNLOCK(curthread->td_proc);
2641 mcp->mc_gs = td->td_pcb->pcb_gs;
2642 mcp->mc_fs = tp->tf_fs;
2643 mcp->mc_es = tp->tf_es;
2644 mcp->mc_ds = tp->tf_ds;
2645 mcp->mc_edi = tp->tf_edi;
2646 mcp->mc_esi = tp->tf_esi;
2647 mcp->mc_ebp = tp->tf_ebp;
2648 mcp->mc_isp = tp->tf_isp;
2649 mcp->mc_eflags = tp->tf_eflags;
2650 if (flags & GET_MC_CLEAR_RET) {
	mcp->mc_eax = 0;
	mcp->mc_edx = 0;
2653 mcp->mc_eflags &= ~PSL_C;
	} else {
2655 mcp->mc_eax = tp->tf_eax;
2656 mcp->mc_edx = tp->tf_edx;
	}
2658 mcp->mc_ebx = tp->tf_ebx;
2659 mcp->mc_ecx = tp->tf_ecx;
2660 mcp->mc_eip = tp->tf_eip;
2661 mcp->mc_cs = tp->tf_cs;
2662 mcp->mc_esp = tp->tf_esp;
2663 mcp->mc_ss = tp->tf_ss;
2664 mcp->mc_len = sizeof(*mcp);
2665 get_fpcontext(td, mcp);
2666 sdp = &td->td_pcb->pcb_fsd;
2667 mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
2668 sdp = &td->td_pcb->pcb_gsd;
2669 mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
2670 bzero(mcp->mc_spare1, sizeof(mcp->mc_spare1));
2671 bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
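/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * an i386 segment descriptor keeps its 32-bit base split into a 24-bit low
 * part and an 8-bit high part; the fsbase/gsbase values above are rebuilt by
 * undoing that split.
 */
static unsigned int
sd_base_sketch(unsigned int lobase, unsigned int hibase)
{

	return (hibase << 24 | (lobase & 0xffffff));
}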
2676 * Set machine context.
2678 * However, we don't set any but the user modifiable flags, and we won't
2679 * touch the cs selector.
2682 set_mcontext(struct thread *td, const mcontext_t *mcp)
2684 struct trapframe *tp;
2688 if (mcp->mc_len != sizeof(*mcp))
2690 eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
2691 (tp->tf_eflags & ~PSL_USERCHANGE);
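	/*
	 * Illustrative note on the expression above: the installed eflags
	 * keep the privileged bits (e.g. IOPL) from the current frame and
	 * take only the user-changeable bits from the supplied context, so
	 * set_mcontext() cannot be used to escalate them.
	 */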
2692 if ((ret = set_fpcontext(td, mcp)) == 0) {
2693 tp->tf_fs = mcp->mc_fs;
2694 tp->tf_es = mcp->mc_es;
2695 tp->tf_ds = mcp->mc_ds;
2696 tp->tf_edi = mcp->mc_edi;
2697 tp->tf_esi = mcp->mc_esi;
2698 tp->tf_ebp = mcp->mc_ebp;
2699 tp->tf_ebx = mcp->mc_ebx;
2700 tp->tf_edx = mcp->mc_edx;
2701 tp->tf_ecx = mcp->mc_ecx;
2702 tp->tf_eax = mcp->mc_eax;
2703 tp->tf_eip = mcp->mc_eip;
2704 tp->tf_eflags = eflags;
2705 tp->tf_esp = mcp->mc_esp;
2706 tp->tf_ss = mcp->mc_ss;
2707 td->td_pcb->pcb_gs = mcp->mc_gs;
2714 get_fpcontext(struct thread *td, mcontext_t *mcp)
2718 mcp->mc_fpformat = _MC_FPFMT_NODEV;
2719 mcp->mc_ownedfp = _MC_FPOWNED_NONE;
2720 bzero(mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
2722 mcp->mc_ownedfp = npxgetregs(td);
2723 bcopy(&td->td_pcb->pcb_user_save, &mcp->mc_fpstate,
2724 sizeof(mcp->mc_fpstate));
2725 mcp->mc_fpformat = npxformat();
2730 set_fpcontext(struct thread *td, const mcontext_t *mcp)
2733 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
2735 else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
2736 mcp->mc_fpformat != _MC_FPFMT_XMM)
2738 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
2739 /* We don't care what state is left in the FPU or PCB. */
2741 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
2742 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
2744 #ifdef CPU_ENABLE_SSE
2746 ((union savefpu *)&mcp->mc_fpstate)->sv_xmm.sv_env.
2747 en_mxcsr &= cpu_mxcsr_mask;
2749 npxsetregs(td, (union savefpu *)&mcp->mc_fpstate);
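	/*
	 * Illustrative note: the masking with cpu_mxcsr_mask above strips
	 * any reserved MXCSR bits from the user-supplied state; loading an
	 * MXCSR value with reserved bits set would fault, so the context is
	 * sanitized rather than trusted.
	 */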
2757 fpstate_drop(struct thread *td)
2762 if (PCPU_GET(fpcurthread) == td)
2766 * XXX force a full drop of the npx. The above only drops it if we
2767 * owned it. npxgetregs() has the same bug in the !cpu_fxsr case.
2769 * XXX I don't much like npxgetregs()'s semantics of doing a full
2770 * drop. Dropping only to the pcb matches fnsave's behaviour.
2771 * We only need to drop to !PCB_INITDONE in sendsig(). But
2772 * sendsig() is the only caller of npxgetregs()... perhaps we just
2773 * have too many layers.
2775 curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
2776 PCB_NPXUSERINITDONE);
2781 fill_dbregs(struct thread *td, struct dbreg *dbregs)
2786 dbregs->dr[0] = rdr0();
2787 dbregs->dr[1] = rdr1();
2788 dbregs->dr[2] = rdr2();
2789 dbregs->dr[3] = rdr3();
2790 dbregs->dr[4] = rdr4();
2791 dbregs->dr[5] = rdr5();
2792 dbregs->dr[6] = rdr6();
2793 dbregs->dr[7] = rdr7();
2796 dbregs->dr[0] = pcb->pcb_dr0;
2797 dbregs->dr[1] = pcb->pcb_dr1;
2798 dbregs->dr[2] = pcb->pcb_dr2;
2799 dbregs->dr[3] = pcb->pcb_dr3;
2802 dbregs->dr[6] = pcb->pcb_dr6;
2803 dbregs->dr[7] = pcb->pcb_dr7;
2809 set_dbregs(struct thread *td, struct dbreg *dbregs)
2815 load_dr0(dbregs->dr[0]);
2816 load_dr1(dbregs->dr[1]);
2817 load_dr2(dbregs->dr[2]);
2818 load_dr3(dbregs->dr[3]);
2819 load_dr4(dbregs->dr[4]);
2820 load_dr5(dbregs->dr[5]);
2821 load_dr6(dbregs->dr[6]);
2822 load_dr7(dbregs->dr[7]);
2825 * Don't let an illegal value for dr7 get set. Specifically,
2826 * check for undefined settings. Setting these bit patterns
2827 * results in undefined behaviour and can lead to an unexpected trap or fault. */
2830 for (i = 0; i < 4; i++) {
2831 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
2833 if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
2840 * Don't let a process set a breakpoint that is not within the
2841 * process's address space. If a process could do this, it
2842 * could halt the system by setting a breakpoint in the kernel
2843 * (if ddb was enabled). Thus, we need to check to make sure
2844 * that no breakpoints are being enabled for addresses outside
2845 * the process's address space.
2847 * XXX - what about when the watched area of the user's
2848 * address space is written into from within the kernel
2849 * ... wouldn't that still cause a breakpoint to be generated
2850 * from within kernel mode?
2853 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
2854 /* dr0 is enabled */
2855 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
2859 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
2860 /* dr1 is enabled */
2861 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
2865 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
2866 /* dr2 is enabled */
2867 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
2871 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
2872 /* dr3 is enabled */
2873 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
2877 pcb->pcb_dr0 = dbregs->dr[0];
2878 pcb->pcb_dr1 = dbregs->dr[1];
2879 pcb->pcb_dr2 = dbregs->dr[2];
2880 pcb->pcb_dr3 = dbregs->dr[3];
2881 pcb->pcb_dr6 = dbregs->dr[6];
2882 pcb->pcb_dr7 = dbregs->dr[7];
2884 pcb->pcb_flags |= PCB_DBREGS;
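/*
 * Illustrative sketch (hypothetical helpers, not part of the original file):
 * for breakpoint i, %dr7 keeps two enable bits in its low byte and a 2-bit
 * access type plus a 2-bit length in its high word.  The 0x02 encodings
 * rejected in set_dbregs() above are the I/O-access type, which is undefined
 * unless CR4.DE is set, and the undefined length code.
 */
static int
dr7_enabled_sketch(unsigned int dr7, int i)
{

	return (((dr7 >> (i * 2)) & 0x3) != 0);	/* local or global enable */
}

static unsigned int
dr7_access_sketch(unsigned int dr7, int i)
{

	return ((dr7 >> (16 + i * 4)) & 0x3);	/* 00 exec, 01 write, 11 r/w */
}

static unsigned int
dr7_len_sketch(unsigned int dr7, int i)
{

	return ((dr7 >> (18 + i * 4)) & 0x3);	/* 00=1, 01=2, 11=4 bytes */
}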
2891 * Return > 0 if a hardware breakpoint has been hit, and the
2892 * breakpoint was in user space. Return 0 otherwise.
2895 user_dbreg_trap(void)
2897 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
2898 u_int32_t bp; /* breakpoint bits extracted from dr6 */
2899 int nbp; /* number of breakpoints that triggered */
2900 caddr_t addr[4]; /* breakpoint addresses */
2904 if ((dr7 & 0x000000ff) == 0) {
2906 * all local and global enable bits (L0-L3, G0-G3) in dr7 are zero,
2907 * thus the trap couldn't have been caused by the
2908 * hardware debug registers
2915 bp = dr6 & 0x0000000f;
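	/*
	 * Illustrative note: the low four bits of dr6 (B0-B3) record which
	 * of the four hardware breakpoints fired, so bit i of bp selects
	 * debug address register dr(i) below.
	 */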
2919 * None of the breakpoint bits are set, meaning this
2920 * trap was not caused by any of the debug registers
2926 * at least one of the breakpoints was hit; check to see
2927 * which ones and if any of them are user space addresses
2931 addr[nbp++] = (caddr_t)rdr0();
2934 addr[nbp++] = (caddr_t)rdr1();
2937 addr[nbp++] = (caddr_t)rdr2();
2940 addr[nbp++] = (caddr_t)rdr3();
2943 for (i = 0; i < nbp; i++) {
2944 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
2946 * addr[i] is in user space
2953 * None of the breakpoints are in user space.
2961 * Provide inb() and outb() as functions. They are normally only available as
2962 * inline functions and thus cannot be called from the debugger.
2965 /* silence compiler warnings */
2966 u_char inb_(u_short);
2967 void outb_(u_short, u_char);
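/*
 * Presumably the definitions that follow are nothing more than out-of-line
 * calls to the corresponding inline primitives, e.g. (sketch, not from the
 * original source):
 *
 *	u_char inb_(u_short port)		{ return (inb(port)); }
 *	void   outb_(u_short port, u_char data)	{ outb(port, data); }
 *
 * which is what gives the debugger a real symbol to call.
 */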
2976 outb_(u_short port, u_char data)