2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
7 * This code is derived from software contributed to Berkeley by
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
44 #include "opt_atalk.h"
45 #include "opt_atpic.h"
46 #include "opt_compat.h"
52 #include "opt_kstack_pages.h"
53 #include "opt_maxmem.h"
54 #include "opt_msgbuf.h"
55 #include "opt_perfmon.h"
57 #include <sys/param.h>
59 #include <sys/systm.h>
63 #include <sys/callout.h>
64 #include <sys/clock.h>
67 #include <sys/eventhandler.h>
69 #include <sys/imgact.h>
71 #include <sys/kernel.h>
73 #include <sys/linker.h>
75 #include <sys/malloc.h>
76 #include <sys/memrange.h>
77 #include <sys/msgbuf.h>
78 #include <sys/mutex.h>
80 #include <sys/ptrace.h>
81 #include <sys/reboot.h>
82 #include <sys/sched.h>
83 #include <sys/signalvar.h>
84 #include <sys/sysctl.h>
85 #include <sys/sysent.h>
86 #include <sys/sysproto.h>
87 #include <sys/ucontext.h>
88 #include <sys/vmmeter.h>
91 #include <vm/vm_extern.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_object.h>
96 #include <vm/vm_pager.h>
97 #include <vm/vm_param.h>
101 #error KDB must be enabled in order for DDB to work!
106 #include <net/netisr.h>
108 #include <machine/clock.h>
109 #include <machine/cpu.h>
110 #include <machine/cputypes.h>
111 #include <machine/intr_machdep.h>
112 #include <machine/md_var.h>
113 #include <machine/metadata.h>
114 #include <machine/pc/bios.h>
115 #include <machine/pcb.h>
116 #include <machine/proc.h>
117 #include <machine/reg.h>
118 #include <machine/sigframe.h>
119 #include <machine/specialreg.h>
121 #include <machine/perfmon.h>
123 #include <machine/tss.h>
125 #include <machine/smp.h>
129 #include <amd64/isa/icu.h>
131 #include <machine/apicvar.h>
134 #include <isa/isareg.h>
137 /* Sanity check for __curthread() */
138 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
140 extern u_int64_t hammer_time(u_int64_t, u_int64_t);
141 extern void dblfault_handler(void);
143 extern void printcpuinfo(void); /* XXX header file */
144 extern void identify_cpu(void);
145 extern void panicifcpuunsupported(void);
147 #define CS_SECURE(cs) (ISPL(cs) == SEL_UPL)
148 #define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
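/*
 * CS_SECURE() accepts a %cs selector only if its privilege field is the
 * user ring (SEL_UPL); EFL_SECURE() accepts a new rflags value only if it
 * differs from the old one in user-changeable bits (PSL_USERCHANGE).
 * Illustratively, a frame handed back with a ring-0 %cs, or with a bit
 * such as IOPL toggled, fails these checks and is rejected rather than
 * loaded.
 */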
150 static void cpu_startup(void *);
151 static void get_fpcontext(struct thread *td, mcontext_t *mcp);
152 static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
153 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)
156 extern vm_offset_t ksym_start, ksym_end;
159 int _udatasel, _ucodesel, _ucode32sel;
167 * The number of PHYSMAP entries must be one less than the number of
168 * PHYSSEG entries because the PHYSMAP entry that spans the largest
169 * physical address that is accessible by ISA DMA is split into two PHYSSEG entries.
172 #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
174 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
175 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
177 /* must be 2 less so that a pair of zero entries can signal the end of chunks */
178 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
179 #define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)
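/*
 * Both arrays hold (start, end) pairs of physical addresses and are
 * terminated by a pair of zeroes.  A purely illustrative layout:
 *
 * phys_avail[] = { 0x1000, 0x9f000, 0x100000, 0x3fff0000, 0, 0 };
 *
 * i.e. two usable chunks, with page 0 and the legacy I/O hole excluded.
 */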
181 struct kva_md_info kmi;
183 static struct trapframe proc0_tf;
184 struct region_descriptor r_gdt, r_idt;
186 struct pcpu __pcpu[MAXCPU];
190 struct mem_range_softc mem_range_softc;
197 * Good {morning,afternoon,evening,night}.
201 panicifcpuunsupported();
205 printf("usable memory = %ju (%ju MB)\n", ptoa((uintmax_t)physmem),
206 ptoa((uintmax_t)physmem) / 1048576);
209 * Display any holes after the first chunk of extended memory.
214 printf("Physical memory chunk(s):\n");
215 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
218 size = phys_avail[indx + 1] - phys_avail[indx];
220 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
221 (uintmax_t)phys_avail[indx],
222 (uintmax_t)phys_avail[indx + 1] - 1,
223 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
227 vm_ksubmap_init(&kmi);
229 printf("avail memory = %ju (%ju MB)\n",
230 ptoa((uintmax_t)cnt.v_free_count),
231 ptoa((uintmax_t)cnt.v_free_count) / 1048576);
234 * Set up buffers, so they can be used to read disk labels.
237 vm_pager_bufferinit();
243 * Send an interrupt to process.
245 * Stack is set up to allow sigcode stored
246 * at top to call routine, followed by kcall
247 * to sigreturn routine below. After sigreturn
248 * resets the signal mask, the stack, and the
249 * frame pointer, it returns to the user
253 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
255 struct sigframe sf, *sfp;
260 struct trapframe *regs;
266 PROC_LOCK_ASSERT(p, MA_OWNED);
267 sig = ksi->ksi_signo;
269 mtx_assert(&psp->ps_mtx, MA_OWNED);
271 oonstack = sigonstack(regs->tf_rsp);
273 /* Save user context. */
274 bzero(&sf, sizeof(sf));
275 sf.sf_uc.uc_sigmask = *mask;
276 sf.sf_uc.uc_stack = td->td_sigstk;
277 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
278 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
279 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
280 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
281 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
282 get_fpcontext(td, &sf.sf_uc.uc_mcontext);
285 /* Allocate space for the signal handler context. */
286 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
287 SIGISMEMBER(psp->ps_sigonstack, sig)) {
288 sp = td->td_sigstk.ss_sp +
289 td->td_sigstk.ss_size - sizeof(struct sigframe);
290 #if defined(COMPAT_43)
291 td->td_sigstk.ss_flags |= SS_ONSTACK;
294 sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
295 /* Align to 16 bytes. */
296 sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);
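/*
 * The 128 bytes skipped below the interrupted %rsp correspond to the
 * amd64 ABI red zone, which the interrupted code may still be using;
 * masking off the low four bits then rounds the frame down to the
 * 16-byte alignment the ABI expects.
 */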
298 /* Translate the signal if appropriate. */
299 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
300 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
302 /* Build the argument list for the signal handler. */
303 regs->tf_rdi = sig; /* arg 1 in %rdi */
304 regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */
305 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
306 /* Signal handler installed with SA_SIGINFO. */
307 regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */
308 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
310 /* Fill in POSIX parts */
311 sf.sf_si = ksi->ksi_info;
312 sf.sf_si.si_signo = sig; /* maybe a translated signal */
313 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
315 /* Old FreeBSD-style arguments. */
316 regs->tf_rsi = ksi->ksi_code; /* arg 2 in %rsi */
317 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
318 sf.sf_ahu.sf_handler = catcher;
320 mtx_unlock(&psp->ps_mtx);
324 * Copy the sigframe out to the user's stack.
326 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
328 printf("process %ld has trashed its stack\n", (long)p->p_pid);
334 regs->tf_rsp = (long)sfp;
335 regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
336 regs->tf_rflags &= ~PSL_T;
337 regs->tf_cs = _ucodesel;
339 mtx_lock(&psp->ps_mtx);
343 * System call to clean up state after a signal
344 * has been taken. Reset signal mask and
345 * stack state from context left by sendsig (above).
346 * Return to previous pc and psl as specified by
347 * context left by sendsig. Check carefully to
348 * make sure that the user has not modified the
349 * state to gain improper privileges.
356 struct sigreturn_args /* {
357 const struct __ucontext *sigcntxp;
361 struct proc *p = td->td_proc;
362 struct trapframe *regs;
363 const ucontext_t *ucp;
368 error = copyin(uap->sigcntxp, &uc, sizeof(uc));
373 rflags = ucp->uc_mcontext.mc_rflags;
375 * Don't allow users to change privileged or reserved flags.
378 * XXX do allow users to change the privileged flag PSL_RF.
379 * The cpu sets PSL_RF in tf_rflags for faults. Debuggers
380 * should sometimes set it there too. tf_rflags is kept in
381 * the signal context during signal handling and there is no
382 * other place to remember it, so the PSL_RF bit may be
383 * corrupted by the signal handler without us knowing.
384 * Corruption of the PSL_RF bit at worst causes one more or
385 * one less debugger trap, so allowing it is fairly harmless.
387 if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
388 printf("sigreturn: rflags = 0x%lx\n", rflags);
393 * Don't allow users to load a valid privileged %cs. Let the
394 * hardware check for invalid selectors, excess privilege in
395 * other selectors, invalid %rip values and invalid %rsp values.
397 cs = ucp->uc_mcontext.mc_cs;
398 if (!CS_SECURE(cs)) {
399 printf("sigreturn: cs = 0x%x\n", cs);
400 ksiginfo_init_trap(&ksi);
401 ksi.ksi_signo = SIGBUS;
402 ksi.ksi_code = BUS_OBJERR;
403 ksi.ksi_trapno = T_PROTFLT;
404 ksi.ksi_addr = (void *)regs->tf_rip;
405 trapsignal(td, &ksi);
409 ret = set_fpcontext(td, &ucp->uc_mcontext);
412 bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
415 #if defined(COMPAT_43)
416 if (ucp->uc_mcontext.mc_onstack & 1)
417 td->td_sigstk.ss_flags |= SS_ONSTACK;
419 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
422 td->td_sigmask = ucp->uc_sigmask;
423 SIG_CANTMASK(td->td_sigmask);
426 td->td_pcb->pcb_flags |= PCB_FULLCTX;
427 return (EJUSTRETURN);
430 #ifdef COMPAT_FREEBSD4
432 freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
435 return sigreturn(td, (struct sigreturn_args *)uap);
441 * Machine dependent boot() routine
443 * I haven't seen anything to put here yet
444 * Possibly some stuff might be grafted back here from boot()
451 /* Get current clock frequency for the given cpu id. */
453 cpu_est_clockrate(int cpu_id, uint64_t *rate)
458 if (pcpu_find(cpu_id) == NULL || rate == NULL)
461 /* If we're booting, trust the rate calibrated moments ago. */
468 /* Schedule ourselves on the indicated cpu. */
469 thread_lock(curthread);
470 sched_bind(curthread, cpu_id);
471 thread_unlock(curthread);
474 /* Calibrate by measuring a short delay. */
475 reg = intr_disable();
482 thread_lock(curthread);
483 sched_unbind(curthread);
484 thread_unlock(curthread);
488 * Calculate the difference in readings, convert to Hz, and
489 * subtract 0.5% of the total. Empirical testing has shown that
490 * overhead in DELAY() works out to approximately this value.
493 *rate = tsc2 * 1000 - tsc2 * 5;
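/*
 * For example, if the TSC advanced by 2,000,000 counts across a 1 ms
 * DELAY(), the result is 2,000,000 * 1000 - 2,000,000 * 5 =
 * 1,990,000,000 Hz: the raw 2.0 GHz estimate less the 0.5% DELAY()
 * overhead.
 */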
498 * Shut down the CPU as much as possible
508 * Hook to idle the CPU when possible. In the SMP case we default to
509 * off because a halted cpu will not currently pick up a new thread in the
510 * run queue until the next timer tick. If turned on this will result in
511 * approximately a 4.2% loss in real time performance in buildworld tests
512 * (but improves user and sys times oddly enough), and saves approximately
513 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
515 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
516 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
517 * Then we can have our cake and eat it too.
519 * XXX I'm turning it on for SMP as well by default for now. It seems to
520 * help lock contention somewhat, and this is critical for HTT. -Peter
522 static int cpu_idle_hlt = 1;
523 TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt);
524 SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
525 &cpu_idle_hlt, 0, "Idle loop HLT enable");
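/*
 * The knob can be preset from loader.conf via the machdep.cpu_idle_hlt
 * tunable, or flipped at runtime (the sysctl is CTLFLAG_RW), e.g.
 * "sysctl machdep.cpu_idle_hlt=0".
 */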
528 cpu_idle_default(void)
531 * we must absolutely guarantee that hlt is the
532 * very next instruction after sti, or we
533 * introduce a timing window.
535 __asm __volatile("sti; hlt");
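/*
 * This works because sti does not recognize interrupts until after the
 * instruction that follows it: a wakeup interrupt cannot be delivered
 * in the gap between re-enabling interrupts and halting, so it arrives
 * while the CPU is in hlt and wakes it instead of being lost.
 */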
539 * Note that we have to be careful here to avoid a race between checking
540 * sched_runnable() and actually halting. If we don't do this, we may waste
541 * the time between calling hlt and the next interrupt even though there
542 * is a runnable process.
549 if (mp_grab_cpu_hlt())
554 if (sched_runnable())
561 /* Other subsystems (e.g., ACPI) can hook this later. */
562 void (*cpu_idle_hook)(void) = cpu_idle_default;
565 * Clear registers on exec
568 exec_setregs(td, entry, stack, ps_strings)
574 struct trapframe *regs = td->td_frame;
575 struct pcb *pcb = td->td_pcb;
578 wrmsr(MSR_FSBASE, 0);
579 wrmsr(MSR_KGSBASE, 0); /* User value while we're in the kernel */
587 pcb->pcb_ds = _udatasel;
588 pcb->pcb_es = _udatasel;
589 pcb->pcb_fs = _udatasel;
590 pcb->pcb_gs = _udatasel;
592 bzero((char *)regs, sizeof(struct trapframe));
593 regs->tf_rip = entry;
594 regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
595 regs->tf_rdi = stack; /* argv */
596 regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
597 regs->tf_ss = _udatasel;
598 regs->tf_cs = _ucodesel;
601 * Reset the hardware debug registers if they were in use.
602 * They won't have any meaning for the newly exec'd process.
604 if (pcb->pcb_flags & PCB_DBREGS) {
611 if (pcb == PCPU_GET(curpcb)) {
613 * Clear the debug registers on the running
614 * CPU; otherwise they will end up affecting
615 * the next process we switch to.
619 pcb->pcb_flags &= ~PCB_DBREGS;
623 * Drop the FP state if we hold it, so that the process gets a
624 * clean FP state if it uses the FPU again.
636 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
637 * BSP. See the comments there about why we set them.
639 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
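/*
 * Roughly: CR0_TS makes the first FPU use trap so FPU state can be
 * handled lazily, CR0_MP pairs with TS for WAIT/FWAIT, CR0_NE selects
 * native FPU error reporting, CR0_WP makes the kernel honour page-level
 * write protection, and CR0_AM permits alignment checking for user code
 * that sets PSL_AC.
 */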
644 * Initialize amd64 and configure to run kernel
648 * Initialize segments & interrupt table
651 struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor table */
652 static struct gate_descriptor idt0[NIDT];
653 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
655 static char dblfault_stack[PAGE_SIZE] __aligned(16);
657 struct amd64tss common_tss[MAXCPU];
659 /* software prototypes -- in more palatable form */
660 struct soft_segment_descriptor gdt_segs[] = {
661 /* GNULL_SEL 0 Null Descriptor */
662 { 0x0, /* segment base address */
664 0, /* segment type */
665 0, /* segment descriptor priority level */
666 0, /* segment descriptor present */
668 0, /* default 32 vs 16 bit size */
669 0 /* limit granularity (byte/page units)*/ },
670 /* GCODE_SEL 1 Code Descriptor for kernel */
671 { 0x0, /* segment base address */
672 0xfffff, /* length - all address space */
673 SDT_MEMERA, /* segment type */
674 SEL_KPL, /* segment descriptor priority level */
675 1, /* segment descriptor present */
677 0, /* default 32 vs 16 bit size */
678 1 /* limit granularity (byte/page units)*/ },
679 /* GDATA_SEL 2 Data Descriptor for kernel */
680 { 0x0, /* segment base address */
681 0xfffff, /* length - all address space */
682 SDT_MEMRWA, /* segment type */
683 SEL_KPL, /* segment descriptor priority level */
684 1, /* segment descriptor present */
686 0, /* default 32 vs 16 bit size */
687 1 /* limit granularity (byte/page units)*/ },
688 /* GUCODE32_SEL 3 32 bit Code Descriptor for user */
689 { 0x0, /* segment base address */
690 0xfffff, /* length - all address space */
691 SDT_MEMERA, /* segment type */
692 SEL_UPL, /* segment descriptor priority level */
693 1, /* segment descriptor present */
695 1, /* default 32 vs 16 bit size */
696 1 /* limit granularity (byte/page units)*/ },
697 /* GUDATA_SEL 4 32/64 bit Data Descriptor for user */
698 { 0x0, /* segment base address */
699 0xfffff, /* length - all address space */
700 SDT_MEMRWA, /* segment type */
701 SEL_UPL, /* segment descriptor priority level */
702 1, /* segment descriptor present */
704 1, /* default 32 vs 16 bit size */
705 1 /* limit granularity (byte/page units)*/ },
706 /* GUCODE_SEL 5 64 bit Code Descriptor for user */
707 { 0x0, /* segment base address */
708 0xfffff, /* length - all address space */
709 SDT_MEMERA, /* segment type */
710 SEL_UPL, /* segment descriptor priority level */
711 1, /* segment descriptor present */
713 0, /* default 32 vs 16 bit size */
714 1 /* limit granularity (byte/page units)*/ },
715 /* GPROC0_SEL 6 Proc 0 Tss Descriptor */
717 0x0, /* segment base address */
718 sizeof(struct amd64tss)-1,/* length - all address space */
719 SDT_SYSTSS, /* segment type */
720 SEL_KPL, /* segment descriptor priority level */
721 1, /* segment descriptor present */
723 0, /* unused - default 32 vs 16 bit size */
724 0 /* limit granularity (byte/page units)*/ },
725 /* Actually, the TSS is a system descriptor which is double size */
726 { 0x0, /* segment base address */
728 0, /* segment type */
729 0, /* segment descriptor priority level */
730 0, /* segment descriptor present */
732 0, /* default 32 vs 16 bit size */
733 0 /* limit granularity (byte/page units)*/ },
734 /* GUGS32_SEL 8 32 bit GS Descriptor for user */
735 { 0x0, /* segment base address */
736 0xfffff, /* length - all address space */
737 SDT_MEMRWA, /* segment type */
738 SEL_UPL, /* segment descriptor priority level */
739 1, /* segment descriptor present */
741 1, /* default 32 vs 16 bit size */
742 1 /* limit granularity (byte/page units)*/ },
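/*
 * A selector is built as (index << 3) | RPL (see GSEL()), so with this
 * layout the 64-bit user code selector GSEL(GUCODE_SEL, SEL_UPL) works
 * out to (5 << 3) | 3 = 0x2b, for example.
 */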
746 setidt(idx, func, typ, dpl, ist)
753 struct gate_descriptor *ip;
756 ip->gd_looffset = (uintptr_t)func;
757 ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
763 ip->gd_hioffset = ((uintptr_t)func)>>16 ;
767 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
768 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
769 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
770 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
771 IDTVEC(xmm), IDTVEC(dblfault),
772 IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
776 struct user_segment_descriptor *sd;
777 struct soft_segment_descriptor *ssd;
780 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
781 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
782 ssd->ssd_type = sd->sd_type;
783 ssd->ssd_dpl = sd->sd_dpl;
784 ssd->ssd_p = sd->sd_p;
785 ssd->ssd_long = sd->sd_long;
786 ssd->ssd_def32 = sd->sd_def32;
787 ssd->ssd_gran = sd->sd_gran;
792 struct soft_segment_descriptor *ssd;
793 struct user_segment_descriptor *sd;
796 sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
797 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
798 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
799 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
800 sd->sd_type = ssd->ssd_type;
801 sd->sd_dpl = ssd->ssd_dpl;
802 sd->sd_p = ssd->ssd_p;
803 sd->sd_long = ssd->ssd_long;
804 sd->sd_def32 = ssd->ssd_def32;
805 sd->sd_gran = ssd->ssd_gran;
810 struct soft_segment_descriptor *ssd;
811 struct system_segment_descriptor *sd;
814 sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
815 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
816 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
817 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
818 sd->sd_type = ssd->ssd_type;
819 sd->sd_dpl = ssd->ssd_dpl;
820 sd->sd_p = ssd->ssd_p;
821 sd->sd_gran = ssd->ssd_gran;
824 #if !defined(DEV_ATPIC) && defined(DEV_ISA)
825 #include <isa/isavar.h>
827 isa_irq_pending(void)
837 * Populate the (physmap) array with base/bound pairs describing the
838 * available physical memory in the system, then test this memory and
839 * build the phys_avail array describing the actually-available memory.
841 * If we cannot accurately determine the physical memory map, then use
842 * the value from the 0xE801 call and, failing that, the RTC.
844 * Total memory size may be set by the kernel environment variable
845 * hw.physmem or the compile-time define MAXMEM.
847 * XXX first should be vm_paddr_t.
850 getmemsize(caddr_t kmdp, u_int64_t first)
852 int i, off, physmap_idx, pa_indx, da_indx;
853 vm_paddr_t pa, physmap[PHYSMAP_SIZE];
854 u_long physmem_tunable;
856 struct bios_smap *smapbase, *smap, *smapend;
858 quad_t dcons_addr, dcons_size;
860 bzero(physmap, sizeof(physmap));
865 * get memory map from INT 15:E820, kindly supplied by the loader.
867 * subr_module.c says:
868 * "Consumer may safely assume that size value precedes data."
869 * i.e., an int32_t immediately precedes smap.
871 smapbase = (struct bios_smap *)preload_search_info(kmdp,
872 MODINFO_METADATA | MODINFOMD_SMAP);
873 if (smapbase == NULL)
874 panic("No BIOS smap info from loader!");
876 smapsize = *((u_int32_t *)smapbase - 1);
877 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
879 for (smap = smapbase; smap < smapend; smap++) {
880 if (boothowto & RB_VERBOSE)
881 printf("SMAP type=%02x base=%016lx len=%016lx\n",
882 smap->type, smap->base, smap->length);
884 if (smap->type != SMAP_TYPE_MEMORY)
887 if (smap->length == 0)
890 for (i = 0; i <= physmap_idx; i += 2) {
891 if (smap->base < physmap[i + 1]) {
892 if (boothowto & RB_VERBOSE)
894 "Overlapping or non-monotonic memory region, ignoring second region\n");
899 if (smap->base == physmap[physmap_idx + 1]) {
900 physmap[physmap_idx + 1] += smap->length;
905 if (physmap_idx == PHYSMAP_SIZE) {
907 "Too many segments in the physical address map, giving up\n");
910 physmap[physmap_idx] = smap->base;
911 physmap[physmap_idx + 1] = smap->base + smap->length;
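/*
 * Illustratively, SMAP entries of type memory such as (base 0x0,
 * len 0x9fc00) and (base 0x100000, len 0x7fef0000) end up as the
 * physmap pairs {0x0, 0x9fc00} and {0x100000, 0x7fff0000}; an entry
 * that starts exactly at the previous end is merged by the test above.
 */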
915 * Find the 'base memory' segment for SMP
918 for (i = 0; i <= physmap_idx; i += 2) {
919 if (physmap[i] == 0x00000000) {
920 basemem = physmap[i + 1] / 1024;
925 panic("BIOS smap did not include a basemem segment!");
928 /* make hole for AP bootstrap code */
929 physmap[1] = mp_bootaddress(physmap[1] / 1024);
933 * Maxmem isn't the "maximum memory", it's one larger than the
934 * highest page of the physical address space. It should be
935 * called something like "Maxphyspage". We may adjust this
936 * based on ``hw.physmem'' and the results of the memory test.
938 Maxmem = atop(physmap[physmap_idx + 1]);
944 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
945 Maxmem = atop(physmem_tunable);
948 * Don't allow MAXMEM or hw.physmem to extend the amount of memory
951 if (Maxmem > atop(physmap[physmap_idx + 1]))
952 Maxmem = atop(physmap[physmap_idx + 1]);
954 if (atop(physmap[physmap_idx + 1]) != Maxmem &&
955 (boothowto & RB_VERBOSE))
956 printf("Physical memory use set to %ldK\n", Maxmem * 4);
958 /* call pmap initialization to make new kernel address space */
959 pmap_bootstrap(&first);
962 * Size up each available chunk of physical memory.
964 physmap[0] = PAGE_SIZE; /* mask off page 0 */
967 phys_avail[pa_indx++] = physmap[0];
968 phys_avail[pa_indx] = physmap[0];
969 dump_avail[da_indx] = physmap[0];
973 * Get dcons buffer address
975 if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
976 getenv_quad("dcons.size", &dcons_size) == 0)
980 * physmap is in bytes, so when converting to page boundaries,
981 * round up the start address and round down the end address.
983 for (i = 0; i <= physmap_idx; i += 2) {
986 end = ptoa((vm_paddr_t)Maxmem);
987 if (physmap[i + 1] < end)
988 end = trunc_page(physmap[i + 1]);
989 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
990 int tmp, page_bad, full;
991 int *ptr = (int *)CADDR1;
995 * block out kernel memory as not available.
997 if (pa >= 0x100000 && pa < first)
1001 * block out dcons buffer
1004 && pa >= trunc_page(dcons_addr)
1005 && pa < dcons_addr + dcons_size)
1011 * map page into kernel: valid, read/write, non-cacheable
1013 *pte = pa | PG_V | PG_RW | PG_N;
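/*
 * PG_N keeps the test mapping uncached so the patterns below really
 * hit DRAM and are read back from it, rather than being satisfied by
 * the CPU cache and hiding bad memory.
 */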
1018 * Test for alternating 1's and 0's
1020 *(volatile int *)ptr = 0xaaaaaaaa;
1021 if (*(volatile int *)ptr != 0xaaaaaaaa)
1024 * Test for alternating 0's and 1's
1026 *(volatile int *)ptr = 0x55555555;
1027 if (*(volatile int *)ptr != 0x55555555)
1032 *(volatile int *)ptr = 0xffffffff;
1033 if (*(volatile int *)ptr != 0xffffffff)
1038 *(volatile int *)ptr = 0x0;
1039 if (*(volatile int *)ptr != 0x0)
1042 * Restore original value.
1047 * Adjust array of valid/good pages.
1049 if (page_bad == TRUE)
1052 * If this good page is a continuation of the
1053 * previous set of good pages, then just increase
1054 * the end pointer. Otherwise start a new chunk.
1055 * Note that "end" is an exclusive upper bound,
1056 * making the range >= start and < end.
1057 * If we're also doing a speculative memory
1058 * test and we are at or past the end, bump up Maxmem
1059 * so that we keep going. The first bad page
1060 * will terminate the loop.
1062 if (phys_avail[pa_indx] == pa) {
1063 phys_avail[pa_indx] += PAGE_SIZE;
1066 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
1068 "Too many holes in the physical address space, giving up\n");
1073 phys_avail[pa_indx++] = pa; /* start */
1074 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
1078 if (dump_avail[da_indx] == pa) {
1079 dump_avail[da_indx] += PAGE_SIZE;
1082 if (da_indx == DUMP_AVAIL_ARRAY_END) {
1086 dump_avail[da_indx++] = pa; /* start */
1087 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
1099 * The last chunk must contain at least one page plus the message
1100 * buffer to avoid complicating other code (message buffer address
1101 * calculation, etc.).
1103 while (phys_avail[pa_indx - 1] + PAGE_SIZE +
1104 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
1105 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
1106 phys_avail[pa_indx--] = 0;
1107 phys_avail[pa_indx--] = 0;
1110 Maxmem = atop(phys_avail[pa_indx]);
1112 /* Trim off space for the message buffer. */
1113 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
1115 /* Map the message buffer. */
1116 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
1117 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
1122 hammer_time(u_int64_t modulep, u_int64_t physfree)
1130 thread0.td_kstack = physfree + KERNBASE;
1131 bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE);
1132 physfree += KSTACK_PAGES * PAGE_SIZE;
1133 thread0.td_pcb = (struct pcb *)
1134 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
1137 * This may be done better later if it gets more high level
1138 * components in it. If so, just link td->td_proc here.
1140 proc_linkup(&proc0, &thread0);
1142 preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
1143 preload_bootstrap_relocate(KERNBASE);
1144 kmdp = preload_search_by_type("elf kernel");
1146 kmdp = preload_search_by_type("elf64 kernel");
1147 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
1148 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE;
1150 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
1151 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
1154 /* Init basic tunables, hz etc */
1158 * make gdt memory segments
1160 gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];
1162 for (x = 0; x < NGDT; x++) {
1163 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
1164 ssdtosd(&gdt_segs[x], &gdt[x]);
1166 ssdtosyssd(&gdt_segs[GPROC0_SEL],
1167 (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
1169 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
1170 r_gdt.rd_base = (long) gdt;
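/*
 * r_gdt is the pseudo-descriptor the CPU consumes: a 16-bit limit (the
 * offset of the last valid byte, hence the "- 1") plus the linear base
 * of the table; it takes effect once loaded into GDTR with lgdt.
 */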
1174 wrmsr(MSR_FSBASE, 0); /* User value */
1175 wrmsr(MSR_GSBASE, (u_int64_t)pc);
1176 wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */
1178 pcpu_init(pc, 0, sizeof(struct pcpu));
1179 PCPU_SET(prvspace, pc);
1180 PCPU_SET(curthread, &thread0);
1181 PCPU_SET(curpcb, thread0.td_pcb);
1182 PCPU_SET(tssp, &common_tss[0]);
1185 * Initialize mutexes.
1187 * icu_lock: in order to allow an interrupt to occur in a critical
1188 * section, to set pcpu->ipending (etc...) properly, we
1189 * must be able to get the icu lock, so it can't be under witness.
1193 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
1196 for (x = 0; x < NIDT; x++)
1197 setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
1198 setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0);
1199 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0);
1200 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 1);
1201 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0);
1202 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0);
1203 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0);
1204 setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0);
1205 setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0);
1206 setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
1207 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0);
1208 setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0);
1209 setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0);
1210 setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0);
1211 setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0);
1212 setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0);
1213 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0);
1214 setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0);
1215 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0);
1216 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0);
1218 r_idt.rd_limit = sizeof(idt0) - 1;
1219 r_idt.rd_base = (long) idt;
1223 * Initialize the i8254 before the console so that console
1224 * initialization can use DELAY().
1229 * Initialize the console before we print anything out.
1238 /* Reset and mask the atpics and leave them shut down. */
1242 * Point the ICU spurious interrupt vectors at the APIC spurious
1243 * interrupt handler.
1245 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
1246 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
1249 #error "have you forgotten the isa device?";
1255 if (boothowto & RB_KDB)
1256 kdb_enter("Boot flags requested debugger");
1259 identify_cpu(); /* Final stage of CPU initialization */
1260 initializecpu(); /* Initialize CPU registers */
1262 /* make an initial tss so cpu can get interrupt stack on syscall! */
1263 common_tss[0].tss_rsp0 = thread0.td_kstack + \
1264 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb);
1265 /* Ensure the stack is aligned to 16 bytes */
1266 common_tss[0].tss_rsp0 &= ~0xFul;
1267 PCPU_SET(rsp0, common_tss[0].tss_rsp0);
1269 /* doublefault stack space, runs on ist1 */
1270 common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];
1272 /* Set the IO permission bitmap (empty due to tss seg limit) */
1273 common_tss[0].tss_iobase = sizeof(struct amd64tss);
1275 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
1278 /* Set up the fast syscall stuff */
1279 msr = rdmsr(MSR_EFER) | EFER_SCE;
1280 wrmsr(MSR_EFER, msr);
1281 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
1282 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
1283 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
1284 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
1285 wrmsr(MSR_STAR, msr);
1286 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
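/*
 * MSR_STAR packs the SYSCALL kernel CS/SS selector base into bits 47:32
 * and the SYSRET user selector base into bits 63:48; the hardware takes
 * 32-bit user %cs at that base, %ss at base + 8 and 64-bit user %cs at
 * base + 16, which matches the GUCODE32/GUDATA/GUCODE ordering in the
 * GDT above.  The bits listed in MSR_SF_MASK are cleared from rflags on
 * SYSCALL entry, notably PSL_I, so the handler starts with interrupts
 * disabled.
 */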
1288 getmemsize(kmdp, physfree);
1289 init_param2(physmem);
1291 /* now running on new page tables, configured, and u/iom is accessible */
1293 msgbufinit(msgbufp, MSGBUF_SIZE);
1296 /* transfer to user mode */
1298 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
1299 _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
1300 _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
1302 /* set up proc 0's pcb */
1303 thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
1304 thread0.td_pcb->pcb_cr3 = KPML4phys;
1305 thread0.td_frame = &proc0_tf;
1307 env = getenv("kernelname");
1309 strlcpy(kernelname, env, sizeof(kernelname));
1311 /* Location of kernel stack for locore */
1312 return ((u_int64_t)thread0.td_pcb);
1316 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
1319 pcpu->pc_acpi_id = 0xffffffff;
1323 spinlock_enter(void)
1328 if (td->td_md.md_spinlock_count == 0)
1329 td->td_md.md_saved_flags = intr_disable();
1330 td->td_md.md_spinlock_count++;
1341 td->td_md.md_spinlock_count--;
1342 if (td->td_md.md_spinlock_count == 0)
1343 intr_restore(td->td_md.md_saved_flags);
1347 * Construct a PCB from a trapframe. This is called from kdb_trap() where
1348 * we want to start a backtrace from the function that caused us to enter
1349 * the debugger. We have the context in the trapframe, but base the trace
1350 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
1351 * enough for a backtrace.
1354 makectx(struct trapframe *tf, struct pcb *pcb)
1357 pcb->pcb_r12 = tf->tf_r12;
1358 pcb->pcb_r13 = tf->tf_r13;
1359 pcb->pcb_r14 = tf->tf_r14;
1360 pcb->pcb_r15 = tf->tf_r15;
1361 pcb->pcb_rbp = tf->tf_rbp;
1362 pcb->pcb_rbx = tf->tf_rbx;
1363 pcb->pcb_rip = tf->tf_rip;
1364 pcb->pcb_rsp = (ISPL(tf->tf_cs)) ? tf->tf_rsp : (long)(tf + 1) - 8;
1368 ptrace_set_pc(struct thread *td, unsigned long addr)
1370 td->td_frame->tf_rip = addr;
1375 ptrace_single_step(struct thread *td)
1377 td->td_frame->tf_rflags |= PSL_T;
1382 ptrace_clear_single_step(struct thread *td)
1384 td->td_frame->tf_rflags &= ~PSL_T;
1389 fill_regs(struct thread *td, struct reg *regs)
1391 struct trapframe *tp;
1394 regs->r_r15 = tp->tf_r15;
1395 regs->r_r14 = tp->tf_r14;
1396 regs->r_r13 = tp->tf_r13;
1397 regs->r_r12 = tp->tf_r12;
1398 regs->r_r11 = tp->tf_r11;
1399 regs->r_r10 = tp->tf_r10;
1400 regs->r_r9 = tp->tf_r9;
1401 regs->r_r8 = tp->tf_r8;
1402 regs->r_rdi = tp->tf_rdi;
1403 regs->r_rsi = tp->tf_rsi;
1404 regs->r_rbp = tp->tf_rbp;
1405 regs->r_rbx = tp->tf_rbx;
1406 regs->r_rdx = tp->tf_rdx;
1407 regs->r_rcx = tp->tf_rcx;
1408 regs->r_rax = tp->tf_rax;
1409 regs->r_rip = tp->tf_rip;
1410 regs->r_cs = tp->tf_cs;
1411 regs->r_rflags = tp->tf_rflags;
1412 regs->r_rsp = tp->tf_rsp;
1413 regs->r_ss = tp->tf_ss;
1418 set_regs(struct thread *td, struct reg *regs)
1420 struct trapframe *tp;
1424 rflags = regs->r_rflags & 0xffffffff;
1425 if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
1427 tp->tf_r15 = regs->r_r15;
1428 tp->tf_r14 = regs->r_r14;
1429 tp->tf_r13 = regs->r_r13;
1430 tp->tf_r12 = regs->r_r12;
1431 tp->tf_r11 = regs->r_r11;
1432 tp->tf_r10 = regs->r_r10;
1433 tp->tf_r9 = regs->r_r9;
1434 tp->tf_r8 = regs->r_r8;
1435 tp->tf_rdi = regs->r_rdi;
1436 tp->tf_rsi = regs->r_rsi;
1437 tp->tf_rbp = regs->r_rbp;
1438 tp->tf_rbx = regs->r_rbx;
1439 tp->tf_rdx = regs->r_rdx;
1440 tp->tf_rcx = regs->r_rcx;
1441 tp->tf_rax = regs->r_rax;
1442 tp->tf_rip = regs->r_rip;
1443 tp->tf_cs = regs->r_cs;
1444 tp->tf_rflags = rflags;
1445 tp->tf_rsp = regs->r_rsp;
1446 tp->tf_ss = regs->r_ss;
1447 td->td_pcb->pcb_flags |= PCB_FULLCTX;
1451 /* XXX check all this stuff! */
1452 /* externalize from sv_xmm */
1454 fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
1456 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
1457 struct envxmm *penv_xmm = &sv_xmm->sv_env;
1461 bzero(fpregs, sizeof(*fpregs));
1463 /* FPU control/status */
1464 penv_fpreg->en_cw = penv_xmm->en_cw;
1465 penv_fpreg->en_sw = penv_xmm->en_sw;
1466 penv_fpreg->en_tw = penv_xmm->en_tw;
1467 penv_fpreg->en_opcode = penv_xmm->en_opcode;
1468 penv_fpreg->en_rip = penv_xmm->en_rip;
1469 penv_fpreg->en_rdp = penv_xmm->en_rdp;
1470 penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
1471 penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;
1474 for (i = 0; i < 8; ++i)
1475 bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);
1478 for (i = 0; i < 16; ++i)
1479 bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
1482 /* internalize from fpregs into sv_xmm */
1484 set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
1486 struct envxmm *penv_xmm = &sv_xmm->sv_env;
1487 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
1491 /* FPU control/status */
1492 penv_xmm->en_cw = penv_fpreg->en_cw;
1493 penv_xmm->en_sw = penv_fpreg->en_sw;
1494 penv_xmm->en_tw = penv_fpreg->en_tw;
1495 penv_xmm->en_opcode = penv_fpreg->en_opcode;
1496 penv_xmm->en_rip = penv_fpreg->en_rip;
1497 penv_xmm->en_rdp = penv_fpreg->en_rdp;
1498 penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
1499 penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;
1502 for (i = 0; i < 8; ++i)
1503 bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);
1506 for (i = 0; i < 16; ++i)
1507 bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
1510 /* externalize from td->pcb */
1512 fill_fpregs(struct thread *td, struct fpreg *fpregs)
1515 fill_fpregs_xmm(&td->td_pcb->pcb_save, fpregs);
1519 /* internalize to td->pcb */
1521 set_fpregs(struct thread *td, struct fpreg *fpregs)
1524 set_fpregs_xmm(fpregs, &td->td_pcb->pcb_save);
1529 * Get machine context.
1532 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
1534 struct trapframe *tp;
1537 PROC_LOCK(curthread->td_proc);
1538 mcp->mc_onstack = sigonstack(tp->tf_rsp);
1539 PROC_UNLOCK(curthread->td_proc);
1540 mcp->mc_r15 = tp->tf_r15;
1541 mcp->mc_r14 = tp->tf_r14;
1542 mcp->mc_r13 = tp->tf_r13;
1543 mcp->mc_r12 = tp->tf_r12;
1544 mcp->mc_r11 = tp->tf_r11;
1545 mcp->mc_r10 = tp->tf_r10;
1546 mcp->mc_r9 = tp->tf_r9;
1547 mcp->mc_r8 = tp->tf_r8;
1548 mcp->mc_rdi = tp->tf_rdi;
1549 mcp->mc_rsi = tp->tf_rsi;
1550 mcp->mc_rbp = tp->tf_rbp;
1551 mcp->mc_rbx = tp->tf_rbx;
1552 mcp->mc_rcx = tp->tf_rcx;
1553 mcp->mc_rflags = tp->tf_rflags;
1554 if (flags & GET_MC_CLEAR_RET) {
1557 mcp->mc_rflags &= ~PSL_C;
1559 mcp->mc_rax = tp->tf_rax;
1560 mcp->mc_rdx = tp->tf_rdx;
1562 mcp->mc_rip = tp->tf_rip;
1563 mcp->mc_cs = tp->tf_cs;
1564 mcp->mc_rsp = tp->tf_rsp;
1565 mcp->mc_ss = tp->tf_ss;
1566 mcp->mc_len = sizeof(*mcp);
1567 get_fpcontext(td, mcp);
1572 * Set machine context.
1574 * However, we don't set any but the user modifiable flags, and we won't
1575 * touch the cs selector.
1578 set_mcontext(struct thread *td, const mcontext_t *mcp)
1580 struct trapframe *tp;
1585 if (mcp->mc_len != sizeof(*mcp))
1587 rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
1588 (tp->tf_rflags & ~PSL_USERCHANGE);
1589 ret = set_fpcontext(td, mcp);
1592 tp->tf_r15 = mcp->mc_r15;
1593 tp->tf_r14 = mcp->mc_r14;
1594 tp->tf_r13 = mcp->mc_r13;
1595 tp->tf_r12 = mcp->mc_r12;
1596 tp->tf_r11 = mcp->mc_r11;
1597 tp->tf_r10 = mcp->mc_r10;
1598 tp->tf_r9 = mcp->mc_r9;
1599 tp->tf_r8 = mcp->mc_r8;
1600 tp->tf_rdi = mcp->mc_rdi;
1601 tp->tf_rsi = mcp->mc_rsi;
1602 tp->tf_rbp = mcp->mc_rbp;
1603 tp->tf_rbx = mcp->mc_rbx;
1604 tp->tf_rdx = mcp->mc_rdx;
1605 tp->tf_rcx = mcp->mc_rcx;
1606 tp->tf_rax = mcp->mc_rax;
1607 tp->tf_rip = mcp->mc_rip;
1608 tp->tf_rflags = rflags;
1609 tp->tf_rsp = mcp->mc_rsp;
1610 tp->tf_ss = mcp->mc_ss;
1611 td->td_pcb->pcb_flags |= PCB_FULLCTX;
1616 get_fpcontext(struct thread *td, mcontext_t *mcp)
1619 mcp->mc_ownedfp = fpugetregs(td, (struct savefpu *)&mcp->mc_fpstate);
1620 mcp->mc_fpformat = fpuformat();
1624 set_fpcontext(struct thread *td, const mcontext_t *mcp)
1626 struct savefpu *fpstate;
1628 if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
1630 else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
1632 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
1633 /* We don't care what state is left in the FPU or PCB. */
1635 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
1636 mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
1638 * XXX we violate the dubious requirement that fpusetregs()
1639 * be called with interrupts disabled.
1640 * XXX obsolete on trap-16 systems?
1642 fpstate = (struct savefpu *)&mcp->mc_fpstate;
1643 fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
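/*
 * The masking above strips MXCSR bits the CPU does not implement;
 * loading reserved MXCSR bits would raise a GP fault in the kernel, so
 * a corrupt or malicious context is sanitized before being restored.
 */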
1644 fpusetregs(td, fpstate);
1651 fpstate_drop(struct thread *td)
1656 if (PCPU_GET(fpcurthread) == td)
1659 * XXX force a full drop of the fpu. The above only drops it if we
1662 * XXX I don't much like fpugetregs()'s semantics of doing a full
1663 * drop. Dropping only to the pcb matches fnsave's behaviour.
1664 * We only need to drop to !PCB_INITDONE in sendsig(). But
1665 * sendsig() is the only caller of fpugetregs()... perhaps we just
1666 * have too many layers.
1668 curthread->td_pcb->pcb_flags &= ~PCB_FPUINITDONE;
1673 fill_dbregs(struct thread *td, struct dbreg *dbregs)
1678 dbregs->dr[0] = rdr0();
1679 dbregs->dr[1] = rdr1();
1680 dbregs->dr[2] = rdr2();
1681 dbregs->dr[3] = rdr3();
1682 dbregs->dr[6] = rdr6();
1683 dbregs->dr[7] = rdr7();
1686 dbregs->dr[0] = pcb->pcb_dr0;
1687 dbregs->dr[1] = pcb->pcb_dr1;
1688 dbregs->dr[2] = pcb->pcb_dr2;
1689 dbregs->dr[3] = pcb->pcb_dr3;
1690 dbregs->dr[6] = pcb->pcb_dr6;
1691 dbregs->dr[7] = pcb->pcb_dr7;
1707 set_dbregs(struct thread *td, struct dbreg *dbregs)
1713 load_dr0(dbregs->dr[0]);
1714 load_dr1(dbregs->dr[1]);
1715 load_dr2(dbregs->dr[2]);
1716 load_dr3(dbregs->dr[3]);
1717 load_dr6(dbregs->dr[6]);
1718 load_dr7(dbregs->dr[7]);
1721 * Don't let an illegal value for dr7 get set. Specifically,
1722 * check for undefined settings. Setting these bit patterns
1723 * results in undefined behaviour and can lead to an unexpected
1724 * TRCTRAP or a general protection fault right here.
1725 * Upper bits of dr6 and dr7 must not be set
1727 for (i = 0; i < 4; i++) {
1728 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
1730 if (td->td_frame->tf_cs == _ucode32sel &&
1731 DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
1734 if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
1735 (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
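/*
 * Access type 0x02 is the "I/O read/write" breakpoint, only defined
 * when CR4.DE is set; an 8-byte length is not meaningful for a 32-bit
 * process; and the upper 32 bits of dr6/dr7 are reserved and must be
 * zero.  Hence these three rejections.
 */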
1741 * Don't let a process set a breakpoint that is not within the
1742 * process's address space. If a process could do this, it
1743 * could halt the system by setting a breakpoint in the kernel
1744 * (if ddb was enabled). Thus, we need to check to make sure
1745 * that no breakpoints are being enabled for addresses outside
1746 * the process's address space.
1748 * XXX - what about when the watched area of the user's
1749 * address space is written into from within the kernel
1750 * ... wouldn't that still cause a breakpoint to be generated
1751 * from within kernel mode?
1754 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
1755 /* dr0 is enabled */
1756 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
1759 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
1760 /* dr1 is enabled */
1761 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
1764 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
1765 /* dr2 is enabled */
1766 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
1769 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
1770 /* dr3 is enabled */
1771 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
1775 pcb->pcb_dr0 = dbregs->dr[0];
1776 pcb->pcb_dr1 = dbregs->dr[1];
1777 pcb->pcb_dr2 = dbregs->dr[2];
1778 pcb->pcb_dr3 = dbregs->dr[3];
1779 pcb->pcb_dr6 = dbregs->dr[6];
1780 pcb->pcb_dr7 = dbregs->dr[7];
1782 pcb->pcb_flags |= PCB_DBREGS;
1792 load_dr7(0); /* Turn off the control bits first */
1801 * Return > 0 if a hardware breakpoint has been hit, and the
1802 * breakpoint was in user space. Return 0 otherwise.
1805 user_dbreg_trap(void)
1807 u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */
1808 u_int64_t bp; /* breakpoint bits extracted from dr6 */
1809 int nbp; /* number of breakpoints that triggered */
1810 caddr_t addr[4]; /* breakpoint addresses */
1814 if ((dr7 & 0x000000ff) == 0) {
1816 * all GE and LE bits in the dr7 register are zero,
1817 * thus the trap couldn't have been caused by the
1818 * hardware debug registers
1825 bp = dr6 & 0x0000000f;
1829 * None of the breakpoint bits are set, meaning this
1830 * trap was not caused by any of the debug registers
1836 * at least one of the breakpoints was hit; check to see
1837 * which ones, and whether any of them are user space addresses
1841 addr[nbp++] = (caddr_t)rdr0();
1844 addr[nbp++] = (caddr_t)rdr1();
1847 addr[nbp++] = (caddr_t)rdr2();
1850 addr[nbp++] = (caddr_t)rdr3();
1853 for (i = 0; i < nbp; i++) {
1854 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
1856 * addr[i] is in user space
1863 * None of the breakpoints are in user space.
1871 * Provide inb() and outb() as functions. They are normally only
1872 * available as macros calling inlined functions, thus cannot be
1873 * called from the debugger.
1875 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
1881 /* silence compiler warnings */
1883 void outb(u_int, u_char);
1890 * We use %%dx and not %1 here because i/o is done at %dx and not at
1891 * %edx, while gcc generates inferior code (movw instead of movl)
1892 * if we tell it to load (u_short) port.
1894 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
1899 outb(u_int port, u_char data)
1903 * Use an unnecessary assignment to help gcc's register allocator.
1904 * This makes a large difference for gcc-1.40 and a tiny difference
1905 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for
1906 * best results. gcc-2.6.0 can't handle this.
1909 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));