/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_platform.h"
#include "opt_sched.h"
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/efi.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>
#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif
#include <net/netisr.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <machine/tss.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef DEV_ATPIC
#include <x86/isa/icu.h>
#else
#include <x86/apicvar.h>
#endif
#include <isa/isareg.h>
#include <x86/init.h>
/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
/*
 * The PTI trampoline stack needs enough space for a hardware trapframe and a
 * couple of scratch registers, as well as the trapframe left behind after an
 * iret fault.
 */
CTASSERT(PC_PTI_STACK_SZ * sizeof(register_t) >= 2 * sizeof(struct pti_frame) -
    offsetof(struct pti_frame, pti_rip));
extern u_int64_t hammer_time(u_int64_t, u_int64_t);
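/*
 * Sanity checks applied to user-supplied contexts (sigreturn, ptrace,
 * set_mcontext below): CS_SECURE() accepts only a %cs selector with user
 * privilege (RPL 3), and EFL_SECURE() accepts a new %rflags value only if
 * it differs from the old one in user-changeable bits (PSL_USERCHANGE).
 */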
#define CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
static void cpu_startup(void *);
static void get_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpusave, size_t xfpusave_len);
static int set_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpustate, size_t xfpustate_len);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
/* Preload data parse function */
static caddr_t native_parse_preload_data(u_int64_t);

/* Native function to fetch and parse the e820 map */
static void native_parse_memmap(caddr_t, vm_paddr_t *, int *);

/* Default init_ops implementation. */
struct init_ops init_ops = {
    .parse_preload_data =	native_parse_preload_data,
    .early_clock_source_init =	i8254_init,
    .early_delay =		i8254_delay,
    .parse_memmap =		native_parse_memmap,
#ifdef SMP
    .mp_bootaddress =		mp_bootaddress,
    .start_all_aps =		native_start_all_aps,
#endif
    .msi_init =			msi_init,
};
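/*
 * Note: alternative boot environments (the Xen PVH port, for example)
 * install their own init_ops so the early-boot hooks above can be
 * replaced without scattering #ifdefs through the callers.
 */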
struct msgbuf *msgbufp;
/*
 * Physical address of the EFI System Table. Stashed from the metadata hints
 * passed into the kernel and used by the EFI code to call runtime services.
 */
vm_paddr_t efi_systbl_phys;
/* Intel ICH registers */
#define ICH_PMBASE	0x400
#define ICH_SMI_EN	(ICH_PMBASE + 0x30)
int _udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel;
/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END	(nitems(phys_avail) - 2)
#define DUMP_AVAIL_ARRAY_END	(nitems(dump_avail) - 2)
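/*
 * Layout reminder: both arrays hold inclusive-start/exclusive-end pairs,
 * e.g. phys_avail[0]/phys_avail[1] describe the first usable range, and a
 * 0/0 pair terminates the list -- which is why two slots are reserved above.
 */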
struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct region_descriptor r_gdt, r_idt;

struct pcpu __pcpu[MAXCPU];

struct mem_range_softc mem_range_softc;

struct mtx dt_lock;	/* lock for GDT and LDT */

void (*vmm_resume_p)(void);
static void
cpu_startup(void *dummy)
{
    uintmax_t memsize;
    char *sysenv;

    /*
     * On MacBooks, we need to disallow the legacy USB circuit to
     * generate an SMI# because this can cause several problems,
     * namely: incorrect CPU frequency detection and failure to
     * start the APs.
     * We do this by disabling a bit in the SMI_EN (SMI Control and
     * Enable register) of the Intel ICH LPC Interface Bridge.
     */
    sysenv = kern_getenv("smbios.system.product");
    if (sysenv != NULL) {
        if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
            strncmp(sysenv, "MacBook3,1", 10) == 0 ||
            strncmp(sysenv, "MacBook4,1", 10) == 0 ||
            strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
            strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
            strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
            strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
            strncmp(sysenv, "Macmini1,1", 10) == 0) {
            if (bootverbose)
                printf("Disabling LEGACY_USB_EN bit on "
                    "Intel ICH.\n");
            outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
        }
        freeenv(sysenv);
    }

    /*
     * Good {morning,afternoon,evening,night}.
     */
    startrtclock();
    printcpuinfo();

    /*
     * Display physical memory if SMBIOS reports reasonable amount.
     */
    memsize = 0;
    sysenv = kern_getenv("smbios.memory.enabled");
    if (sysenv != NULL) {
        memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
        freeenv(sysenv);
    }
    if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
        memsize = ptoa((uintmax_t)Maxmem);
    printf("real memory  = %ju (%ju MB)\n", memsize, memsize >> 20);
    realmem = atop(memsize);

    /*
     * Display any holes after the first chunk of extended memory.
     */
    if (bootverbose) {
        int indx;

        printf("Physical memory chunk(s):\n");
        for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
            vm_paddr_t size;

            size = phys_avail[indx + 1] - phys_avail[indx];
            printf(
                "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
                (uintmax_t)phys_avail[indx],
                (uintmax_t)phys_avail[indx + 1] - 1,
                (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
        }
    }

    vm_ksubmap_init(&kmi);

    printf("avail memory = %ju (%ju MB)\n",
        ptoa((uintmax_t)vm_cnt.v_free_count),
        ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);

    /*
     * Set up buffers, so they can be used to read disk labels.
     */
    bufinit();
    vm_pager_bufferinit();
    cpu_setregs();
}
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by call
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
    struct sigframe sf, *sfp;
    struct pcb *pcb;
    struct proc *p;
    struct thread *td;
    struct sigacts *psp;
    char *sp;
    struct trapframe *regs;
    char *xfpusave;
    size_t xfpusave_len;
    int sig;
    int oonstack;

    td = curthread;
    pcb = td->td_pcb;
    p = td->td_proc;
    PROC_LOCK_ASSERT(p, MA_OWNED);
    sig = ksi->ksi_signo;
    psp = p->p_sigacts;
    mtx_assert(&psp->ps_mtx, MA_OWNED);
    regs = td->td_frame;
    oonstack = sigonstack(regs->tf_rsp);

    if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) {
        xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu);
        xfpusave = __builtin_alloca(xfpusave_len);
    } else {
        xfpusave_len = 0;
        xfpusave = NULL;
    }

    /* Save user context. */
    bzero(&sf, sizeof(sf));
    sf.sf_uc.uc_sigmask = *mask;
    sf.sf_uc.uc_stack = td->td_sigstk;
    sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
        ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
    sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
    bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
    sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
    get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
    fpstate_drop(td);
    update_pcb_bases(pcb);
    sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase;
    sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase;
    bzero(sf.sf_uc.uc_mcontext.mc_spare,
        sizeof(sf.sf_uc.uc_mcontext.mc_spare));
    bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));

    /* Allocate space for the signal handler context. */
    if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
        SIGISMEMBER(psp->ps_sigonstack, sig)) {
        sp = (char *)td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
#if defined(COMPAT_43)
        td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
    } else
        sp = (char *)regs->tf_rsp - 128;
    if (xfpusave != NULL) {
        sp -= xfpusave_len;
        sp = (char *)((unsigned long)sp & ~0x3Ful);
        sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
    }
    sp -= sizeof(struct sigframe);
    /* Align to 16 bytes. */
    sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);

    /* Build the argument list for the signal handler. */
    regs->tf_rdi = sig;			/* arg 1 in %rdi */
    regs->tf_rdx = (register_t)&sfp->sf_uc;	/* arg 3 in %rdx */
    bzero(&sf.sf_si, sizeof(sf.sf_si));
    if (SIGISMEMBER(psp->ps_siginfo, sig)) {
        /* Signal handler installed with SA_SIGINFO. */
        regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */
        sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

        /* Fill in POSIX parts */
        sf.sf_si = ksi->ksi_info;
        sf.sf_si.si_signo = sig; /* maybe a translated signal */
        regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
    } else {
        /* Old FreeBSD-style arguments. */
        regs->tf_rsi = ksi->ksi_code;	/* arg 2 in %rsi */
        regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
        sf.sf_ahu.sf_handler = catcher;
    }
    mtx_unlock(&psp->ps_mtx);
    PROC_UNLOCK(p);

    /*
     * Copy the sigframe out to the user's stack.
     */
    if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
        (xfpusave != NULL && copyout(xfpusave,
        (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
        != 0)) {
#ifdef DEBUG
        printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
        PROC_LOCK(p);
        sigexit(td, SIGILL);
    }

    regs->tf_rsp = (long)sfp;
    regs->tf_rip = p->p_sysent->sv_sigcode_base;
    regs->tf_rflags &= ~(PSL_T | PSL_D);
    regs->tf_cs = _ucodesel;
    regs->tf_ds = _udatasel;
    regs->tf_ss = _udatasel;
    regs->tf_es = _udatasel;
    regs->tf_fs = _ufssel;
    regs->tf_gs = _ugssel;
    regs->tf_flags = TF_HASSEGS;
    PROC_LOCK(p);
    mtx_lock(&psp->ps_mtx);
}
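    /*
     * The 128 bytes subtracted above preserve the "red zone" that the
     * SysV AMD64 ABI guarantees below %rsp for leaf functions; the
     * signal frame must be placed below it.
     */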
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
int
sys_sigreturn(td, uap)
    struct thread *td;
    struct sigreturn_args /* {
        const struct __ucontext *sigcntxp;
    } */ *uap;
{
    ucontext_t uc;
    struct pcb *pcb;
    struct proc *p;
    struct trapframe *regs;
    ucontext_t *ucp;
    char *xfpustate;
    size_t xfpustate_len;
    long rflags;
    int cs, error, ret;
    ksiginfo_t ksi;

    pcb = td->td_pcb;
    p = td->td_proc;

    error = copyin(uap->sigcntxp, &uc, sizeof(uc));
    if (error != 0) {
        uprintf("pid %d (%s): sigreturn copyin failed\n",
            p->p_pid, td->td_name);
        return (error);
    }
    ucp = &uc;
    if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
        uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
            td->td_name, ucp->uc_mcontext.mc_flags);
        return (EINVAL);
    }
    regs = td->td_frame;
    rflags = ucp->uc_mcontext.mc_rflags;
    /*
     * Don't allow users to change privileged or reserved flags.
     */
    if (!EFL_SECURE(rflags, regs->tf_rflags)) {
        uprintf("pid %d (%s): sigreturn rflags = 0x%lx\n", p->p_pid,
            td->td_name, rflags);
        return (EINVAL);
    }

    /*
     * Don't allow users to load a valid privileged %cs.  Let the
     * hardware check for invalid selectors, excess privilege in
     * other selectors, invalid %eip's and invalid %esp's.
     */
    cs = ucp->uc_mcontext.mc_cs;
    if (!CS_SECURE(cs)) {
        uprintf("pid %d (%s): sigreturn cs = 0x%x\n", p->p_pid,
            td->td_name, cs);
        ksiginfo_init_trap(&ksi);
        ksi.ksi_signo = SIGBUS;
        ksi.ksi_code = BUS_OBJERR;
        ksi.ksi_trapno = T_PROTFLT;
        ksi.ksi_addr = (void *)regs->tf_rip;
        trapsignal(td, &ksi);
        return (EINVAL);
    }

    if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
        xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
        if (xfpustate_len > cpu_max_ext_state_size -
            sizeof(struct savefpu)) {
            uprintf("pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
                p->p_pid, td->td_name, xfpustate_len);
            return (EINVAL);
        }
        xfpustate = __builtin_alloca(xfpustate_len);
        error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
            xfpustate, xfpustate_len);
        if (error != 0) {
            uprintf(
                "pid %d (%s): sigreturn copying xfpustate failed\n",
                p->p_pid, td->td_name);
            return (error);
        }
    } else {
        xfpustate = NULL;
        xfpustate_len = 0;
    }
    ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate, xfpustate_len);
    if (ret != 0) {
        uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n",
            p->p_pid, td->td_name, ret);
        return (ret);
    }
    bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));
    update_pcb_bases(pcb);
    pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase;
    pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase;

#if defined(COMPAT_43)
    if (ucp->uc_mcontext.mc_onstack & 1)
        td->td_sigstk.ss_flags |= SS_ONSTACK;
    else
        td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

    kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
    set_pcb_flags(pcb, PCB_FULL_IRET);
    return (EJUSTRETURN);
}
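    /*
     * EJUSTRETURN tells the syscall return path to leave the trapframe
     * alone: the registers were fully restored from the ucontext above,
     * so the usual writing of return values must be skipped.
     */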
#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

    return sys_sigreturn(td, (struct sigreturn_args *)uap);
}
#endif
/*
 * Reset registers to default values on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
    struct trapframe *regs = td->td_frame;
    struct pcb *pcb = td->td_pcb;

    mtx_lock(&dt_lock);
    if (td->td_proc->p_md.md_ldt != NULL)
        user_ldt_free(td);
    else
        mtx_unlock(&dt_lock);

    update_pcb_bases(pcb);
    pcb->pcb_fsbase = 0;
    pcb->pcb_gsbase = 0;
    clear_pcb_flags(pcb, PCB_32BIT);
    pcb->pcb_initial_fpucw = __INITIAL_FPUCW__;

    bzero((char *)regs, sizeof(struct trapframe));
    regs->tf_rip = imgp->entry_addr;
    regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
    regs->tf_rdi = stack;		/* argv */
    regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
    regs->tf_ss = _udatasel;
    regs->tf_cs = _ucodesel;
    regs->tf_ds = _udatasel;
    regs->tf_es = _udatasel;
    regs->tf_fs = _ufssel;
    regs->tf_gs = _ugssel;
    regs->tf_flags = TF_HASSEGS;
    td->td_retval[1] = 0;

    /*
     * Reset the hardware debug registers if they were in use.
     * They won't have any meaning for the newly exec'd process.
     */
    if (pcb->pcb_flags & PCB_DBREGS) {
        pcb->pcb_dr0 = 0;
        pcb->pcb_dr1 = 0;
        pcb->pcb_dr2 = 0;
        pcb->pcb_dr3 = 0;
        pcb->pcb_dr6 = 0;
        pcb->pcb_dr7 = 0;
        if (pcb == curpcb) {
            /*
             * Clear the debug registers on the running
             * CPU, otherwise they will end up affecting
             * the next process we switch to.
             */
            reset_dbregs();
        }
        clear_pcb_flags(pcb, PCB_DBREGS);
    }

    /*
     * Drop the FP state if we hold it, so that the process gets a
     * clean FP state if it uses the FPU again.
     */
    fpstate_drop(td);
}
void
cpu_setregs(void)
{
    register_t cr0;

    cr0 = rcr0();
    /*
     * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
     * BSP.  See the comments there about why we set them.
     */
    cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
    load_cr0(cr0);
}
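/*
 * Of the CR0 bits set above, CR0_WP makes the kernel honor page-level
 * write protection on its own accesses and CR0_AM allows alignment
 * checking (together with PSL_AC); both are architectural x86 bits
 * rather than anything FreeBSD-specific.
 */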
/*
 * Initialize amd64 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
static char dblfault_stack[PAGE_SIZE] __aligned(16);
static char mce0_stack[PAGE_SIZE] __aligned(16);
static char nmi0_stack[PAGE_SIZE] __aligned(16);
CTASSERT(sizeof(struct nmi_pcpu) == 16);

struct amd64tss common_tss[MAXCPU];
/*
 * Software prototypes -- in more palatable form.
 *
 * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same
 * slots as corresponding segments for i386 kernel.
 */
struct soft_segment_descriptor gdt_segs[] = {
    /* GNULL_SEL	0 Null Descriptor */
    {	.ssd_base = 0x0, .ssd_limit = 0x0, .ssd_type = 0,
	.ssd_dpl = 0, .ssd_p = 0, .ssd_long = 0,
	.ssd_def32 = 0, .ssd_gran = 0 },
    /* GNULL2_SEL	1 Null Descriptor */
    {	.ssd_base = 0x0, .ssd_limit = 0x0, .ssd_type = 0,
	.ssd_dpl = 0, .ssd_p = 0, .ssd_long = 0,
	.ssd_def32 = 0, .ssd_gran = 0 },
    /* GUFS32_SEL	2 32 bit %gs Descriptor for user */
    {	.ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL, .ssd_p = 1, .ssd_long = 0,
	.ssd_def32 = 1, .ssd_gran = 1 },
    /* GUGS32_SEL	3 32 bit %fs Descriptor for user */
    {	.ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL, .ssd_p = 1, .ssd_long = 0,
	.ssd_def32 = 1, .ssd_gran = 1 },
    /* GCODE_SEL	4 Code Descriptor for kernel */
    {	.ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_KPL, .ssd_p = 1, .ssd_long = 1,
	.ssd_def32 = 0, .ssd_gran = 1 },
    /* GDATA_SEL	5 Data Descriptor for kernel */
    {	.ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_KPL, .ssd_p = 1, .ssd_long = 1,
	.ssd_def32 = 0, .ssd_gran = 1 },
    /* GUCODE32_SEL	6 32 bit Code Descriptor for user */
    {	.ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL, .ssd_p = 1, .ssd_long = 0,
	.ssd_def32 = 1, .ssd_gran = 1 },
    /* GUDATA_SEL	7 32/64 bit Data Descriptor for user */
    {	.ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMRWA,
	.ssd_dpl = SEL_UPL, .ssd_p = 1, .ssd_long = 0,
	.ssd_def32 = 1, .ssd_gran = 1 },
    /* GUCODE_SEL	8 64 bit Code Descriptor for user */
    {	.ssd_base = 0x0, .ssd_limit = 0xfffff, .ssd_type = SDT_MEMERA,
	.ssd_dpl = SEL_UPL, .ssd_p = 1, .ssd_long = 1,
	.ssd_def32 = 0, .ssd_gran = 1 },
    /* GPROC0_SEL	9 Proc 0 Tss Descriptor */
    {	.ssd_base = 0x0,
	.ssd_limit = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE - 1,
	.ssd_type = SDT_SYSTSS, .ssd_dpl = SEL_KPL, .ssd_p = 1,
	.ssd_long = 0, .ssd_def32 = 0, .ssd_gran = 0 },
    /* Actually, the TSS is a system descriptor which is double size */
    {	.ssd_base = 0x0, .ssd_limit = 0x0, .ssd_type = 0,
	.ssd_dpl = 0, .ssd_p = 0, .ssd_long = 0,
	.ssd_def32 = 0, .ssd_gran = 0 },
    /* GUSERLDT_SEL	11 LDT Descriptor */
    {	.ssd_base = 0x0, .ssd_limit = 0x0, .ssd_type = 0,
	.ssd_dpl = 0, .ssd_p = 0, .ssd_long = 0,
	.ssd_def32 = 0, .ssd_gran = 0 },
    /* GUSERLDT_SEL	12 LDT Descriptor, double size */
    {	.ssd_base = 0x0, .ssd_limit = 0x0, .ssd_type = 0,
	.ssd_dpl = 0, .ssd_p = 0, .ssd_long = 0,
	.ssd_def32 = 0, .ssd_gran = 0 },
};
void
setidt(int idx, inthand_t *func, int typ, int dpl, int ist)
{
    struct gate_descriptor *ip;

    ip = idt + idx;
    ip->gd_looffset = (uintptr_t)func;
    ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
    ip->gd_ist = ist;
    ip->gd_xx = 0;
    ip->gd_type = typ;
    ip->gd_dpl = dpl;
    ip->gd_p = 1;
    ip->gd_hioffset = ((uintptr_t)func) >> 16;
}
extern inthand_t
    IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
    IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
    IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
    IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
    IDTVEC(xmm), IDTVEC(dblfault),
    IDTVEC(div_pti), IDTVEC(dbg_pti), IDTVEC(bpt_pti),
    IDTVEC(ofl_pti), IDTVEC(bnd_pti), IDTVEC(ill_pti), IDTVEC(dna_pti),
    IDTVEC(fpusegm_pti), IDTVEC(tss_pti), IDTVEC(missing_pti),
    IDTVEC(stk_pti), IDTVEC(prot_pti), IDTVEC(page_pti),
    IDTVEC(rsvd_pti), IDTVEC(fpu_pti), IDTVEC(align_pti),
    IDTVEC(xmm_pti),
#ifdef KDTRACE_HOOKS
    IDTVEC(dtrace_ret), IDTVEC(dtrace_ret_pti),
#endif
#ifdef XENHVM
    IDTVEC(xen_intr_upcall), IDTVEC(xen_intr_upcall_pti),
#endif
    IDTVEC(fast_syscall), IDTVEC(fast_syscall32),
    IDTVEC(fast_syscall_pti);
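/*
 * The *_pti variants above are the entry points installed when page-table
 * isolation (the vm.pmap.pti knob consulted in hammer_time() below) is
 * active: they enter on the per-CPU PTI trampoline stack and switch to the
 * kernel page table before falling into the common handler.
 */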
#ifdef DDB
/*
 * Display the index and function name of any IDT entries that don't use
 * the default 'rsvd' entry point.
 */
DB_SHOW_COMMAND(idt, db_show_idt)
{
    struct gate_descriptor *ip;
    int idx;
    uintptr_t func;

    ip = idt;
    for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
        func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset);
        if (func != (uintptr_t)&IDTVEC(rsvd)) {
            db_printf("%3d\t", idx);
            db_printsym(func, DB_STGY_PROC);
            db_printf("\n");
        }
        ip++;
    }
}
/* Show privileged registers. */
DB_SHOW_COMMAND(sysregs, db_show_sysregs)
{
    struct {
        uint16_t limit;
        uint64_t base;
    } __packed idtr, gdtr;
    uint16_t ldt, tr;

    __asm __volatile("sidt %0" : "=m" (idtr));
    db_printf("idtr\t0x%016lx/%04x\n",
        (u_long)idtr.base, (u_int)idtr.limit);
    __asm __volatile("sgdt %0" : "=m" (gdtr));
    db_printf("gdtr\t0x%016lx/%04x\n",
        (u_long)gdtr.base, (u_int)gdtr.limit);
    __asm __volatile("sldt %0" : "=r" (ldt));
    db_printf("ldtr\t0x%04x\n", ldt);
    __asm __volatile("str %0" : "=r" (tr));
    db_printf("tr\t0x%04x\n", tr);
    db_printf("cr0\t0x%016lx\n", rcr0());
    db_printf("cr2\t0x%016lx\n", rcr2());
    db_printf("cr3\t0x%016lx\n", rcr3());
    db_printf("cr4\t0x%016lx\n", rcr4());
    if (rcr4() & CR4_XSAVE)
        db_printf("xcr0\t0x%016lx\n", rxcr(0));
    db_printf("EFER\t0x%016lx\n", rdmsr(MSR_EFER));
    if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX))
        db_printf("FEATURES_CTL\t%016lx\n",
            rdmsr(MSR_IA32_FEATURE_CONTROL));
    db_printf("DEBUG_CTL\t0x%016lx\n", rdmsr(MSR_DEBUGCTLMSR));
    db_printf("PAT\t0x%016lx\n", rdmsr(MSR_PAT));
    db_printf("GSBASE\t0x%016lx\n", rdmsr(MSR_GSBASE));
}
DB_SHOW_COMMAND(dbregs, db_show_dbregs)
{

    db_printf("dr0\t0x%016lx\n", rdr0());
    db_printf("dr1\t0x%016lx\n", rdr1());
    db_printf("dr2\t0x%016lx\n", rdr2());
    db_printf("dr3\t0x%016lx\n", rdr3());
    db_printf("dr6\t0x%016lx\n", rdr6());
    db_printf("dr7\t0x%016lx\n", rdr7());
}
#endif /* DDB */
void
sdtossd(struct user_segment_descriptor *sd, struct soft_segment_descriptor *ssd)
{

    ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
    ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
    ssd->ssd_type = sd->sd_type;
    ssd->ssd_dpl = sd->sd_dpl;
    ssd->ssd_p = sd->sd_p;
    ssd->ssd_long = sd->sd_long;
    ssd->ssd_def32 = sd->sd_def32;
    ssd->ssd_gran = sd->sd_gran;
}
void
ssdtosd(struct soft_segment_descriptor *ssd, struct user_segment_descriptor *sd)
{

    sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
    sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
    sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
    sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
    sd->sd_type = ssd->ssd_type;
    sd->sd_dpl = ssd->ssd_dpl;
    sd->sd_p = ssd->ssd_p;
    sd->sd_long = ssd->ssd_long;
    sd->sd_def32 = ssd->ssd_def32;
    sd->sd_gran = ssd->ssd_gran;
}
void
ssdtosyssd(struct soft_segment_descriptor *ssd,
    struct system_segment_descriptor *sd)
{

    sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
    sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
    sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
    sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
    sd->sd_type = ssd->ssd_type;
    sd->sd_dpl = ssd->ssd_dpl;
    sd->sd_p = ssd->ssd_p;
    sd->sd_gran = ssd->ssd_gran;
}
#if !defined(DEV_ATPIC) && defined(DEV_ISA)
#include <isa/isavar.h>
#include <isa/isareg.h>
/*
 * Return a bitmap of the current interrupt requests.  This is 8259-specific
 * and is only suitable for use at probe time.
 * This is only here to pacify sio.  It is NOT FATAL if this doesn't work.
 * It shouldn't be here.  There should probably be an APIC centric
 * implementation in the apic driver code, if at all.
 */
intrmask_t
isa_irq_pending(void)
{
    u_char irr1;
    u_char irr2;

    irr1 = inb(IO_ICU1);
    irr2 = inb(IO_ICU2);
    return ((irr2 << 8) | irr1);
}
#endif
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    int *physmap_idxp)
{
    int i, insert_idx, physmap_idx;

    physmap_idx = *physmap_idxp;

    if (length == 0)
        return (1);

    /*
     * Find insertion point while checking for overlap.  Start off by
     * assuming the new entry will be added to the end.
     *
     * NB: physmap_idx points to the next free slot.
     */
    insert_idx = physmap_idx;
    for (i = 0; i <= physmap_idx; i += 2) {
        if (base < physmap[i + 1]) {
            if (base + length <= physmap[i]) {
                insert_idx = i;
                break;
            }
            if (boothowto & RB_VERBOSE)
                printf(
            "Overlapping memory regions, ignoring second region\n");
            return (1);
        }
    }

    /* See if we can prepend to the next entry. */
    if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
        physmap[insert_idx] = base;
        return (1);
    }

    /* See if we can append to the previous entry. */
    if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
        physmap[insert_idx - 1] += length;
        return (1);
    }

    physmap_idx += 2;
    *physmap_idxp = physmap_idx;
    if (physmap_idx == PHYSMAP_SIZE) {
        printf(
        "Too many segments in the physical address map, giving up\n");
        return (0);
    }

    /*
     * Move the last 'N' entries down to make room for the new
     * entry if needed.
     */
    for (i = (physmap_idx - 2); i > insert_idx; i -= 2) {
        physmap[i] = physmap[i - 2];
        physmap[i + 1] = physmap[i - 1];
    }

    /* Insert the new entry. */
    physmap[insert_idx] = base;
    physmap[insert_idx + 1] = base + length;
    return (1);
}
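/*
 * Worked example of the logic above: starting from physmap = { 0x1000,
 * 0x9f000 }, adding base 0x100000/length 0x100000 appends a new pair;
 * adding base 0x9f000 (adjacent to the previous end) instead extends the
 * existing entry; adding base 0x2000 (inside the first range) is reported
 * as an overlap and dropped.
 */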
static void
bios_add_smap_entries(struct bios_smap *smapbase, u_int32_t smapsize,
    vm_paddr_t *physmap, int *physmap_idx)
{
    struct bios_smap *smap, *smapend;

    smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

    for (smap = smapbase; smap < smapend; smap++) {
        if (boothowto & RB_VERBOSE)
            printf("SMAP type=%02x base=%016lx len=%016lx\n",
                smap->type, smap->base, smap->length);

        if (smap->type != SMAP_TYPE_MEMORY)
            continue;

        if (!add_physmap_entry(smap->base, smap->length, physmap,
            physmap_idx))
            break;
    }
}
static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    int *physmap_idx)
{
    struct efi_md *map, *p;
    const char *type;
    size_t efisz;
    int ndesc, i;

    static const char *types[] = {
        "Reserved",
        "LoaderCode",
        "LoaderData",
        "BootServicesCode",
        "BootServicesData",
        "RuntimeServicesCode",
        "RuntimeServicesData",
        "ConventionalMemory",
        "UnusableMemory",
        "ACPIReclaimMemory",
        "ACPIMemoryNVS",
        "MemoryMappedIO",
        "MemoryMappedIOPortSpace",
        "PalCode",
        "PersistentMemory"
    };

    /*
     * Memory map data provided by UEFI via the GetMemoryMap
     * Boot Services API.
     */
    efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
    map = (struct efi_md *)((uint8_t *)efihdr + efisz);

    if (efihdr->descriptor_size == 0)
        return;
    ndesc = efihdr->memory_size / efihdr->descriptor_size;

    if (boothowto & RB_VERBOSE)
        printf("%23s %12s %12s %8s %4s\n",
            "Type", "Physical", "Virtual", "#Pages", "Attr");

    for (i = 0, p = map; i < ndesc; i++,
        p = efi_next_descriptor(p, efihdr->descriptor_size)) {
        if (boothowto & RB_VERBOSE) {
            if (p->md_type < nitems(types))
                type = types[p->md_type];
            else
                type = "<INVALID>";
            printf("%23s %012lx %12p %08lx ", type, p->md_phys,
                p->md_virt, p->md_pages);
            if (p->md_attr & EFI_MD_ATTR_UC)
                printf("UC ");
            if (p->md_attr & EFI_MD_ATTR_WC)
                printf("WC ");
            if (p->md_attr & EFI_MD_ATTR_WT)
                printf("WT ");
            if (p->md_attr & EFI_MD_ATTR_WB)
                printf("WB ");
            if (p->md_attr & EFI_MD_ATTR_UCE)
                printf("UCE ");
            if (p->md_attr & EFI_MD_ATTR_WP)
                printf("WP ");
            if (p->md_attr & EFI_MD_ATTR_RP)
                printf("RP ");
            if (p->md_attr & EFI_MD_ATTR_XP)
                printf("XP ");
            if (p->md_attr & EFI_MD_ATTR_NV)
                printf("NV ");
            if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE)
                printf("MORE_RELIABLE ");
            if (p->md_attr & EFI_MD_ATTR_RO)
                printf("RO ");
            if (p->md_attr & EFI_MD_ATTR_RT)
                printf("RUNTIME");
            printf("\n");
        }

        switch (p->md_type) {
        case EFI_MD_TYPE_CODE:
        case EFI_MD_TYPE_DATA:
        case EFI_MD_TYPE_BS_CODE:
        case EFI_MD_TYPE_BS_DATA:
        case EFI_MD_TYPE_FREE:
            /*
             * We're allowed to use any entry with these types.
             */
            break;
        default:
            continue;
        }

        if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
            physmap, physmap_idx))
            break;
    }
}
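    /*
     * The header size is rounded up to 16 bytes below so that the
     * descriptor array that follows it in the preloaded blob stays
     * aligned regardless of the header's real size.
     */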
static char bootmethod[16] = "";
SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
    "System firmware boot method");
static void
native_parse_memmap(caddr_t kmdp, vm_paddr_t *physmap, int *physmap_idx)
{
    struct bios_smap *smap;
    struct efi_map_header *efihdr;
    u_int32_t size;

    /*
     * Memory map from INT 15:E820.
     *
     * subr_module.c says:
     * "Consumer may safely assume that size value precedes data."
     * ie: an int32_t immediately precedes smap.
     */

    efihdr = (struct efi_map_header *)preload_search_info(kmdp,
        MODINFO_METADATA | MODINFOMD_EFI_MAP);
    smap = (struct bios_smap *)preload_search_info(kmdp,
        MODINFO_METADATA | MODINFOMD_SMAP);
    if (efihdr == NULL && smap == NULL)
        panic("No BIOS smap or EFI map info from loader!");

    if (efihdr != NULL) {
        add_efi_map_entries(efihdr, physmap, physmap_idx);
        strlcpy(bootmethod, "UEFI", sizeof(bootmethod));
    } else {
        size = *((u_int32_t *)smap - 1);
        bios_add_smap_entries(smap, size, physmap, physmap_idx);
        strlcpy(bootmethod, "BIOS", sizeof(bootmethod));
    }
}
#define PAGES_PER_GB	(1024 * 1024 * 1024 / PAGE_SIZE)

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(caddr_t kmdp, u_int64_t first)
{
    int i, physmap_idx, pa_indx, da_indx;
    vm_paddr_t pa, physmap[PHYSMAP_SIZE];
    u_long physmem_start, physmem_tunable, memtest;
    pt_entry_t *pte;
    quad_t dcons_addr, dcons_size;
    int page_counter;

    bzero(physmap, sizeof(physmap));
    physmap_idx = 0;
    init_ops.parse_memmap(kmdp, physmap, &physmap_idx);
    physmap_idx -= 2;

    /*
     * Find the 'base memory' segment for SMP
     */
    basemem = 0;
    for (i = 0; i <= physmap_idx; i += 2) {
        if (physmap[i] <= 0xA0000) {
            basemem = physmap[i + 1] / 1024;
            break;
        }
    }
    if (basemem == 0 || basemem > 640) {
        if (bootverbose)
            printf(
        "Memory map doesn't contain a basemem segment, faking it");
        basemem = 640;
    }

    /*
     * Make hole for "AP -> long mode" bootstrap code.  The
     * mp_bootaddress vector is only available when the kernel
     * is configured to support APs and APs for the system start
     * in 32bit mode (e.g. SMP bare metal).
     */
    if (init_ops.mp_bootaddress) {
        if (physmap[1] >= 0x100000000)
            panic(
        "Basemem segment is not suitable for AP bootstrap code!");
        physmap[1] = init_ops.mp_bootaddress(physmap[1] / 1024);
    }

    /*
     * Maxmem isn't the "maximum memory", it's one larger than the
     * highest page of the physical address space.  It should be
     * called something like "Maxphyspage".  We may adjust this
     * based on ``hw.physmem'' and the results of the memory test.
     */
    Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
    Maxmem = MAXMEM / 4;
#endif

    if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
        Maxmem = atop(physmem_tunable);

    /*
     * The boot memory test is disabled by default, as it takes a
     * significant amount of time on large-memory systems, and is
     * unfriendly to virtual machines as it unnecessarily touches all
     * memory.
     *
     * A general name is used as the code may be extended to support
     * additional tests beyond the current "page present" test.
     */
    memtest = 0;
    TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);

    /*
     * Don't allow MAXMEM or hw.physmem to extend the amount of memory
     * in the system.
     */
    if (Maxmem > atop(physmap[physmap_idx + 1]))
        Maxmem = atop(physmap[physmap_idx + 1]);

    if (atop(physmap[physmap_idx + 1]) != Maxmem &&
        (boothowto & RB_VERBOSE))
        printf("Physical memory use set to %ldK\n", Maxmem * 4);

    /* call pmap initialization to make new kernel address space */
    pmap_bootstrap(&first);

    /*
     * Size up each available chunk of physical memory.
     *
     * XXX Some BIOSes corrupt low 64KB between suspend and resume.
     * By default, mask off the first 16 pages unless we appear to be
     * running in a VM.
     */
    physmem_start = (vm_guest > VM_GUEST_NO ? 1 : 16) << PAGE_SHIFT;
    TUNABLE_ULONG_FETCH("hw.physmem.start", &physmem_start);
    if (physmap[0] < physmem_start) {
        if (physmem_start < PAGE_SIZE)
            physmap[0] = PAGE_SIZE;
        else if (physmem_start >= physmap[1])
            physmap[0] = round_page(physmap[1] - PAGE_SIZE);
        else
            physmap[0] = round_page(physmem_start);
    }
    pa_indx = 0;
    da_indx = 1;
    phys_avail[pa_indx++] = physmap[0];
    phys_avail[pa_indx] = physmap[0];
    dump_avail[da_indx] = physmap[0];
    pte = CMAP1;

    /*
     * Get dcons buffer address
     */
    if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
        getenv_quad("dcons.size", &dcons_size) == 0)
        dcons_addr = 0;

    /*
     * physmap is in bytes, so when converting to page boundaries,
     * round up the start address and round down the end address.
     */
    page_counter = 0;
    if (memtest != 0)
        printf("Testing system memory");
    for (i = 0; i <= physmap_idx; i += 2) {
        vm_paddr_t end;

        end = ptoa((vm_paddr_t)Maxmem);
        if (physmap[i + 1] < end)
            end = trunc_page(physmap[i + 1]);
        for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
            int tmp, page_bad, full;
            int *ptr = (int *)CADDR1;

            full = FALSE;
            /*
             * block out kernel memory as not available.
             */
            if (pa >= (vm_paddr_t)kernphys && pa < first)
                goto do_dump_avail;

            /*
             * block out dcons buffer
             */
            if (dcons_addr > 0
                && pa >= trunc_page(dcons_addr)
                && pa < dcons_addr + dcons_size)
                goto do_dump_avail;

            page_bad = FALSE;
            if (memtest == 0)
                goto skip_memtest;

            /*
             * Print a "." every GB to show we're making
             * actual progress.
             */
            page_counter++;
            if ((page_counter % PAGES_PER_GB) == 0)
                printf(".");

            /*
             * map page into kernel: valid, read/write,non-cacheable
             */
            *pte = pa | PG_V | PG_RW | PG_NC_PWT | PG_NC_PCD;
            invltlb();

            tmp = *(int *)ptr;
            /*
             * Test for alternating 1's and 0's
             */
            *(volatile int *)ptr = 0xaaaaaaaa;
            if (*(volatile int *)ptr != 0xaaaaaaaa)
                page_bad = TRUE;
            /*
             * Test for alternating 0's and 1's
             */
            *(volatile int *)ptr = 0x55555555;
            if (*(volatile int *)ptr != 0x55555555)
                page_bad = TRUE;
            /*
             * Test for all 1's
             */
            *(volatile int *)ptr = 0xffffffff;
            if (*(volatile int *)ptr != 0xffffffff)
                page_bad = TRUE;
            /*
             * Test for all 0's
             */
            *(volatile int *)ptr = 0x0;
            if (*(volatile int *)ptr != 0x0)
                page_bad = TRUE;
            /*
             * Restore original value.
             */
            *(int *)ptr = tmp;

skip_memtest:
            /*
             * Adjust array of valid/good pages.
             */
            if (page_bad == TRUE)
                continue;
            /*
             * If this good page is a continuation of the
             * previous set of good pages, then just increase
             * the end pointer. Otherwise start a new chunk.
             * Note that "end" points one higher than end,
             * making the range >= start and < end.
             * If we're also doing a speculative memory
             * test and we at or past the end, bump up Maxmem
             * so that we keep going. The first bad page
             * will terminate the loop.
             */
            if (phys_avail[pa_indx] == pa) {
                phys_avail[pa_indx] += PAGE_SIZE;
            } else {
                pa_indx++;
                if (pa_indx == PHYS_AVAIL_ARRAY_END) {
                    printf(
        "Too many holes in the physical address space, giving up\n");
                    pa_indx--;
                    full = TRUE;
                    goto do_dump_avail;
                }
                phys_avail[pa_indx++] = pa;	/* start */
                phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
            }
            physmem++;
do_dump_avail:
            if (dump_avail[da_indx] == pa) {
                dump_avail[da_indx] += PAGE_SIZE;
            } else {
                da_indx++;
                if (da_indx == DUMP_AVAIL_ARRAY_END) {
                    da_indx--;
                    goto do_next;
                }
                dump_avail[da_indx++] = pa; /* start */
                dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
            }
do_next:
            if (full)
                break;
        }
    }
    *pte = 0;
    invltlb();
    if (memtest != 0)
        printf("\n");

    /*
     * XXX
     * The last chunk must contain at least one page plus the message
     * buffer to avoid complicating other code (message buffer address
     * calculation, etc.).
     */
    while (phys_avail[pa_indx - 1] + PAGE_SIZE +
        round_page(msgbufsize) >= phys_avail[pa_indx]) {
        physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
        phys_avail[pa_indx--] = 0;
        phys_avail[pa_indx--] = 0;
    }

    Maxmem = atop(phys_avail[pa_indx]);

    /* Trim off space for the message buffer. */
    phys_avail[pa_indx] -= round_page(msgbufsize);

    /* Map the message buffer. */
    msgbufp = (struct msgbuf *)PHYS_TO_DMAP(phys_avail[pa_indx]);
}
static caddr_t
native_parse_preload_data(u_int64_t modulep)
{
    caddr_t kmdp;
    char *envp;
#ifdef DDB
    vm_offset_t ksym_start;
    vm_offset_t ksym_end;
#endif

    preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
    preload_bootstrap_relocate(KERNBASE);
    kmdp = preload_search_by_type("elf kernel");
    if (kmdp == NULL)
        kmdp = preload_search_by_type("elf64 kernel");
    boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
    envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
    if (envp != NULL)
        envp += KERNBASE;
    init_static_kenv(envp, 0);
#ifdef DDB
    ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
    ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
    db_fetch_ksymtab(ksym_start, ksym_end);
#endif
    efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

    return (kmdp);
}
static void
amd64_kdb_init(void)
{

    kdb_init();
#ifdef KDB
    if (boothowto & RB_KDB)
        kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
/* Set up the fast syscall stuff */
static void
amd64_conf_fast_syscall(void)
{
    uint64_t msr;

    msr = rdmsr(MSR_EFER) | EFER_SCE;
    wrmsr(MSR_EFER, msr);
    wrmsr(MSR_LSTAR, pti ? (u_int64_t)IDTVEC(fast_syscall_pti) :
        (u_int64_t)IDTVEC(fast_syscall));
    wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
    msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
        ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
    wrmsr(MSR_STAR, msr);
    wrmsr(MSR_SF_MASK, PSL_NT | PSL_T | PSL_I | PSL_C | PSL_D);
}
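    /*
     * STAR layout (architectural): bits 47:32 hold the kernel %cs
     * selector loaded by SYSCALL (%ss is that value + 8); bits 63:48
     * hold the base selector from which SYSRET derives the user
     * %cs/%ss.
     */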
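    /*
     * Every rflags bit set in SF_MASK is cleared on SYSCALL entry;
     * clearing PSL_I keeps interrupts off until the kernel stack is
     * ready, and clearing PSL_T/PSL_NT/PSL_D gives the entry code a
     * predictable flags state.
     */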
u_int64_t
hammer_time(u_int64_t modulep, u_int64_t physfree)
{
    caddr_t kmdp;
    int gsel_tss, x;
    struct pcpu *pc;
    struct nmi_pcpu *np;
    struct xstate_hdr *xhdr;
    u_int64_t rsp0;
    char *env;
    size_t kstack0_sz;
    int late_console;

    /*
     * This may be done better later if it gets more high level
     * components in it. If so just link td->td_proc here.
     */
    proc_linkup0(&proc0, &thread0);

    kmdp = init_ops.parse_preload_data(modulep);

    identify_cpu();
    identify_hypervisor();

    /* Init basic tunables, hz etc */
    init_param1();

    thread0.td_kstack = physfree + KERNBASE;
    thread0.td_kstack_pages = kstack_pages;
    kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE;
    bzero((void *)thread0.td_kstack, kstack0_sz);
    physfree += kstack0_sz;

    /*
     * make gdt memory segments
     */
    for (x = 0; x < NGDT; x++) {
        if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) &&
            x != GUSERLDT_SEL && x != (GUSERLDT_SEL + 1))
            ssdtosd(&gdt_segs[x], &gdt[x]);
    }
    gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];
    ssdtosyssd(&gdt_segs[GPROC0_SEL],
        (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

    r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
    r_gdt.rd_base = (long) gdt;
    lgdt(&r_gdt);
    pc = &__pcpu[0];

    wrmsr(MSR_FSBASE, 0);		/* User value */
    wrmsr(MSR_GSBASE, (u_int64_t)pc);
    wrmsr(MSR_KGSBASE, 0);		/* User value while in the kernel */

    pcpu_init(pc, 0, sizeof(struct pcpu));
    dpcpu_init((void *)(physfree + KERNBASE), 0);
    physfree += DPCPU_SIZE;
    PCPU_SET(prvspace, pc);
    PCPU_SET(curthread, &thread0);
    /* Non-late cninit() and printf() can be moved up to here. */
    PCPU_SET(tssp, &common_tss[0]);
    PCPU_SET(commontssp, &common_tss[0]);
    PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
    PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]);
    PCPU_SET(fs32p, &gdt[GUFS32_SEL]);
    PCPU_SET(gs32p, &gdt[GUGS32_SEL]);

    /*
     * Initialize mutexes.
     *
     * icu_lock: in order to allow an interrupt to occur in a critical
     *	         section, to set pcpu->ipending (etc...) properly, we
     *	         must be able to get the icu lock, so it can't be
     *	         under witness.
     */
    mutex_init();
    mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);
    mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF);

    /* exception handling */
    pti = pti_get_default();
    TUNABLE_INT_FETCH("vm.pmap.pti", &pti);

    for (x = 0; x < NIDT; x++)
        setidt(x, pti ? &IDTVEC(rsvd_pti) : &IDTVEC(rsvd), SDT_SYSIGT,
            SEL_KPL, 0);
    setidt(IDT_DE, pti ? &IDTVEC(div_pti) : &IDTVEC(div), SDT_SYSIGT,
        SEL_KPL, 0);
    setidt(IDT_DB, pti ? &IDTVEC(dbg_pti) : &IDTVEC(dbg), SDT_SYSIGT,
        SEL_KPL, 0);
    setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 2);
    setidt(IDT_BP, pti ? &IDTVEC(bpt_pti) : &IDTVEC(bpt), SDT_SYSIGT,
        SEL_UPL, 0);
    setidt(IDT_OF, pti ? &IDTVEC(ofl_pti) : &IDTVEC(ofl), SDT_SYSIGT,
        SEL_UPL, 0);
    setidt(IDT_BR, pti ? &IDTVEC(bnd_pti) : &IDTVEC(bnd), SDT_SYSIGT,
        SEL_UPL, 0);
    setidt(IDT_UD, pti ? &IDTVEC(ill_pti) : &IDTVEC(ill), SDT_SYSIGT,
        SEL_KPL, 0);
    setidt(IDT_NM, pti ? &IDTVEC(dna_pti) : &IDTVEC(dna), SDT_SYSIGT,
        SEL_KPL, 0);
    setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
    setidt(IDT_FPUGP, pti ? &IDTVEC(fpusegm_pti) : &IDTVEC(fpusegm),
        SDT_SYSIGT, SEL_KPL, 0);
    setidt(IDT_TS, pti ? &IDTVEC(tss_pti) : &IDTVEC(tss), SDT_SYSIGT,
        SEL_KPL, 0);
    setidt(IDT_NP, pti ? &IDTVEC(missing_pti) : &IDTVEC(missing),
        SDT_SYSIGT, SEL_KPL, 0);
    setidt(IDT_SS, pti ? &IDTVEC(stk_pti) : &IDTVEC(stk), SDT_SYSIGT,
        SEL_KPL, 0);
    setidt(IDT_GP, pti ? &IDTVEC(prot_pti) : &IDTVEC(prot), SDT_SYSIGT,
        SEL_KPL, 0);
    setidt(IDT_PF, pti ? &IDTVEC(page_pti) : &IDTVEC(page), SDT_SYSIGT,
        SEL_KPL, 0);
    setidt(IDT_MF, pti ? &IDTVEC(fpu_pti) : &IDTVEC(fpu), SDT_SYSIGT,
        SEL_KPL, 0);
    setidt(IDT_AC, pti ? &IDTVEC(align_pti) : &IDTVEC(align), SDT_SYSIGT,
        SEL_KPL, 0);
    setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 3);
    setidt(IDT_XF, pti ? &IDTVEC(xmm_pti) : &IDTVEC(xmm), SDT_SYSIGT,
        SEL_KPL, 0);
#ifdef KDTRACE_HOOKS
    setidt(IDT_DTRACE_RET, pti ? &IDTVEC(dtrace_ret_pti) :
        &IDTVEC(dtrace_ret), SDT_SYSIGT, SEL_UPL, 0);
#endif
#ifdef XENHVM
    setidt(IDT_EVTCHN, pti ? &IDTVEC(xen_intr_upcall_pti) :
        &IDTVEC(xen_intr_upcall), SDT_SYSIGT, SEL_KPL, 0);
#endif
    r_idt.rd_limit = sizeof(idt0) - 1;
    r_idt.rd_base = (long) idt;
    lidt(&r_idt);

    /*
     * Initialize the clock before the console so that console
     * initialization can use DELAY().
     */
    clock_init();

    /*
     * Use vt(4) by default for UEFI boot (during the sc(4)/vt(4)
     * transition).
     * Once bootblocks have updated, we can test directly for
     * efi_systbl != NULL here...
     */
    if (preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP)
        != NULL)
        vty_set_preferred(VTY_VT);

    finishidentcpu();	/* Final stage of CPU initialization */
    initializecpu();	/* Initialize CPU registers */
    initializecpucache();

    /* doublefault stack space, runs on ist1 */
    common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];

    /*
     * NMI stack, runs on ist2.  The pcpu pointer is stored just
     * above the start of the ist2 stack.
     */
    np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1;
    np->np_pcpu = (register_t) pc;
    common_tss[0].tss_ist2 = (long) np;

    /*
     * MC# stack, runs on ist3.  The pcpu pointer is stored just
     * above the start of the ist3 stack.
     */
    np = ((struct nmi_pcpu *) &mce0_stack[sizeof(mce0_stack)]) - 1;
    np->np_pcpu = (register_t) pc;
    common_tss[0].tss_ist3 = (long) np;

    /* Set the IO permission bitmap (empty due to tss seg limit) */
    common_tss[0].tss_iobase = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE;

    gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
    ltr(gsel_tss);

    amd64_conf_fast_syscall();

    /*
     * Temporarily forge a valid pointer to the PCB, for exception
     * handlers.  It is reinitialized properly below after FPU is
     * set up.  Also set up td_critnest to short-cut the page
     * fault handler.
     */
    cpu_max_ext_state_size = sizeof(struct savefpu);
    thread0.td_pcb = get_pcb_td(&thread0);
    thread0.td_critnest = 1;

    /*
     * The console and kdb should be initialized even earlier than here,
     * but some console drivers don't work until after getmemsize().
     * Default to late console initialization to support these drivers.
     * This loses mainly printf()s in getmemsize() and early debugging.
     */
    late_console = 1;
    TUNABLE_INT_FETCH("debug.late_console", &late_console);
    if (!late_console) {
        cninit();
        amd64_kdb_init();
    }

    getmemsize(kmdp, physfree);
    init_param2(physmem);

    /* now running on new page tables, configured,and u/iom is accessible */

    if (late_console)
        cninit();

#ifdef DEV_ISA
#ifdef DEV_ATPIC
    elcr_probe();
    atpic_startup();
#else
    /* Reset and mask the atpics and leave them shut down. */
    atpic_reset();

    /*
     * Point the ICU spurious interrupt vectors at the APIC spurious
     * interrupt handler.
     */
    setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
    setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
#endif
#else
#error "have you forgotten the isa device?";
#endif

    if (late_console)
        amd64_kdb_init();

    msgbufinit(msgbufp, msgbufsize);
    fpuinit();

    /*
     * Set up thread0 pcb after fpuinit calculated pcb + fpu save
     * area size.  Zero out the extended state header in fpu save
     * area.
     */
    thread0.td_pcb = get_pcb_td(&thread0);
    thread0.td_pcb->pcb_save = get_pcb_user_save_td(&thread0);
    bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
    if (use_xsave) {
        xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
            1);
        xhdr->xstate_bv = xsave_mask;
    }
    /* make an initial tss so cpu can get interrupt stack on syscall! */
    rsp0 = (vm_offset_t)thread0.td_pcb;
    /* Ensure the stack is aligned to 16 bytes */
    rsp0 &= ~0xFul;
    common_tss[0].tss_rsp0 = pti ? ((vm_offset_t)PCPU_PTR(pti_stack) +
        PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful : rsp0;
    PCPU_SET(rsp0, rsp0);
    PCPU_SET(curpcb, thread0.td_pcb);

    /* transfer to user mode */

    _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
    _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
    _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
    _ufssel = GSEL(GUFS32_SEL, SEL_UPL);
    _ugssel = GSEL(GUGS32_SEL, SEL_UPL);

    load_ds(_udatasel);
    load_es(_udatasel);
    load_fs(_ufssel);

    /* setup proc 0's pcb */
    thread0.td_pcb->pcb_flags = 0;
    thread0.td_frame = &proc0_tf;

    env = kern_getenv("kernelname");
    if (env != NULL)
        strlcpy(kernelname, env, sizeof(kernelname));

    cpu_probe_amdc1e();

    thread0.td_critnest = 0;

    TUNABLE_INT_FETCH("hw.ibrs_disable", &hw_ibrs_disable);

    /* Location of kernel stack for locore */
    return ((u_int64_t)thread0.td_pcb);
}
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

    pcpu->pc_acpi_id = 0xffffffff;
}
static int
smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
    struct bios_smap *smapbase;
    struct bios_smap_xattr smap;
    caddr_t kmdp;
    uint32_t *smapattr;
    int count, error, i;

    /* Retrieve the system memory map from the loader. */
    kmdp = preload_search_by_type("elf kernel");
    if (kmdp == NULL)
        kmdp = preload_search_by_type("elf64 kernel");
    smapbase = (struct bios_smap *)preload_search_info(kmdp,
        MODINFO_METADATA | MODINFOMD_SMAP);
    if (smapbase == NULL)
        return (0);
    smapattr = (uint32_t *)preload_search_info(kmdp,
        MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
    count = *((uint32_t *)smapbase - 1) / sizeof(*smapbase);
    error = 0;
    for (i = 0; i < count; i++) {
        smap.base = smapbase[i].base;
        smap.length = smapbase[i].length;
        smap.type = smapbase[i].type;
        if (smapattr != NULL)
            smap.xattr = smapattr[i];
        else
            smap.xattr = 0;
        error = SYSCTL_OUT(req, &smap, sizeof(smap));
    }
    return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
    smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data");
static int
efi_map_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
    struct efi_map_header *efihdr;
    caddr_t kmdp;
    uint32_t efisize;

    kmdp = preload_search_by_type("elf kernel");
    if (kmdp == NULL)
        kmdp = preload_search_by_type("elf64 kernel");
    efihdr = (struct efi_map_header *)preload_search_info(kmdp,
        MODINFO_METADATA | MODINFOMD_EFI_MAP);
    if (efihdr == NULL)
        return (0);
    efisize = *((uint32_t *)efihdr - 1);
    return (SYSCTL_OUT(req, efihdr, efisize));
}
SYSCTL_PROC(_machdep, OID_AUTO, efi_map, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
    efi_map_sysctl_handler, "S,efi_map_header", "Raw EFI Memory Map");
void
spinlock_enter(void)
{
    struct thread *td;
    register_t flags;

    td = curthread;
    if (td->td_md.md_spinlock_count == 0) {
        flags = intr_disable();
        td->td_md.md_spinlock_count = 1;
        td->td_md.md_saved_flags = flags;
        critical_enter();
    } else
        td->td_md.md_spinlock_count++;
}

void
spinlock_exit(void)
{
    struct thread *td;
    register_t flags;

    td = curthread;
    flags = td->td_md.md_saved_flags;
    td->td_md.md_spinlock_count--;
    if (td->td_md.md_spinlock_count == 0) {
        critical_exit();
        intr_restore(flags);
    }
}
/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

    pcb->pcb_r12 = tf->tf_r12;
    pcb->pcb_r13 = tf->tf_r13;
    pcb->pcb_r14 = tf->tf_r14;
    pcb->pcb_r15 = tf->tf_r15;
    pcb->pcb_rbp = tf->tf_rbp;
    pcb->pcb_rbx = tf->tf_rbx;
    pcb->pcb_rip = tf->tf_rip;
    pcb->pcb_rsp = tf->tf_rsp;
}
int
ptrace_set_pc(struct thread *td, unsigned long addr)
{

    td->td_frame->tf_rip = addr;
    set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
    return (0);
}

int
ptrace_single_step(struct thread *td)
{

    td->td_frame->tf_rflags |= PSL_T;
    return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

    td->td_frame->tf_rflags &= ~PSL_T;
    return (0);
}
int
fill_regs(struct thread *td, struct reg *regs)
{
    struct trapframe *tp;

    tp = td->td_frame;
    return (fill_frame_regs(tp, regs));
}
int
fill_frame_regs(struct trapframe *tp, struct reg *regs)
{

    regs->r_r15 = tp->tf_r15;
    regs->r_r14 = tp->tf_r14;
    regs->r_r13 = tp->tf_r13;
    regs->r_r12 = tp->tf_r12;
    regs->r_r11 = tp->tf_r11;
    regs->r_r10 = tp->tf_r10;
    regs->r_r9 = tp->tf_r9;
    regs->r_r8 = tp->tf_r8;
    regs->r_rdi = tp->tf_rdi;
    regs->r_rsi = tp->tf_rsi;
    regs->r_rbp = tp->tf_rbp;
    regs->r_rbx = tp->tf_rbx;
    regs->r_rdx = tp->tf_rdx;
    regs->r_rcx = tp->tf_rcx;
    regs->r_rax = tp->tf_rax;
    regs->r_rip = tp->tf_rip;
    regs->r_cs = tp->tf_cs;
    regs->r_rflags = tp->tf_rflags;
    regs->r_rsp = tp->tf_rsp;
    regs->r_ss = tp->tf_ss;
    if (tp->tf_flags & TF_HASSEGS) {
        regs->r_ds = tp->tf_ds;
        regs->r_es = tp->tf_es;
        regs->r_fs = tp->tf_fs;
        regs->r_gs = tp->tf_gs;
    } else {
        regs->r_ds = 0;
        regs->r_es = 0;
        regs->r_fs = 0;
        regs->r_gs = 0;
    }
    return (0);
}
int
set_regs(struct thread *td, struct reg *regs)
{
    struct trapframe *tp;
    register_t rflags;

    tp = td->td_frame;
    rflags = regs->r_rflags & 0xffffffff;
    if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
        return (EINVAL);
    tp->tf_r15 = regs->r_r15;
    tp->tf_r14 = regs->r_r14;
    tp->tf_r13 = regs->r_r13;
    tp->tf_r12 = regs->r_r12;
    tp->tf_r11 = regs->r_r11;
    tp->tf_r10 = regs->r_r10;
    tp->tf_r9 = regs->r_r9;
    tp->tf_r8 = regs->r_r8;
    tp->tf_rdi = regs->r_rdi;
    tp->tf_rsi = regs->r_rsi;
    tp->tf_rbp = regs->r_rbp;
    tp->tf_rbx = regs->r_rbx;
    tp->tf_rdx = regs->r_rdx;
    tp->tf_rcx = regs->r_rcx;
    tp->tf_rax = regs->r_rax;
    tp->tf_rip = regs->r_rip;
    tp->tf_cs = regs->r_cs;
    tp->tf_rflags = rflags;
    tp->tf_rsp = regs->r_rsp;
    tp->tf_ss = regs->r_ss;
    if (0) {	/* XXXKIB */
        tp->tf_ds = regs->r_ds;
        tp->tf_es = regs->r_es;
        tp->tf_fs = regs->r_fs;
        tp->tf_gs = regs->r_gs;
        tp->tf_flags = TF_HASSEGS;
    }
    set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
    return (0);
}
/* XXX check all this stuff! */
/* externalize from sv_xmm */
static void
fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
{
    struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
    struct envxmm *penv_xmm = &sv_xmm->sv_env;
    int i;

    /* pcb -> fpregs */
    bzero(fpregs, sizeof(*fpregs));

    /* FPU control/status */
    penv_fpreg->en_cw = penv_xmm->en_cw;
    penv_fpreg->en_sw = penv_xmm->en_sw;
    penv_fpreg->en_tw = penv_xmm->en_tw;
    penv_fpreg->en_opcode = penv_xmm->en_opcode;
    penv_fpreg->en_rip = penv_xmm->en_rip;
    penv_fpreg->en_rdp = penv_xmm->en_rdp;
    penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
    penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;

    /* FPU registers */
    for (i = 0; i < 8; ++i)
        bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);

    /* SSE registers */
    for (i = 0; i < 16; ++i)
        bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
}
/* internalize from fpregs into sv_xmm */
static void
set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
{
    struct envxmm *penv_xmm = &sv_xmm->sv_env;
    struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
    int i;

    /* fpregs -> pcb */
    /* FPU control/status */
    penv_xmm->en_cw = penv_fpreg->en_cw;
    penv_xmm->en_sw = penv_fpreg->en_sw;
    penv_xmm->en_tw = penv_fpreg->en_tw;
    penv_xmm->en_opcode = penv_fpreg->en_opcode;
    penv_xmm->en_rip = penv_fpreg->en_rip;
    penv_xmm->en_rdp = penv_fpreg->en_rdp;
    penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
    penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;

    /* FPU registers */
    for (i = 0; i < 8; ++i)
        bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);

    /* SSE registers */
    for (i = 0; i < 16; ++i)
        bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
}
/* externalize from td->pcb */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

    KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
        P_SHOULDSTOP(td->td_proc),
        ("not suspended thread %p", td));
    fpugetregs(td);
    fill_fpregs_xmm(get_pcb_user_save_td(td), fpregs);
    return (0);
}
/* internalize to td->pcb */
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

    set_fpregs_xmm(fpregs, get_pcb_user_save_td(td));
    fpuuserinited(td);
    return (0);
}
/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
    struct pcb *pcb;
    struct trapframe *tp;

    pcb = td->td_pcb;
    tp = td->td_frame;
    PROC_LOCK(curthread->td_proc);
    mcp->mc_onstack = sigonstack(tp->tf_rsp);
    PROC_UNLOCK(curthread->td_proc);
    mcp->mc_r15 = tp->tf_r15;
    mcp->mc_r14 = tp->tf_r14;
    mcp->mc_r13 = tp->tf_r13;
    mcp->mc_r12 = tp->tf_r12;
    mcp->mc_r11 = tp->tf_r11;
    mcp->mc_r10 = tp->tf_r10;
    mcp->mc_r9 = tp->tf_r9;
    mcp->mc_r8 = tp->tf_r8;
    mcp->mc_rdi = tp->tf_rdi;
    mcp->mc_rsi = tp->tf_rsi;
    mcp->mc_rbp = tp->tf_rbp;
    mcp->mc_rbx = tp->tf_rbx;
    mcp->mc_rcx = tp->tf_rcx;
    mcp->mc_rflags = tp->tf_rflags;
    if (flags & GET_MC_CLEAR_RET) {
        mcp->mc_rax = 0;
        mcp->mc_rdx = 0;
        mcp->mc_rflags &= ~PSL_C;
    } else {
        mcp->mc_rax = tp->tf_rax;
        mcp->mc_rdx = tp->tf_rdx;
    }
    mcp->mc_rip = tp->tf_rip;
    mcp->mc_cs = tp->tf_cs;
    mcp->mc_rsp = tp->tf_rsp;
    mcp->mc_ss = tp->tf_ss;
    mcp->mc_ds = tp->tf_ds;
    mcp->mc_es = tp->tf_es;
    mcp->mc_fs = tp->tf_fs;
    mcp->mc_gs = tp->tf_gs;
    mcp->mc_flags = tp->tf_flags;
    mcp->mc_len = sizeof(*mcp);
    get_fpcontext(td, mcp, NULL, 0);
    update_pcb_bases(pcb);
    mcp->mc_fsbase = pcb->pcb_fsbase;
    mcp->mc_gsbase = pcb->pcb_gsbase;
    mcp->mc_xfpustate = 0;
    mcp->mc_xfpustate_len = 0;
    bzero(mcp->mc_spare, sizeof(mcp->mc_spare));
    return (0);
}
/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
    struct pcb *pcb;
    struct trapframe *tp;
    char *xfpustate;
    long rflags;
    int ret;

    pcb = td->td_pcb;
    tp = td->td_frame;
    if (mcp->mc_len != sizeof(*mcp) ||
        (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
        return (EINVAL);
    rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
        (tp->tf_rflags & ~PSL_USERCHANGE);
    if (mcp->mc_flags & _MC_HASFPXSTATE) {
        if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
            sizeof(struct savefpu))
            return (EINVAL);
        xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
        ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
            mcp->mc_xfpustate_len);
        if (ret != 0)
            return (ret);
    } else
        xfpustate = NULL;
    ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
    if (ret != 0)
        return (ret);
    tp->tf_r15 = mcp->mc_r15;
    tp->tf_r14 = mcp->mc_r14;
    tp->tf_r13 = mcp->mc_r13;
    tp->tf_r12 = mcp->mc_r12;
    tp->tf_r11 = mcp->mc_r11;
    tp->tf_r10 = mcp->mc_r10;
    tp->tf_r9 = mcp->mc_r9;
    tp->tf_r8 = mcp->mc_r8;
    tp->tf_rdi = mcp->mc_rdi;
    tp->tf_rsi = mcp->mc_rsi;
    tp->tf_rbp = mcp->mc_rbp;
    tp->tf_rbx = mcp->mc_rbx;
    tp->tf_rdx = mcp->mc_rdx;
    tp->tf_rcx = mcp->mc_rcx;
    tp->tf_rax = mcp->mc_rax;
    tp->tf_rip = mcp->mc_rip;
    tp->tf_rflags = rflags;
    tp->tf_rsp = mcp->mc_rsp;
    tp->tf_ss = mcp->mc_ss;
    tp->tf_flags = mcp->mc_flags;
    if (tp->tf_flags & TF_HASSEGS) {
        tp->tf_ds = mcp->mc_ds;
        tp->tf_es = mcp->mc_es;
        tp->tf_fs = mcp->mc_fs;
        tp->tf_gs = mcp->mc_gs;
    }
    set_pcb_flags(pcb, PCB_FULL_IRET);
    if (mcp->mc_flags & _MC_HASBASES) {
        pcb->pcb_fsbase = mcp->mc_fsbase;
        pcb->pcb_gsbase = mcp->mc_gsbase;
    }
    return (0);
}
static void
get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
    size_t xfpusave_len)
{
    size_t max_len, len;

    mcp->mc_ownedfp = fpugetregs(td);
    bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
        sizeof(mcp->mc_fpstate));
    mcp->mc_fpformat = fpuformat();
    if (!use_xsave || xfpusave_len == 0)
        return;
    max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
    len = xfpusave_len;
    if (len > max_len) {
        len = max_len;
        bzero(xfpusave + max_len, xfpusave_len - max_len);
    }
    mcp->mc_flags |= _MC_HASFPXSTATE;
    mcp->mc_xfpustate_len = len;
    bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
}
static int
set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate,
    size_t xfpustate_len)
{
    int error;

    if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
        return (0);	/* XXX handle better */
    else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
        return (EINVAL);
    else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
        /* We don't care what state is left in the FPU or PCB. */
        fpstate_drop(td);
        error = 0;
    } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
        mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
        error = fpusetregs(td, (struct savefpu *)&mcp->mc_fpstate,
            xfpustate, xfpustate_len);
    } else
        return (EINVAL);
    return (error);
}
void
fpstate_drop(struct thread *td)
{

    KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
    critical_enter();
    if (PCPU_GET(fpcurthread) == td)
        fpudrop();
    /*
     * XXX force a full drop of the fpu.  The above only drops it if we
     * owned it.
     *
     * XXX I don't much like fpugetuserregs()'s semantics of doing a full
     * drop.  Dropping only to the pcb matches fnsave's behaviour.
     * We only need to drop to !PCB_INITDONE in sendsig().  But
     * sendsig() is the only caller of fpugetuserregs()... perhaps we just
     * have too many layers.
     */
    clear_pcb_flags(curthread->td_pcb,
        PCB_FPUINITDONE | PCB_USERFPUINITDONE);
    critical_exit();
}
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
    struct pcb *pcb;

    if (td == NULL) {
        dbregs->dr[0] = rdr0();
        dbregs->dr[1] = rdr1();
        dbregs->dr[2] = rdr2();
        dbregs->dr[3] = rdr3();
        dbregs->dr[6] = rdr6();
        dbregs->dr[7] = rdr7();
    } else {
        pcb = td->td_pcb;
        dbregs->dr[0] = pcb->pcb_dr0;
        dbregs->dr[1] = pcb->pcb_dr1;
        dbregs->dr[2] = pcb->pcb_dr2;
        dbregs->dr[3] = pcb->pcb_dr3;
        dbregs->dr[6] = pcb->pcb_dr6;
        dbregs->dr[7] = pcb->pcb_dr7;
    }
    dbregs->dr[4] = 0;
    dbregs->dr[5] = 0;
    dbregs->dr[8] = 0;
    dbregs->dr[9] = 0;
    dbregs->dr[10] = 0;
    dbregs->dr[11] = 0;
    dbregs->dr[12] = 0;
    dbregs->dr[13] = 0;
    dbregs->dr[14] = 0;
    dbregs->dr[15] = 0;
    return (0);
}
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
    struct pcb *pcb;
    int i;

    if (td == NULL) {
        load_dr0(dbregs->dr[0]);
        load_dr1(dbregs->dr[1]);
        load_dr2(dbregs->dr[2]);
        load_dr3(dbregs->dr[3]);
        load_dr6(dbregs->dr[6]);
        load_dr7(dbregs->dr[7]);
    } else {
        /*
         * Don't let an illegal value for dr7 get set.  Specifically,
         * check for undefined settings.  Setting these bit patterns
         * results in undefined behaviour and can lead to an unexpected
         * TRCTRAP or a general protection fault right here.
         * Upper bits of dr6 and dr7 must not be set
         */
        for (i = 0; i < 4; i++) {
            if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
                return (EINVAL);
            if (td->td_frame->tf_cs == _ucode32sel &&
                DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
                return (EINVAL);
        }
        if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
            (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
            return (EINVAL);

        pcb = td->td_pcb;

        /*
         * Don't let a process set a breakpoint that is not within the
         * process's address space.  If a process could do this, it
         * could halt the system by setting a breakpoint in the kernel
         * (if ddb was enabled).  Thus, we need to check to make sure
         * that no breakpoints are being enabled for addresses outside
         * process's address space.
         *
         * XXX - what about when the watched area of the user's
         * address space is written into from within the kernel
         * ... wouldn't that still cause a breakpoint to be generated
         * from within kernel mode?
         */

        if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
            /* dr0 is enabled */
            if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
                return (EINVAL);
        }
        if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
            /* dr1 is enabled */
            if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
                return (EINVAL);
        }
        if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
            /* dr2 is enabled */
            if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
                return (EINVAL);
        }
        if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
            /* dr3 is enabled */
            if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
                return (EINVAL);
        }

        pcb->pcb_dr0 = dbregs->dr[0];
        pcb->pcb_dr1 = dbregs->dr[1];
        pcb->pcb_dr2 = dbregs->dr[2];
        pcb->pcb_dr3 = dbregs->dr[3];
        pcb->pcb_dr6 = dbregs->dr[6];
        pcb->pcb_dr7 = dbregs->dr[7];

        set_pcb_flags(pcb, PCB_DBREGS);
    }

    return (0);
}
void
reset_dbregs(void)
{

    load_dr7(0);	/* Turn off the control bits first */
    load_dr0(0);
    load_dr1(0);
    load_dr2(0);
    load_dr3(0);
    load_dr6(0);
    load_dr7(0);
}
/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
    u_int64_t dr7, dr6;	/* debug registers dr6 and dr7 */
    u_int64_t bp;	/* breakpoint bits extracted from dr6 */
    int nbp;		/* number of breakpoints that triggered */
    caddr_t addr[4];	/* breakpoint addresses */
    int i;

    dr7 = rdr7();
    if ((dr7 & 0x000000ff) == 0) {
        /*
         * all GE and LE bits in the dr7 register are zero,
         * thus the trap couldn't have been caused by the
         * hardware debug registers
         */
        return (0);
    }

    nbp = 0;
    dr6 = rdr6();
    bp = dr6 & 0x0000000f;
    if (!bp) {
        /*
         * None of the breakpoint bits are set, meaning this
         * trap was not caused by any of the debug registers
         */
        return (0);
    }

    /*
     * At least one of the breakpoints was hit; check to see
     * which ones and if any of them are user space addresses.
     */
    if (bp & 0x01)
        addr[nbp++] = (caddr_t)rdr0();
    if (bp & 0x02)
        addr[nbp++] = (caddr_t)rdr1();
    if (bp & 0x04)
        addr[nbp++] = (caddr_t)rdr2();
    if (bp & 0x08)
        addr[nbp++] = (caddr_t)rdr3();

    for (i = 0; i < nbp; i++) {
        if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
            /* addr[i] is in user space */
            return (nbp);
        }
    }

    /*
     * None of the breakpoints are in user space.
     */
    return (0);
}
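    /*
     * The low four bits of %dr6 are the architectural B0-B3 status
     * flags, one per debug register, set by the CPU when the
     * corresponding breakpoint condition was met.
     */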
/*
 * The pcb_flags is only modified by current thread, or by other threads
 * when current thread is stopped.  However, current thread may change it
 * from the interrupt context in cpu_switch(), or in the trap handler.
 * When we read-modify-write pcb_flags from C sources, compiler may generate
 * code that is not atomic regarding the interrupt handler.  If a trap or
 * interrupt happens and any flag is modified from the handler, it can be
 * clobbered with the cached value later.  Therefore, we implement setting
 * and clearing flags with single-instruction functions, which do not race
 * with possible modification of the flags from the trap or interrupt context,
 * because traps and interrupts are executed only on instruction boundary.
 */

void
set_pcb_flags_raw(struct pcb *pcb, const u_int flags)
{

    __asm __volatile("orl %1,%0"
        : "=m" (pcb->pcb_flags) : "ir" (flags), "m" (pcb->pcb_flags)
        : "cc", "memory");
}
/*
 * The support for RDFSBASE, WRFSBASE and similar instructions for %gs
 * base requires that kernel saves MSR_FSBASE and MSR_{K,}GSBASE into
 * pcb if user space modified the bases.  We must save on the context
 * switch or if the return to usermode happens through the doreti.
 *
 * Tracking of both events is performed by the pcb flag PCB_FULL_IRET,
 * which has the consequence that the base MSRs must be saved each time
 * the PCB_FULL_IRET flag is set.  We disable interrupts to sync with
 * context switches.
 */
void
set_pcb_flags(struct pcb *pcb, const u_int flags)
{
    register_t r;

    if (curpcb == pcb &&
        (flags & PCB_FULL_IRET) != 0 &&
        (pcb->pcb_flags & PCB_FULL_IRET) == 0 &&
        (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE) != 0) {
        r = intr_disable();
        if ((pcb->pcb_flags & PCB_FULL_IRET) == 0) {
            if (rfs() == _ufssel)
                pcb->pcb_fsbase = rdfsbase();
            if (rgs() == _ugssel)
                pcb->pcb_gsbase = rdmsr(MSR_KGSBASE);
        }
        set_pcb_flags_raw(pcb, flags);
        intr_restore(r);
    } else {
        set_pcb_flags_raw(pcb, flags);
    }
}
void
clear_pcb_flags(struct pcb *pcb, const u_int flags)
{

    __asm __volatile("andl %1,%0"
        : "=m" (pcb->pcb_flags) : "ir" (~flags), "m" (pcb->pcb_flags)
        : "cc", "memory");
}
#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only available as
 * inline functions, thus cannot be called from the debugger.
 */

/* silence compiler warnings */
u_char inb_(u_short);
void outb_(u_short, u_char);

u_char
inb_(u_short port)
{
    return inb(port);
}

void
outb_(u_short port, u_char data)
{
    outb(port, data);
}

#endif /* KDB */