/*-
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_lapic.h"
#include "vmm_ioport.h"
#include "vlapic_priv.h"
#include "svm_softc.h"
SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);
/*
 * SVM CPUID function 0x8000_000A, edx bit decoding.
 */
#define	AMD_CPUID_SVM_NP		BIT(0)	/* Nested paging or RVI */
#define	AMD_CPUID_SVM_LBR		BIT(1)	/* Last branch virtualization */
#define	AMD_CPUID_SVM_SVML		BIT(2)	/* SVM lock */
#define	AMD_CPUID_SVM_NRIP_SAVE		BIT(3)	/* Next RIP is saved */
#define	AMD_CPUID_SVM_TSC_RATE		BIT(4)	/* TSC rate control */
#define	AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)	/* VMCB state caching */
#define	AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)	/* Flush by ASID */
#define	AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)	/* Decode assist */
#define	AMD_CPUID_SVM_PAUSE_INC		BIT(10)	/* Pause intercept filter */
#define	AMD_CPUID_SVM_PAUSE_FTH		BIT(12)	/* Pause filter threshold */
#define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID |	\

static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,

static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static uint32_t svm_feature;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RD, &nasid, 0,
    "Number of ASIDs supported by this processor");

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

/*
 * SVM host state save area of size 4KB for each core.
 */
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);

static __inline int
flush_by_asid(void)
{

	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
}

static __inline int
decode_assist(void)
{

	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
}
static void
svm_disable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer &= ~EFER_SVM;
	wrmsr(MSR_EFER, efer);
}

/*
 * Disable SVM on all CPUs.
 */
static int
svm_cleanup(void)
{

	smp_rendezvous(NULL, svm_disable, NULL, NULL);
	return (0);
}
/*
 * Verify that all the features required by bhyve are available.
 */
static int
check_svm_features(void)
{
	u_int regs[4];

	/* CPUID Fn8000_000A is for SVM */
	do_cpuid(0x8000000A, regs);
	svm_feature = regs[3];

	nasid = regs[1];
	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

	/* bhyve requires the Nested Paging feature */
	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
		printf("SVM: Nested Paging feature not available.\n");
		return (EIO);
	}

	/* bhyve requires the NRIP Save feature */
	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
		printf("SVM: NRIP Save feature not available.\n");
		return (EIO);
	}

	return (0);
}
static void
svm_enable(void *arg __unused)
{
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	efer |= EFER_SVM;
	wrmsr(MSR_EFER, efer);

	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
}
/*
 * Return 1 if SVM is enabled on this processor and 0 otherwise.
 */
static int
svm_available(void)
{
	uint64_t msr;

	/* Section 15.4 Enabling SVM from APM2. */
	if ((amd_feature2 & AMDID2_SVM) == 0) {
		printf("SVM: not available.\n");
		return (0);
	}

	msr = rdmsr(MSR_VM_CR);
	if ((msr & VM_CR_SVMDIS) != 0) {
		printf("SVM: disabled by BIOS.\n");
		return (0);
	}

	return (1);
}
static int
svm_init(int ipinum)
{
	int error, cpu;

	if (!svm_available())
		return (ENXIO);

	error = check_svm_features();
	if (error)
		return (error);

	vmcb_clean &= VMCB_CACHE_DEFAULT;

	for (cpu = 0; cpu < MAXCPU; cpu++) {
		/*
		 * Initialize the host ASIDs to their "highest" valid values.
		 *
		 * The next ASID allocation will roll over both 'gen' and
		 * 'num' and start off the sequence at {1,1}.
		 */
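		/*
		 * For example, with nasid = 8 this leaves gen = ~0UL and
		 * num = 7; the first allocation in check_asid() then wraps
		 * num back to 1 and bumps gen from ~0UL through 0 to 1.
		 */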
		asid[cpu].gen = ~0UL;
		asid[cpu].num = nasid - 1;
	}

	svm_npt_init(ipinum);

	/* Enable SVM on all CPUs */
	smp_rendezvous(NULL, svm_enable, NULL, NULL);

	return (0);
}
/* Pentium compatible MSRs */
#define	MSR_PENTIUM_START	0
#define	MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define	MSR_AMD6TH_START	0xC0000000UL
#define	MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define	MSR_AMD7TH_START	0xC0010000UL
#define	MSR_AMD7TH_END		0xC0011FFFUL
/*
 * Get the index and bit position for a MSR in permission bitmap.
 * Two bits are used for each MSR: lower bit for read and higher bit for write.
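 *
 * For example, for MSR_EFER (0xC0000080): off = 0x80, base = 0x2000
 * (the Pentium range covers 0x2000 MSRs), so index = (0x80 + 0x2000) / 4
 * = 0x820 and bit = (0xC0000080 % 4) * 2 = 0. A cleared bit means the
 * access is allowed; a set bit means it is intercepted.
 */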
static int
svm_msr_index(uint64_t msr, int *index, int *bit)
{
	uint64_t base, off;

	*index = -1;
	*bit = (msr % 4) * 2;
	base = 0;

	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
		*index = msr / 4;
		return (0);
	}

	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
		off = (msr - MSR_AMD6TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
		off = (msr - MSR_AMD7TH_START);
		*index = (off + base) / 4;
		return (0);
	}

	return (EINVAL);
}
/*
 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
 */
static void
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
{
	int index, bit, error;

	error = svm_msr_index(msr, &index, &bit);
	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
	    "msr %#lx", __func__, bit, msr));

	if (read)
		perm_bitmap[index] &= ~(1UL << bit);

	if (write)
		perm_bitmap[index] &= ~(2UL << bit);
}

static void
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, true);
}

static void
svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
{

	svm_msr_perm(perm_bitmap, msr, true, false);
}
static __inline int
svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
{
	struct vmcb_ctrl *ctrl;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
}

static __inline void
svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    int enabled)
{
	struct vmcb_ctrl *ctrl;
	uint32_t oldval;

	KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intercept[idx];

	if (enabled)
		ctrl->intercept[idx] |= bitmask;
	else
		ctrl->intercept[idx] &= ~bitmask;

	if (ctrl->intercept[idx] != oldval) {
		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
	}
}

static __inline void
svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 0);
}

static __inline void
svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
{

	svm_set_intercept(sc, vcpu, off, bitmask, 1);
}
static void
vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	uint32_t mask;
	int n;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	state = svm_get_vmcb_state(sc, vcpu);

	ctrl->iopm_base_pa = iopm_base_pa;
	ctrl->msrpm_base_pa = msrpm_base_pa;

	/* Enable nested paging */
	ctrl->np_enable = 1;
	ctrl->n_cr3 = np_pml4;

	/*
	 * Intercept accesses to the control registers that are not shadowed
	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
	 */
	for (n = 0; n < 16; n++) {
		mask = (BIT(n) << 16) | BIT(n);
		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
		else
			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
	}
	/*
	 * Intercept everything when tracing guest exceptions; otherwise
	 * just intercept the machine check exception.
	 */
	if (vcpu_trace_exceptions(sc->vm, vcpu)) {
		for (n = 0; n < 32; n++) {
			/*
			 * Skip unimplemented vectors in the exception bitmap.
			 */
			if (n == 2 || n == 9) {
				continue;
			}
			svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
		}
	} else {
		svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
	}
	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_FERR_FREEZE);

	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

	/*
	 * From the section "Canonicalization and Consistency Checks" in
	 * APMv2, the VMRUN intercept bit must be set to pass the consistency
	 * check.
	 */
	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
	/*
	 * The ASID will be set to a non-zero value just before VMRUN.
	 */
	ctrl->asid = 0;

	/*
	 * Section 15.21.1, Interrupt Masking in EFLAGS
	 * Section 15.21.2, Virtualizing APIC.TPR
	 *
	 * This must be set for %rflags and %cr8 isolation of guest and host.
	 */
	ctrl->v_intr_masking = 1;

	/* Enable Last Branch Record aka LBR for debugging */
	ctrl->lbr_virt_en = 1;
	state->dbgctl = BIT(0);

	/* EFER_SVM must always be set when the guest is executing */
	state->efer = EFER_SVM;
	/* Set up the PAT to power-on state */
	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(2, PAT_UNCACHED)		|
	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
	    PAT_VALUE(4, PAT_WRITE_BACK)	|
	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
	    PAT_VALUE(6, PAT_UNCACHED)		|
	    PAT_VALUE(7, PAT_UNCACHEABLE);
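
	/*
	 * Note: PAT_VALUE(n, attr) places the memory type for PAT entry 'n'
	 * in byte 'n' of the 8-byte PAT register. Assuming the standard
	 * PAT_* encodings from <machine/specialreg.h>, the power-on value
	 * above works out to 0x0007040600070406 (WB, WT, UC-, UC, repeated).
	 */
}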
/*
 * Initialize a virtual machine.
 */
static void *
svm_vminit(struct vm *vm, pmap_t pmap)
{
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpu;
	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
	int i;

	svm_sc = malloc(sizeof (struct svm_softc), M_SVM, M_WAITOK | M_ZERO);
	svm_sc->vm = vm;
	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);
	/*
	 * Intercept read and write accesses to all MSRs.
	 */
	memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap));

	/*
	 * Access to the following MSRs is redirected to the VMCB when the
	 * guest is executing. Therefore it is safe to allow the guest to
	 * read/write these MSRs directly without hypervisor involvement.
	 */
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

	/*
	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
	 */
	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

	/* Intercept access to all I/O ports. */
	memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));
	iopm_pa = vtophys(svm_sc->iopm_bitmap);
	msrpm_pa = vtophys(svm_sc->msr_bitmap);
	pml4_pa = svm_sc->nptp;
	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu = svm_get_vcpu(svm_sc, i);

		vcpu->lastcpu = NOCPU;
		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
		svm_msr_guest_init(svm_sc, i);
	}
	return (svm_sc);
}
static int
svm_cpl(struct vmcb_state *state)
{

	/*
	 * From APMv2:
	 *   "Retrieve the CPL from the CPL field in the VMCB, not
	 *    from any segment DPL"
	 */
	return (state->cpl);
}
static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
{
	struct vmcb_segment seg;
	struct vmcb_state *state;
	int error;

	state = &vmcb->state;

	if (state->efer & EFER_LMA) {
		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
		    error));

		/*
		 * Section 4.8.1 of APM2: check if the Code Segment has the
		 * Long attribute set in its descriptor.
		 */
		if (seg.attrib & VMCB_CS_ATTRIB_L)
			return (CPU_MODE_64BIT);
		else
			return (CPU_MODE_COMPATIBILITY);
	} else if (state->cr0 & CR0_PE) {
		return (CPU_MODE_PROTECTED);
	} else {
		return (CPU_MODE_REAL);
	}
}
static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
{

	if ((cr0 & CR0_PG) == 0)
		return (PAGING_MODE_FLAT);
	if ((cr4 & CR4_PAE) == 0)
		return (PAGING_MODE_32);
	if (efer & EFER_LMA)
		return (PAGING_MODE_64);
	else
		return (PAGING_MODE_PAE);
}
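
/*
 * The mode selection above, tabulated (x = don't care):
 *
 *	CR0.PG	CR4.PAE	EFER.LMA	paging mode
 *	0	x	x		PAGING_MODE_FLAT
 *	1	0	x		PAGING_MODE_32
 *	1	1	1		PAGING_MODE_64
 *	1	1	0		PAGING_MODE_PAE
 */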
/*
 * ins/outs utility routines
 */
static uint64_t
svm_inout_str_index(struct svm_regctx *regs, int in)
{
	uint64_t val;

	val = in ? regs->sctx_rdi : regs->sctx_rsi;

	return (val);
}

static uint64_t
svm_inout_str_count(struct svm_regctx *regs, int rep)
{
	uint64_t val;

	val = rep ? regs->sctx_rcx : 1;

	return (val);
}

static void
svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
{
	int error, s;

	if (in) {
		vis->seg_name = VM_REG_GUEST_ES;
	} else {
		/* The segment field has standard encoding */
		s = (info1 >> 10) & 0x7;
		vis->seg_name = vm_segment_name(s);
	}

	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
	KASSERT(error == 0, ("%s: vmcb_getdesc error %d", __func__, error));
}

static int
svm_inout_str_addrsize(uint64_t info1)
{
	int size;

	size = (info1 >> 7) & 0x7;
	switch (size) {
	case 1:
		return (2);	/* 16 bit */
	case 2:
		return (4);	/* 32 bit */
	case 4:
		return (8);	/* 64 bit */
	default:
		panic("%s: invalid size encoding %d", __func__, size);
	}
}
static void
svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
{
	struct vmcb_state *state;

	state = &vmcb->state;
	paging->cr3 = state->cr3;
	paging->cpl = svm_cpl(state);
	paging->cpu_mode = svm_vcpu_mode(vmcb);
	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
	    state->efer);
}
/*
 * Handle guest I/O intercept.
 */
static int
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_regctx *regs;
	struct vm_inout_str *vis;
	uint64_t info1;
	int inout_string;

	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	regs = svm_get_guest_regctx(svm_sc, vcpu);

	info1 = ctrl->exitinfo1;
	inout_string = info1 & BIT(2) ? 1 : 0;
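
	/*
	 * EXITINFO1 layout for IOIO intercepts, per APMv2 (the decoding
	 * below depends on it):
	 *	bit   0		direction (1 = IN, 0 = OUT)
	 *	bit   2		string instruction (INS/OUTS)
	 *	bit   3		REP prefix
	 *	bits  6:4	operand size, one-hot (1/2/4 bytes)
	 *	bits  9:7	address size, one-hot (16/32/64-bit)
	 *	bits 12:10	effective segment (with decode assist)
	 *	bits 31:16	port number
	 */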
	/*
	 * The effective segment number in EXITINFO1[12:10] is populated
	 * only if the processor has the DecodeAssist capability.
	 *
	 * XXX this is not specified explicitly in APMv2 but can be verified
	 * empirically.
	 */
	if (inout_string && !decode_assist())
		return (UNHANDLED);

	vmexit->exitcode = VM_EXITCODE_INOUT;
	vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
	vmexit->u.inout.string = inout_string;
	vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
	vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
	vmexit->u.inout.port = (uint16_t)(info1 >> 16);
	vmexit->u.inout.eax = (uint32_t)(state->rax);
	if (inout_string) {
		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
		vis = &vmexit->u.inout_str;
		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
		vis->rflags = state->rflags;
		vis->cr0 = state->cr0;
		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
		vis->addrsize = svm_inout_str_addrsize(info1);
		svm_inout_str_seginfo(svm_sc, vcpu, info1,
		    vmexit->u.inout.in, vis);
	}

	return (UNHANDLED);
}
static int
npf_fault_type(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_W)
		return (VM_PROT_WRITE);
	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
		return (VM_PROT_EXECUTE);
	else
		return (VM_PROT_READ);
}

static bool
svm_npf_emul_fault(uint64_t exitinfo1)
{

	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
		return (false);
	}

	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
		return (false);
	}

	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
		return (false);
	}

	return (true);
}
static void
svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
{
	struct vm_guest_paging *paging;
	struct vmcb_segment seg;
	struct vmcb_ctrl *ctrl;
	char *inst_bytes;
	int error, inst_len;

	ctrl = &vmcb->ctrl;
	paging = &vmexit->u.inst_emul.paging;

	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
	vmexit->u.inst_emul.gpa = gpa;
	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
	svm_paging_info(vmcb, paging);

	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

	switch (paging->cpu_mode) {
	case CPU_MODE_PROTECTED:
	case CPU_MODE_COMPATIBILITY:
		/*
		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
		 */
		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
		    1 : 0;
		break;
	default:
		vmexit->u.inst_emul.cs_d = 0;
		break;
	}

	/*
	 * Copy the instruction bytes into 'vie' if available.
	 */
	if (decode_assist() && !disable_npf_assist) {
		inst_len = ctrl->inst_len;
		inst_bytes = ctrl->inst_bytes;
	} else {
		inst_len = 0;
		inst_bytes = NULL;
	}
	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
}
static const char *
intrtype_to_str(int intr_type)
{
	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
		return ("hwintr");
	case VMCB_EVENTINJ_TYPE_NMI:
		return ("nmi");
	case VMCB_EVENTINJ_TYPE_INTn:
		return ("swintr");
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		return ("exception");
	default:
		panic("%s: unknown intr_type %d", __func__, intr_type);
	}
}
/*
 * Inject an event to vcpu as described in section 15.20, "Event injection".
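 *
 * The 64-bit EVENTINJ field is encoded as (per APMv2):
 *	bits  7:0	vector
 *	bits 10:8	type (0 = INTR, 2 = NMI, 3 = exception, 4 = soft INTn)
 *	bit  11		error code valid
 *	bit  31		valid
 *	bits 63:32	error code
 */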
static void
svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
    uint32_t error, bool ec_valid)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));

	KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
	    __func__, vector));

	switch (intr_type) {
	case VMCB_EVENTINJ_TYPE_INTR:
	case VMCB_EVENTINJ_TYPE_NMI:
	case VMCB_EVENTINJ_TYPE_INTn:
		break;
	case VMCB_EVENTINJ_TYPE_EXCEPTION:
		if (vector >= 0 && vector <= 31 && vector != 2)
			break;
		/* FALLTHROUGH */
	default:
		panic("%s: invalid intr_type/vector: %d/%d", __func__,
		    intr_type, vector);
	}
	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
	if (ec_valid) {
		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
		ctrl->eventinj |= (uint64_t)error << 32;
		VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
		    intrtype_to_str(intr_type), vector, error);
	} else {
		VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
		    intrtype_to_str(intr_type), vector);
	}
}
static void
svm_update_virqinfo(struct svm_softc *sc, int vcpu)
{
	struct vm *vm;
	struct vlapic *vlapic;
	struct vmcb_ctrl *ctrl;
	int pending;

	vm = sc->vm;
	vlapic = vm_lapic(vm, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	/* Update %cr8 in the emulated vlapic */
	vlapic_set_cr8(vlapic, ctrl->v_tpr);

	/*
	 * If V_IRQ indicates that the interrupt injection attempted on the
	 * last VMRUN was successful then update the vlapic accordingly.
	 */
	if (ctrl->v_intr_vector != 0) {
		pending = ctrl->v_irq;
		KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid "
		    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
		KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector,
		    pending ? "pending" : "accepted");
		if (!pending)
			vlapic_intr_accepted(vlapic, ctrl->v_intr_vector);
	}
}
static void
svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;
	uint64_t intinfo;

	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	intinfo = ctrl->exitintinfo;
	if (!VMCB_EXITINTINFO_VALID(intinfo))
		return;

	/*
	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
	 *
	 * If a #VMEXIT happened during event delivery then record the event
	 * that was being delivered.
	 */
	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
	    intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
}
static __inline int
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
{

	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_VINTR));
}

static __inline void
enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
		KASSERT(vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be enabled", __func__));
		return;
	}

	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
	ctrl->v_irq = 1;
	ctrl->v_ign_tpr = 1;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}
static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);

	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
		KASSERT(!vintr_intercept_enabled(sc, vcpu),
		    ("%s: vintr intercept should be disabled", __func__));
		return;
	}

	if (ctrl->v_intr_vector == 0)
		VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
	else
		VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection");
	ctrl->v_irq = 0;
	ctrl->v_intr_vector = 0;
	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
}
static int
svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
{
	struct vmcb_ctrl *ctrl;
	int oldval, newval;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	oldval = ctrl->intr_shadow;
	newval = val ? 1 : 0;
	if (newval != oldval) {
		ctrl->intr_shadow = newval;
		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
	}
	return (0);
}

static int
svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
{
	struct vmcb_ctrl *ctrl;

	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	*val = ctrl->intr_shadow;
	return (0);
}
/*
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected to
 * track when the vcpu is done handling the NMI.
 */
static int
nmi_blocked(struct svm_softc *sc, int vcpu)
{
	int blocked;

	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
	    VMCB_INTCPT_IRET);
	return (blocked);
}

static void
enable_nmi_blocking(struct svm_softc *sc, int vcpu)
{

	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
}

static void
clear_nmi_blocking(struct svm_softc *sc, int vcpu)
{
	int error;

	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
	/*
	 * When the IRET intercept is cleared the vcpu will attempt to execute
	 * the "iret" when it runs next. However, it is possible to inject
	 * another NMI into the vcpu before the "iret" has actually executed.
	 *
	 * For example, if the "iret" encounters a #NPF when accessing the
	 * stack it will trap back into the hypervisor. If an NMI is pending
	 * for the vcpu it will be injected into the guest.
	 *
	 * XXX this needs to be fixed
	 */
	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

	/*
	 * Set 'intr_shadow' to prevent an NMI from being injected on the
	 * immediate VMRUN.
	 */
	error = svm_modify_intr_shadow(sc, vcpu, 1);
	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
}
static int
emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
    bool *retu)
{
	int error;

	if (lapic_msr(num))
		error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
	else if (num == MSR_EFER)
		error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, val);
	else
		error = svm_wrmsr(sc, vcpu, num, val, retu);

	return (error);
}

static int
emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
{
	struct vmcb_state *state;
	struct svm_regctx *ctx;
	uint64_t result;
	int error;

	if (lapic_msr(num))
		error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
	else
		error = svm_rdmsr(sc, vcpu, num, &result, retu);

	if (error == 0) {
		state = svm_get_vmcb_state(sc, vcpu);
		ctx = svm_get_guest_regctx(sc, vcpu);
		state->rax = result & 0xffffffff;
		ctx->sctx_rdx = result >> 32;
	}

	return (error);
}
static const char *
exit_reason_to_str(uint64_t reason)
{
	static char reasonbuf[32];

	switch (reason) {
	case VMCB_EXIT_INVALID:
		return ("invalvmcb");
	case VMCB_EXIT_SHUTDOWN:
		return ("shutdown");
	case VMCB_EXIT_NPF:
		return ("nptfault");
	case VMCB_EXIT_PAUSE:
		return ("pause");
	case VMCB_EXIT_CPUID:
		return ("cpuid");
	case VMCB_EXIT_INTR:
		return ("extintr");
	case VMCB_EXIT_VINTR:
		return ("vintr");
	case VMCB_EXIT_IRET:
		return ("iret");
	case VMCB_EXIT_MONITOR:
		return ("monitor");
	case VMCB_EXIT_MWAIT:
		return ("mwait");
	default:
		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
		return (reasonbuf);
	}
}
/*
 * From the section "State Saved on Exit" in APMv2: the nRIP is saved for all
 * #VMEXITs that are due to instruction intercepts, as well as MSR and IOIO
 * intercepts, and exceptions caused by INT3, INTO and BOUND instructions.
 *
 * Return 1 if the nRIP is valid and 0 otherwise.
 */
static int
nrip_valid(uint64_t exitcode)
{
	switch (exitcode) {
	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
	case 0x43:		/* INT3 */
	case 0x44:		/* INTO */
	case 0x45:		/* BOUND */
	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
		return (1);
	default:
		return (0);
	}
}
/*
 * Collateral for a generic SVM VM-exit.
 */
static void
vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
{

	vme->exitcode = VM_EXITCODE_SVM;
	vme->u.svm.exitcode = code;
	vme->u.svm.exitinfo1 = info1;
	vme->u.svm.exitinfo2 = info2;
}
static int
svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
{
	struct vmcb *vmcb;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct svm_regctx *ctx;
	struct vm_exception exception;
	uint64_t code, info1, info2, val;
	uint32_t eax, ecx, edx;
	int error, errcode_valid, handled, idtvec, reflect;
	bool retu;

	ctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb = svm_get_vmcb(svm_sc, vcpu);
	state = &vmcb->state;
	ctrl = &vmcb->ctrl;

	handled = 0;
	code = ctrl->exitcode;
	info1 = ctrl->exitinfo1;
	info2 = ctrl->exitinfo2;

	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmexit->rip = state->rip;
	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;

	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

	/*
	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
	 * in an inconsistent state and can trigger assertions that would
	 * never happen otherwise.
	 */
	if (code == VMCB_EXIT_INVALID) {
		vm_exit_svm(vmexit, code, info1, info2);
		return (0);
	}

	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));

	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
	    ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
	    vmexit->inst_length, code, info1, info2));

	svm_update_virqinfo(svm_sc, vcpu);
	svm_save_intinfo(svm_sc, vcpu);
	switch (code) {
	case VMCB_EXIT_IRET:
		/*
		 * Restart execution at "iret" but with the intercept cleared.
		 */
		vmexit->inst_length = 0;
		clear_nmi_blocking(svm_sc, vcpu);
		handled = 1;
		break;
	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
		handled = 1;
		break;
	case VMCB_EXIT_INTR:	/* external interrupt */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
		handled = 1;
		break;
	case VMCB_EXIT_NMI:	/* external NMI */
		handled = 1;
		break;
	case 0x40 ... 0x5F:	/* exception 0 through 31 */
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
		reflect = 1;
		idtvec = code - 0x40;
		switch (idtvec) {
		case IDT_MC:
			/*
			 * Call the machine check handler by hand. Also don't
			 * reflect the machine check back into the guest.
			 */
			reflect = 0;
			VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
			__asm __volatile("int $18");
			break;
		case IDT_PF:
			error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
			    info2);
			KASSERT(error == 0, ("%s: error %d updating cr2",
			    __func__, error));
			/* fallthru */
		case IDT_NP:
		case IDT_SS:
		case IDT_GP:
		case IDT_AC:
		case IDT_TS:
			errcode_valid = 1;
			break;

		case IDT_DF:
			errcode_valid = 1;
			info1 = 0;
			break;

		case IDT_BP:
		case IDT_OF:
		case IDT_BR:
			/*
			 * The 'nrip' field is populated for INT3, INTO and
			 * BOUND exceptions and this also implies that
			 * 'inst_length' is non-zero.
			 *
			 * Reset 'inst_length' to zero so the guest %rip at
			 * event injection is identical to what it was when
			 * the exception originally happened.
			 */
			VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
			    "to zero before injecting exception %d",
			    vmexit->inst_length, idtvec);
			vmexit->inst_length = 0;
			/* fallthru */
		default:
			errcode_valid = 0;
			break;
		}

		KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
		    "when reflecting exception %d into guest",
		    vmexit->inst_length, idtvec));

		if (reflect) {
			/* Reflect the exception back into the guest */
			bzero(&exception, sizeof(struct vm_exception));
			exception.vector = idtvec;
			if (errcode_valid) {
				exception.error_code = info1;
				exception.error_code_valid = 1;
			}
			VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
			    "%d/%#x into the guest", exception.vector,
			    exception.error_code);
			error = vm_inject_exception(svm_sc->vm, vcpu,
			    &exception);
			KASSERT(error == 0, ("%s: vm_inject_exception error %d",
			    __func__, error));
		}
		handled = 1;
		break;
	case VMCB_EXIT_MSR:	/* MSR access. */
		eax = state->rax;
		ecx = ctx->sctx_rcx;
		edx = ctx->sctx_rdx;
		retu = false;

		if (info1) {
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
			val = (uint64_t)edx << 32 | eax;
			VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
			    ecx, val);
			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
				vmexit->exitcode = VM_EXITCODE_WRMSR;
				vmexit->u.msr.code = ecx;
				vmexit->u.msr.wval = val;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_wrmsr retu with bogus exitcode"));
			}
		} else {
			VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
				vmexit->exitcode = VM_EXITCODE_RDMSR;
				vmexit->u.msr.code = ecx;
			} else if (!retu) {
				handled = 1;
			} else {
				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
				    ("emulate_rdmsr retu with bogus exitcode"));
			}
		}
		break;
	case VMCB_EXIT_IO:
		handled = svm_handle_io(svm_sc, vcpu, vmexit);
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
		break;
	case VMCB_EXIT_CPUID:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
		handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
		    (uint32_t *)&state->rax,
		    (uint32_t *)&ctx->sctx_rbx,
		    (uint32_t *)&ctx->sctx_rcx,
		    (uint32_t *)&ctx->sctx_rdx);
		break;
	case VMCB_EXIT_HLT:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
		vmexit->exitcode = VM_EXITCODE_HLT;
		vmexit->u.hlt.rflags = state->rflags;
		break;
	case VMCB_EXIT_PAUSE:
		vmexit->exitcode = VM_EXITCODE_PAUSE;
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
		break;
	case VMCB_EXIT_NPF:
		/* EXITINFO2 contains the faulting guest physical address */
		if (info1 & VMCB_NPF_INFO1_RSV) {
			VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
			    "reserved bits set: info1(%#lx) info2(%#lx)",
			    info1, info2);
		} else if (vm_mem_allocated(svm_sc->vm, info2)) {
			vmexit->exitcode = VM_EXITCODE_PAGING;
			vmexit->u.paging.gpa = info2;
			vmexit->u.paging.fault_type = npf_fault_type(info1);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
			    "on gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		} else if (svm_npf_emul_fault(info1)) {
			svm_handle_inst_emul(vmcb, info2, vmexit);
			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
			    "for gpa %#lx/%#lx at rip %#lx",
			    info2, info1, state->rip);
		}
		break;
	case VMCB_EXIT_MONITOR:
		vmexit->exitcode = VM_EXITCODE_MONITOR;
		break;
	case VMCB_EXIT_MWAIT:
		vmexit->exitcode = VM_EXITCODE_MWAIT;
		break;
	default:
		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
		break;
	}

	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
	    vmexit->rip, vmexit->inst_length);

	if (handled) {
		vmexit->rip += vmexit->inst_length;
		vmexit->inst_length = 0;
		state->rip = vmexit->rip;
	} else {
		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
			/*
			 * If this VM exit was not claimed by anybody then
			 * treat it as a generic SVM exit.
			 */
			vm_exit_svm(vmexit, code, info1, info2);
		} else {
			/*
			 * The exitcode and collateral have been populated.
			 * The VM exit will be processed further in userland.
			 */
		}
	}
	return (handled);
}
static void
svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
{
	uint64_t intinfo;

	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
		return;

	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
	    "valid: %#lx", __func__, intinfo));

	svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
	    VMCB_EXITINTINFO_VECTOR(intinfo),
	    VMCB_EXITINTINFO_EC(intinfo),
	    VMCB_EXITINTINFO_EC_VALID(intinfo));
	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
}
/*
 * Inject events into the virtual cpu.
 */
static void
svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
{
	struct vmcb_ctrl *ctrl;
	struct vmcb_state *state;
	struct svm_vcpu *vcpustate;
	uint8_t v_tpr;
	int vector, need_intr_window, pending_apic_vector;

	state = svm_get_vmcb_state(sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
	vcpustate = svm_get_vcpu(sc, vcpu);

	need_intr_window = 0;
	pending_apic_vector = 0;

	if (vcpustate->nextrip != state->rip) {
		ctrl->intr_shadow = 0;
		VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
		    "cleared due to rip change: %#lx/%#lx",
		    vcpustate->nextrip, state->rip);
	}
	/*
	 * Inject pending events or exceptions for this vcpu.
	 *
	 * An event might be pending because the previous #VMEXIT happened
	 * during event delivery (i.e. ctrl->exitintinfo).
	 *
	 * An event might also be pending because an exception was injected
	 * by the hypervisor (e.g. #PF during instruction emulation).
	 */
	svm_inj_intinfo(sc, vcpu);

	/* NMI event has priority over interrupts. */
	if (vm_nmi_pending(sc->vm, vcpu)) {
		if (nmi_blocked(sc, vcpu)) {
			/*
			 * Can't inject another NMI if the guest has not
			 * yet executed an "iret" after the last NMI.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
			    "to NMI-blocking");
		} else if (ctrl->intr_shadow) {
			/*
			 * Can't inject an NMI if the vcpu is in an intr_shadow.
			 */
			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
			    "interrupt shadow");
			need_intr_window = 1;
			goto done;
		} else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
			/*
			 * If there is already an exception/interrupt pending
			 * then defer the NMI until after that.
			 */
			VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
			    "eventinj %#lx", ctrl->eventinj);

			/*
			 * Use self-IPI to trigger a VM-exit as soon as
			 * possible after the event injection is completed.
			 *
			 * This works only if the external interrupt exiting
			 * is at a lower priority than the event injection.
			 *
			 * Although not explicitly specified in APMv2 the
			 * relative priorities were verified empirically.
			 */
			ipi_cpu(curcpu, IPI_AST);	/* XXX vmm_ipinum? */
		} else {
			vm_nmi_clear(sc->vm, vcpu);

			/* Inject NMI, vector number is not used */
			svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
			    IDT_NMI, 0, false);

			/* virtual NMI blocking is now in effect */
			enable_nmi_blocking(sc, vcpu);

			VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
		}
	}
	if (!vm_extint_pending(sc->vm, vcpu)) {
		/*
		 * APIC interrupts are delivered using the V_IRQ offload.
		 *
		 * The primary benefit is that the hypervisor doesn't need to
		 * deal with the various conditions that inhibit interrupts.
		 * It also means that TPR changes via CR8 will be handled
		 * without any hypervisor involvement.
		 *
		 * Note that the APIC vector must remain pending in the vIRR
		 * until it is confirmed that it was delivered to the guest.
		 * This can be confirmed based on the value of V_IRQ at the
		 * next #VMEXIT (1 = pending, 0 = delivered).
		 *
		 * Also note that it is possible that another higher priority
		 * vector can become pending before this vector is delivered
		 * to the guest. This is alright because vcpu_notify_event()
		 * will send an IPI and force the vcpu to trap back into the
		 * hypervisor. The higher priority vector will be injected on
		 * the next VMRUN.
		 */
		if (vlapic_pending_intr(vlapic, &vector)) {
			KASSERT(vector >= 16 && vector <= 255,
			    ("invalid vector %d from local APIC", vector));
			pending_apic_vector = vector;
		}
		goto done;
	}
	/* Ask the legacy pic for a vector to inject */
	vatpic_pending_intr(sc->vm, &vector);
	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR",
	    vector));

	/*
	 * If the guest has disabled interrupts or is in an interrupt shadow
	 * then we cannot inject the pending interrupt.
	 */
	if ((state->rflags & PSL_I) == 0) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "rflags %#lx", vector, state->rflags);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->intr_shadow) {
		VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "interrupt shadow", vector);
		need_intr_window = 1;
		goto done;
	}

	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
		    "eventinj %#lx", vector, ctrl->eventinj);
		need_intr_window = 1;
		goto done;
	}

	/*
	 * Legacy PIC interrupts are delivered via the event injection
	 * mechanism.
	 */
	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

	vm_extint_clear(sc->vm, vcpu);
	vatpic_intr_accepted(sc->vm, vector);

	/*
	 * Force a VM-exit as soon as the vcpu is ready to accept another
	 * interrupt. This is done because the PIC might have another vector
	 * that it wants to inject. Also, if the APIC has a pending interrupt
	 * that was preempted by the ExtInt then it allows us to inject the
	 * APIC vector as soon as possible.
	 */
	need_intr_window = 1;
done:
	/*
	 * The guest can modify the TPR by writing to %CR8. In guest mode
	 * the processor reflects this write to V_TPR without hypervisor
	 * intervention.
	 *
	 * The guest can also modify the TPR by writing to it via the memory
	 * mapped APIC page. In this case, the write will be emulated by the
	 * hypervisor. For this reason V_TPR must be updated before every
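	 * VMRUN.
	 *
	 * (%CR8 holds TPR bits 7:4, which is why the value returned by
	 * vlapic_get_cr8() below is asserted to be in the 0..15 range.)
	 */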
	v_tpr = vlapic_get_cr8(vlapic);
	KASSERT(v_tpr >= 0 && v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
	if (ctrl->v_tpr != v_tpr) {
		VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
		    ctrl->v_tpr, v_tpr);
		ctrl->v_tpr = v_tpr;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	}
	if (pending_apic_vector) {
		/*
		 * If an APIC vector is being injected then interrupt window
		 * exiting is not possible on this VMRUN.
		 */
		KASSERT(!need_intr_window, ("intr_window exiting impossible"));
		VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ",
		    pending_apic_vector);

		ctrl->v_irq = 1;
		ctrl->v_ign_tpr = 0;
		ctrl->v_intr_vector = pending_apic_vector;
		ctrl->v_intr_prio = pending_apic_vector >> 4;
		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
	} else if (need_intr_window) {
		/*
		 * We use V_IRQ in conjunction with the VINTR intercept to
		 * trap into the hypervisor as soon as a virtual interrupt
		 * can be injected.
		 *
		 * Since injected events are not subject to intercept checks
		 * we need to ensure that the V_IRQ is not actually going to
		 * be delivered on VM entry. The KASSERT below enforces this.
		 */
		KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
		    (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
		    ("Bogus intr_window_exiting: eventinj (%#lx), "
		    "intr_shadow (%u), rflags (%#lx)",
		    ctrl->eventinj, ctrl->intr_shadow, state->rflags));
		enable_intr_window_exiting(sc, vcpu);
	} else {
		disable_intr_window_exiting(sc, vcpu);
	}
}
static __inline void
restore_host_tss(void)
{
	struct system_segment_descriptor *tss_sd;

	/*
	 * The TSS descriptor was in use prior to launching the guest so it
	 * has been marked busy.
	 *
	 * 'ltr' requires the descriptor to be marked available so change the
	 * type to "64-bit available TSS".
	 */
	tss_sd = PCPU_GET(tss);
	tss_sd->sd_type = SDT_SYSTSS;
	ltr(GSEL(GPROC0_SEL, SEL_KPL));
}
static void
check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
{
	struct svm_vcpu *vcpustate;
	struct vmcb_ctrl *ctrl;
	long eptgen;
	bool alloc_asid;

	KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
	    "active on cpu %u", __func__, thiscpu));

	vcpustate = svm_get_vcpu(sc, vcpuid);
	ctrl = svm_get_vmcb_ctrl(sc, vcpuid);

	/*
	 * The TLB entries associated with the vcpu's ASID are not valid
	 * if either of the following conditions is true:
	 *
	 * 1. The vcpu's ASID generation is different than the host cpu's
	 *    ASID generation. This happens when the vcpu migrates to a new
	 *    host cpu. It can also happen when the number of vcpus executing
	 *    on a host cpu is greater than the number of ASIDs available.
	 *
	 * 2. The pmap generation number is different than the value cached in
	 *    the 'vcpustate'. This happens when the host invalidates pages
	 *    belonging to the guest.
	 *
	 *	asidgen		eptgen		Action
	 *	mismatch	mismatch	(c)
	 *	mismatch	match		(d)
	 *	match		mismatch	(b1) or (b2)
	 *	match		match		(a)
	 *
	 * (a) There is no mismatch in eptgen or ASID generation and therefore
	 *     no further action is needed.
	 *
	 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
	 *      retained and the TLB entries associated with this ASID
	 *      are flushed by VMRUN.
	 *
	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
	 *      allocated.
	 *
	 * (c) A new ASID is allocated.
	 *
	 * (d) A new ASID is allocated.
	 */
	alloc_asid = false;
	eptgen = pmap->pm_eptgen;
	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

	if (vcpustate->asid.gen != asid[thiscpu].gen) {
		alloc_asid = true;	/* (c) and (d) */
	} else if (vcpustate->eptgen != eptgen) {
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
		else
			alloc_asid = true;	/* (b2) */
	} else {
		/*
		 * This is the common case (a).
		 */
		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
	}
	if (alloc_asid) {
		if (++asid[thiscpu].num >= nasid) {
			asid[thiscpu].num = 1;
			if (++asid[thiscpu].gen == 0)
				asid[thiscpu].gen = 1;
			/*
			 * If this cpu does not support "flush-by-asid"
			 * then flush the entire TLB on a generation
			 * bump. Subsequent ASID allocation in this
			 * generation can be done without a TLB flush.
			 */
			if (!flush_by_asid())
				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
		}
		vcpustate->asid.gen = asid[thiscpu].gen;
		vcpustate->asid.num = asid[thiscpu].num;

		ctrl->asid = vcpustate->asid.num;
		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
		/*
		 * If this cpu supports "flush-by-asid" then the TLB
		 * was not flushed after the generation bump. The TLB
		 * is flushed selectively after every new ASID allocation.
		 */
		if (flush_by_asid())
			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
	}
	vcpustate->eptgen = eptgen;

	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
	KASSERT(ctrl->asid == vcpustate->asid.num,
	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
}
static __inline void
disable_gintr(void)
{

	__asm __volatile("clgi" : : :);
}

static __inline void
enable_gintr(void)
{

	__asm __volatile("stgi" : : :);
}
/*
 * Start vcpu with specified RIP.
 */
static int
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
    void *rend_cookie, void *suspended_cookie)
{
	struct svm_regctx *gctx;
	struct svm_softc *svm_sc;
	struct svm_vcpu *vcpustate;
	struct vmcb_state *state;
	struct vmcb_ctrl *ctrl;
	struct vm_exit *vmexit;
	struct vlapic *vlapic;
	struct vm *vm;
	uint64_t vmcb_pa;
	u_int thiscpu;
	int handled;

	svm_sc = arg;
	vm = svm_sc->vm;

	vcpustate = svm_get_vcpu(svm_sc, vcpu);
	state = svm_get_vmcb_state(svm_sc, vcpu);
	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
	vmexit = vm_exitinfo(vm, vcpu);
	vlapic = vm_lapic(vm, vcpu);

	/*
	 * Stash 'curcpu' on the stack as 'thiscpu'.
	 *
	 * The per-cpu data area is not accessible until MSR_GSBASE is restored
	 * after the #VMEXIT. Since VMRUN is executed inside a critical section
	 * 'curcpu' and 'thiscpu' are guaranteed to be identical.
	 */
	thiscpu = curcpu;

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
	if (vcpustate->lastcpu != thiscpu) {
		/*
		 * Force new ASID allocation by invalidating the generation.
		 */
		vcpustate->asid.gen = 0;

		/*
		 * Invalidate the VMCB state cache by marking all fields dirty.
		 */
		svm_set_dirty(svm_sc, vcpu, 0xffffffff);

		/*
		 * XXX
		 * Setting 'vcpustate->lastcpu' here is a bit premature because
		 * we may return from this function without actually executing
		 * the VMRUN instruction. This could happen if a rendezvous
		 * or an AST is pending on the first time through the loop.
		 *
		 * This works for now but any new side-effects of vcpu
		 * migration should take this case into account.
		 */
		vcpustate->lastcpu = thiscpu;
		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
	}
	svm_msr_guest_enter(svm_sc, vcpu);

	/* Update Guest RIP */
	state->rip = rip;

	do {
		/*
		 * Disable global interrupts to guarantee atomicity during
		 * loading of guest state. This includes not only the state
		 * loaded by the "vmrun" instruction but also software state
		 * maintained by the hypervisor: suspended and rendezvous
		 * state, NPT generation number, vlapic interrupts etc.
		 */
		disable_gintr();

		if (vcpu_suspended(suspended_cookie)) {
			enable_gintr();
			vm_exit_suspended(vm, vcpu, state->rip);
			break;
		}

		if (vcpu_rendezvous_pending(rend_cookie)) {
			enable_gintr();
			vm_exit_rendezvous(vm, vcpu, state->rip);
			break;
		}

		/* We are asked to give the cpu by scheduler. */
		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
			enable_gintr();
			vm_exit_astpending(vm, vcpu, state->rip);
			break;
		}
		svm_inj_interrupts(svm_sc, vcpu, vlapic);

		/* Activate the nested pmap on 'thiscpu' */
		CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active);

		/*
		 * Check the pmap generation and the ASID generation to
		 * ensure that the vcpu does not use stale TLB mappings.
		 */
		check_asid(svm_sc, vcpu, pmap, thiscpu);

		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
		vcpustate->dirty = 0;
		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

		/* Launch Virtual Machine. */
		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
		svm_launch(vmcb_pa, gctx);
		CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);

		/*
		 * Restore MSR_GSBASE to point to the pcpu data area.
		 *
		 * Note that accesses done via PCPU_GET/PCPU_SET will work
		 * only after MSR_GSBASE is restored.
		 *
		 * Also note that we don't bother restoring MSR_KGSBASE
		 * since it is not used in the kernel and will be restored
		 * when the VMRUN ioctl returns to userspace.
		 */
		wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]);
		KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch",
		    thiscpu, curcpu));

		/*
		 * The host GDTR and IDTR are saved by VMRUN and restored
		 * automatically on #VMEXIT. However, the host TSS needs
		 * to be restored explicitly.
		 */
		restore_host_tss();

		/* #VMEXIT disables interrupts so re-enable them here. */
		enable_gintr();

		/* Update 'nextrip' */
		vcpustate->nextrip = state->rip;

		/* Handle #VMEXIT and if required return to user space. */
		handled = svm_vmexit(svm_sc, vcpu, vmexit);
	} while (handled);
	svm_msr_guest_exit(svm_sc, vcpu);

	return (0);
}

static void
svm_vmcleanup(void *arg)
{
	struct svm_softc *sc = arg;

	free(sc, M_SVM);
}
static register_t *
swctx_regptr(struct svm_regctx *regctx, int reg)
{

	switch (reg) {
	case VM_REG_GUEST_RBX:
		return (&regctx->sctx_rbx);
	case VM_REG_GUEST_RCX:
		return (&regctx->sctx_rcx);
	case VM_REG_GUEST_RDX:
		return (&regctx->sctx_rdx);
	case VM_REG_GUEST_RDI:
		return (&regctx->sctx_rdi);
	case VM_REG_GUEST_RSI:
		return (&regctx->sctx_rsi);
	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
	case VM_REG_GUEST_R8:
		return (&regctx->sctx_r8);
	case VM_REG_GUEST_R9:
		return (&regctx->sctx_r9);
	case VM_REG_GUEST_R10:
		return (&regctx->sctx_r10);
	case VM_REG_GUEST_R11:
		return (&regctx->sctx_r11);
	case VM_REG_GUEST_R12:
		return (&regctx->sctx_r12);
	case VM_REG_GUEST_R13:
		return (&regctx->sctx_r13);
	case VM_REG_GUEST_R14:
		return (&regctx->sctx_r14);
	case VM_REG_GUEST_R15:
		return (&regctx->sctx_r15);
	default:
		return (NULL);
	}
}
static int
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_get_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*val = *reg;
		return (0);
	}

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
	return (EINVAL);
}
static int
svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
{
	struct svm_softc *svm_sc;
	register_t *reg;

	svm_sc = arg;

	if (ident == VM_REG_GUEST_INTR_SHADOW) {
		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
	}

	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
		return (0);
	}

	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

	if (reg != NULL) {
		*reg = val;
		return (0);
	}

	/*
	 * XXX deal with CR3 and invalidate TLB entries tagged with the
	 * vcpu's ASID. This needs to be treated differently depending on
	 * whether 'running' is true/false.
	 */

	VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
	return (EINVAL);
}
static int
svm_setcap(void *arg, int vcpu, int type, int val)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;
	switch (type) {
	case VM_CAP_HALT_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT, val);
		break;
	case VM_CAP_PAUSE_EXIT:
		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE, val);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		/* Unrestricted guest execution cannot be disabled in SVM */
		if (val == 0)
			error = EINVAL;
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}
static int
svm_getcap(void *arg, int vcpu, int type, int *retval)
{
	struct svm_softc *sc;
	int error;

	sc = arg;
	error = 0;

	switch (type) {
	case VM_CAP_HALT_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_HLT);
		break;
	case VM_CAP_PAUSE_EXIT:
		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
		    VMCB_INTCPT_PAUSE);
		break;
	case VM_CAP_UNRESTRICTED_GUEST:
		*retval = 1;	/* unrestricted guest is always enabled */
		break;
	default:
		error = ENOENT;
		break;
	}
	return (error);
}
static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
{
	struct svm_softc *svm_sc;
	struct vlapic *vlapic;

	svm_sc = arg;
	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
	vlapic->vm = svm_sc->vm;
	vlapic->vcpuid = vcpuid;
	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

	vlapic_init(vlapic);

	return (vlapic);
}

static void
svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
{

	vlapic_cleanup(vlapic);
	free(vlapic, M_SVM_VLAPIC);
}
struct vmm_ops vmm_ops_amd = {