2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include "opt_bhyve_snapshot.h"
34 #include <sys/param.h>
35 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
43 #include <sys/sysctl.h>
48 #include <machine/cpufunc.h>
49 #include <machine/psl.h>
50 #include <machine/md_var.h>
51 #include <machine/specialreg.h>
52 #include <machine/smp.h>
53 #include <machine/vmm.h>
54 #include <machine/vmm_dev.h>
55 #include <machine/vmm_instruction_emul.h>
56 #include <machine/vmm_snapshot.h>
58 #include "vmm_lapic.h"
61 #include "vmm_ioport.h"
64 #include "vlapic_priv.h"
69 #include "svm_softc.h"
74 SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
78 * SVM CPUID function 0x8000_000A, edx bit decoding.
80 #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */
81 #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */
82 #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */
83 #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */
84 #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */
85 #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */
86 #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */
87 #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */
88 #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. */
89 #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */
90 #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */
92 #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \
103 static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
104 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
107 static MALLOC_DEFINE(M_SVM, "svm", "svm");
108 static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
110 static uint32_t svm_feature = ~0U; /* AMD SVM features. */
111 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
112 "SVM features advertised by CPUID.8000000AH:EDX");
114 static int disable_npf_assist;
115 SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
116 &disable_npf_assist, 0, NULL);
118 /* Maximum ASIDs supported by the processor */
119 static uint32_t nasid;
120 SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
121 "Number of ASIDs supported by this processor");
123 /* Current ASID generation for each host cpu */
124 static struct asid asid[MAXCPU];
127 * SVM host state saved area of size 4KB for each core.
129 static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
131 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
132 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
133 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
135 static int svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
136 static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
142 return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
149 return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
153 svm_disable(void *arg __unused)
157 efer = rdmsr(MSR_EFER);
159 wrmsr(MSR_EFER, efer);
163 * Disable SVM on all CPUs.
169 smp_rendezvous(NULL, svm_disable, NULL, NULL);
174 * Verify that all the features required by bhyve are available.
177 check_svm_features(void)
181 /* CPUID Fn8000_000A is for SVM */
182 do_cpuid(0x8000000A, regs);
183 svm_feature &= regs[3];
186 * The number of ASIDs can be configured to be less than what is
187 * supported by the hardware but not more.
189 if (nasid == 0 || nasid > regs[1])
191 KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));
193 /* bhyve requires the Nested Paging feature */
194 if (!(svm_feature & AMD_CPUID_SVM_NP)) {
195 printf("SVM: Nested Paging feature not available.\n");
199 /* bhyve requires the NRIP Save feature */
200 if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
201 printf("SVM: NRIP Save feature not available.\n");
209 svm_enable(void *arg __unused)
213 efer = rdmsr(MSR_EFER);
215 wrmsr(MSR_EFER, efer);
217 wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
221 * Return 1 if SVM is enabled on this processor and 0 otherwise.
228 /* Section 15.4 Enabling SVM from APM2. */
229 if ((amd_feature2 & AMDID2_SVM) == 0) {
230 printf("SVM: not available.\n");
234 msr = rdmsr(MSR_VM_CR);
235 if ((msr & VM_CR_SVMDIS) != 0) {
236 printf("SVM: disabled by BIOS.\n");
244 svm_modinit(int ipinum)
248 if (!svm_available())
251 error = check_svm_features();
255 vmcb_clean &= VMCB_CACHE_DEFAULT;
257 for (cpu = 0; cpu < MAXCPU; cpu++) {
259 * Initialize the host ASIDs to their "highest" valid values.
261 * The next ASID allocation will rollover both 'gen' and 'num'
262 * and start off the sequence at {1,1}.
264 asid[cpu].gen = ~0UL;
265 asid[cpu].num = nasid - 1;
269 svm_npt_init(ipinum);
271 /* Enable SVM on all CPUs */
272 smp_rendezvous(NULL, svm_enable, NULL, NULL);
284 #ifdef BHYVE_SNAPSHOT
286 svm_set_tsc_offset(struct svm_softc *sc, int vcpu, uint64_t offset)
289 struct vmcb_ctrl *ctrl;
291 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
292 ctrl->tsc_offset = offset;
294 svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
295 VCPU_CTR1(sc->vm, vcpu, "tsc offset changed to %#lx", offset);
297 error = vm_set_tsc_offset(sc->vm, vcpu, offset);
303 /* Pentium compatible MSRs */
304 #define MSR_PENTIUM_START 0
305 #define MSR_PENTIUM_END 0x1FFF
306 /* AMD 6th generation and Intel compatible MSRs */
307 #define MSR_AMD6TH_START 0xC0000000UL
308 #define MSR_AMD6TH_END 0xC0001FFFUL
309 /* AMD 7th and 8th generation compatible MSRs */
310 #define MSR_AMD7TH_START 0xC0010000UL
311 #define MSR_AMD7TH_END 0xC0011FFFUL
314 * Get the index and bit position for an MSR in the permission bitmap.
315 * Two bits are used for each MSR: the lower bit for read and the higher bit for write.
318 svm_msr_index(uint64_t msr, int *index, int *bit)
323 *bit = (msr % 4) * 2;
326 if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
331 base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
332 if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
333 off = (msr - MSR_AMD6TH_START);
334 *index = (off + base) / 4;
338 base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
339 if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
340 off = (msr - MSR_AMD7TH_START);
341 *index = (off + base) / 4;
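/*
 * Illustrative worked example (not part of the driver) of the lookup above,
 * for MSR_EFER (0xC000_0080), which falls in the AMD 6th-generation range.
 * Each MSR consumes two bits, i.e. four MSRs per byte:
 *
 *   base  = 0x2000                           (MSRs in the Pentium range)
 *   off   = 0xC0000080 - 0xC0000000 = 0x80
 *   index = (0x80 + 0x2000) / 4     = 0x820  (byte offset into the bitmap)
 *   bit   = (0xC0000080 % 4) * 2    = 0      (read bit; the write bit is bit 1)
 */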
349 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
352 svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
354 int index, bit, error;
356 error = svm_msr_index(msr, &index, &bit);
357 KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
358 KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
359 ("%s: invalid index %d for msr %#lx", __func__, index, msr));
360 KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
361 "msr %#lx", __func__, bit, msr));
364 perm_bitmap[index] &= ~(1UL << bit);
367 perm_bitmap[index] &= ~(2UL << bit);
371 svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
374 svm_msr_perm(perm_bitmap, msr, true, true);
378 svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
381 svm_msr_perm(perm_bitmap, msr, true, false);
385 svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
387 struct vmcb_ctrl *ctrl;
389 KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
391 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
392 return (ctrl->intercept[idx] & bitmask ? 1 : 0);
396 svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
399 struct vmcb_ctrl *ctrl;
402 KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
404 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
405 oldval = ctrl->intercept[idx];
408 ctrl->intercept[idx] |= bitmask;
410 ctrl->intercept[idx] &= ~bitmask;
412 if (ctrl->intercept[idx] != oldval) {
413 svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
414 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
415 "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
420 svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
423 svm_set_intercept(sc, vcpu, off, bitmask, 0);
427 svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
430 svm_set_intercept(sc, vcpu, off, bitmask, 1);
434 vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
435 uint64_t msrpm_base_pa, uint64_t np_pml4)
437 struct vmcb_ctrl *ctrl;
438 struct vmcb_state *state;
442 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
443 state = svm_get_vmcb_state(sc, vcpu);
445 ctrl->iopm_base_pa = iopm_base_pa;
446 ctrl->msrpm_base_pa = msrpm_base_pa;
448 /* Enable nested paging */
450 ctrl->n_cr3 = np_pml4;
453 * Intercept accesses to the control registers that are not shadowed
454 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
456 for (n = 0; n < 16; n++) {
457 mask = (BIT(n) << 16) | BIT(n);
458 if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
459 svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
461 svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
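/*
 * Worked example of the mask built above (per the VMCB layout, CR-read
 * intercepts occupy the low 16 bits of the CR intercept word and CR-write
 * intercepts the high 16 bits): for CR0 (n == 0),
 *
 *   mask = (BIT(0) << 16) | BIT(0) = 0x00010001
 *
 * i.e. both the CR0-read and CR0-write intercept bits.  CR0/2/3/4/8 get this
 * mask cleared because they are shadowed in the VMCB; all other CRs get it set.
 */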
465 * Intercept everything when tracing guest exceptions; otherwise intercept
466 * only the machine check exception.
468 if (vcpu_trace_exceptions(sc->vm, vcpu)) {
469 for (n = 0; n < 32; n++) {
471 * Skip unimplemented vectors in the exception bitmap.
473 if (n == 2 || n == 9) {
476 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
479 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
482 /* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
483 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
484 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
485 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
486 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
487 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
488 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
489 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
490 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
491 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
492 VMCB_INTCPT_FERR_FREEZE);
493 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
494 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
496 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
497 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
500 * Intercept SVM instructions, since AMD enables them in guests by default.
501 * A non-intercepted VMMCALL already raises #UD, so skip intercepting it.
503 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
504 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
505 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
506 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
507 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
508 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
511 * From section "Canonicalization and Consistency Checks" in APMv2
512 * the VMRUN intercept bit must be set to pass the consistency check.
514 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
517 * The ASID will be set to a non-zero value just before VMRUN.
522 * Section 15.21.1, Interrupt Masking in EFLAGS
523 * Section 15.21.2, Virtualizing APIC.TPR
525 * This must be set for %rflags and %cr8 isolation between guest and host.
527 ctrl->v_intr_masking = 1;
529 /* Enable Last Branch Record aka LBR for debugging */
530 ctrl->lbr_virt_en = 1;
531 state->dbgctl = BIT(0);
533 /* EFER_SVM must always be set when the guest is executing */
534 state->efer = EFER_SVM;
536 /* Set up the PAT to power-on state */
537 state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) |
538 PAT_VALUE(1, PAT_WRITE_THROUGH) |
539 PAT_VALUE(2, PAT_UNCACHED) |
540 PAT_VALUE(3, PAT_UNCACHEABLE) |
541 PAT_VALUE(4, PAT_WRITE_BACK) |
542 PAT_VALUE(5, PAT_WRITE_THROUGH) |
543 PAT_VALUE(6, PAT_UNCACHED) |
544 PAT_VALUE(7, PAT_UNCACHEABLE);
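/*
 * For reference, with the usual FreeBSD PAT_* encodings (WB = 0x06,
 * WT = 0x04, UC- = 0x07, UC = 0x00) and PAT_VALUE(i, v) expanding to
 * v << (i * 8), the expression above evaluates to
 *
 *   state->g_pat == 0x0007040600070406
 *
 * which is the architectural power-on value of IA32_PAT.
 */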
546 /* Set up DR6/7 to power-on state */
547 state->dr6 = DBREG_DR6_RESERVED1;
548 state->dr7 = DBREG_DR7_RESERVED1;
552 * Initialize a virtual machine.
555 svm_init(struct vm *vm, pmap_t pmap)
557 struct svm_softc *svm_sc;
558 struct svm_vcpu *vcpu;
559 vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
563 svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
564 if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
565 panic("malloc of svm_softc not aligned on page boundary");
567 svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
568 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
569 if (svm_sc->msr_bitmap == NULL)
570 panic("contigmalloc of SVM MSR bitmap failed");
571 svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
572 M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
573 if (svm_sc->iopm_bitmap == NULL)
574 panic("contigmalloc of SVM IO bitmap failed");
577 svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pmltop);
580 * Intercept read and write accesses to all MSRs.
582 memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);
585 * Access to the following MSRs is redirected to the VMCB when the
586 * guest is executing. Therefore it is safe to allow the guest to
587 * read/write these MSRs directly without hypervisor involvement.
589 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
590 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
591 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);
593 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
594 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
595 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
596 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
597 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
598 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
599 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
600 svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);
602 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
605 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
607 svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);
609 /* Intercept access to all I/O ports. */
610 memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);
612 iopm_pa = vtophys(svm_sc->iopm_bitmap);
613 msrpm_pa = vtophys(svm_sc->msr_bitmap);
614 pml4_pa = svm_sc->nptp;
615 maxcpus = vm_get_maxcpus(svm_sc->vm);
616 for (i = 0; i < maxcpus; i++) {
617 vcpu = svm_get_vcpu(svm_sc, i);
619 vcpu->lastcpu = NOCPU;
620 vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
621 vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
622 svm_msr_guest_init(svm_sc, i);
628 * Collateral for a generic SVM VM-exit.
631 vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
634 vme->exitcode = VM_EXITCODE_SVM;
635 vme->u.svm.exitcode = code;
636 vme->u.svm.exitinfo1 = info1;
637 vme->u.svm.exitinfo2 = info2;
641 svm_cpl(struct vmcb_state *state)
646 * "Retrieve the CPL from the CPL field in the VMCB, not
647 * from any segment DPL"
652 static enum vm_cpu_mode
653 svm_vcpu_mode(struct vmcb *vmcb)
655 struct vmcb_segment seg;
656 struct vmcb_state *state;
659 state = &vmcb->state;
661 if (state->efer & EFER_LMA) {
662 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
663 KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
667 * Section 4.8.1 of APM2: check whether the code segment has the
668 * Long (L) attribute set in its descriptor.
670 if (seg.attrib & VMCB_CS_ATTRIB_L)
671 return (CPU_MODE_64BIT);
673 return (CPU_MODE_COMPATIBILITY);
674 } else if (state->cr0 & CR0_PE) {
675 return (CPU_MODE_PROTECTED);
677 return (CPU_MODE_REAL);
681 static enum vm_paging_mode
682 svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
685 if ((cr0 & CR0_PG) == 0)
686 return (PAGING_MODE_FLAT);
687 if ((cr4 & CR4_PAE) == 0)
688 return (PAGING_MODE_32);
690 return (PAGING_MODE_64);
692 return (PAGING_MODE_PAE);
696 * ins/outs utility routines
699 svm_inout_str_index(struct svm_regctx *regs, int in)
703 val = in ? regs->sctx_rdi : regs->sctx_rsi;
709 svm_inout_str_count(struct svm_regctx *regs, int rep)
713 val = rep ? regs->sctx_rcx : 1;
719 svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
720 int in, struct vm_inout_str *vis)
725 vis->seg_name = VM_REG_GUEST_ES;
727 /* The segment field has standard encoding */
728 s = (info1 >> 10) & 0x7;
729 vis->seg_name = vm_segment_name(s);
732 error = svm_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
733 KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
737 svm_inout_str_addrsize(uint64_t info1)
741 size = (info1 >> 7) & 0x7;
744 return (2); /* 16 bit */
746 return (4); /* 32 bit */
748 return (8); /* 64 bit */
750 panic("%s: invalid size encoding %d", __func__, size);
755 svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
757 struct vmcb_state *state;
759 state = &vmcb->state;
760 paging->cr3 = state->cr3;
761 paging->cpl = svm_cpl(state);
762 paging->cpu_mode = svm_vcpu_mode(vmcb);
763 paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
770 * Handle guest I/O intercept.
773 svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
775 struct vmcb_ctrl *ctrl;
776 struct vmcb_state *state;
777 struct svm_regctx *regs;
778 struct vm_inout_str *vis;
782 state = svm_get_vmcb_state(svm_sc, vcpu);
783 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
784 regs = svm_get_guest_regctx(svm_sc, vcpu);
786 info1 = ctrl->exitinfo1;
787 inout_string = info1 & BIT(2) ? 1 : 0;
790 * The effective segment number in EXITINFO1[12:10] is populated
791 * only if the processor has the DecodeAssist capability.
793 * XXX this is not specified explicitly in APMv2 but can be verified empirically.
796 if (inout_string && !decode_assist())
799 vmexit->exitcode = VM_EXITCODE_INOUT;
800 vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
801 vmexit->u.inout.string = inout_string;
802 vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
803 vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
804 vmexit->u.inout.port = (uint16_t)(info1 >> 16);
805 vmexit->u.inout.eax = (uint32_t)(state->rax);
808 vmexit->exitcode = VM_EXITCODE_INOUT_STR;
809 vis = &vmexit->u.inout_str;
810 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
811 vis->rflags = state->rflags;
812 vis->cr0 = state->cr0;
813 vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
814 vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
815 vis->addrsize = svm_inout_str_addrsize(info1);
816 svm_inout_str_seginfo(svm_sc, vcpu, info1,
817 vmexit->u.inout.in, vis);
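/*
 * Worked example of the EXITINFO1 decode above (bit layout per the IOIO
 * intercept description in APMv2), e.g. a 64-bit guest executing
 * "rep outsb" to port 0x3f8:
 *
 *   info1 = (0x3f8 << 16) | BIT(9) | BIT(4) | BIT(3) | BIT(2) = 0x03f8021c
 *
 * which decodes to in = 0, string = 1, rep = 1, bytes = 1, port = 0x3f8 and
 * a 64-bit address size, so the INOUT_STR path above fills in the paging
 * state, the %rsi index, the %rcx count and the segment info for emulation.
 */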
824 npf_fault_type(uint64_t exitinfo1)
827 if (exitinfo1 & VMCB_NPF_INFO1_W)
828 return (VM_PROT_WRITE);
829 else if (exitinfo1 & VMCB_NPF_INFO1_ID)
830 return (VM_PROT_EXECUTE);
832 return (VM_PROT_READ);
836 svm_npf_emul_fault(uint64_t exitinfo1)
839 if (exitinfo1 & VMCB_NPF_INFO1_ID) {
843 if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
847 if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
855 svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
857 struct vm_guest_paging *paging;
858 struct vmcb_segment seg;
859 struct vmcb_ctrl *ctrl;
864 paging = &vmexit->u.inst_emul.paging;
866 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
867 vmexit->u.inst_emul.gpa = gpa;
868 vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
869 svm_paging_info(vmcb, paging);
871 error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
872 KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));
874 switch(paging->cpu_mode) {
876 vmexit->u.inst_emul.cs_base = seg.base;
877 vmexit->u.inst_emul.cs_d = 0;
879 case CPU_MODE_PROTECTED:
880 case CPU_MODE_COMPATIBILITY:
881 vmexit->u.inst_emul.cs_base = seg.base;
884 * Section 4.8.1 of APM2, Default Operand Size or D bit.
886 vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
890 vmexit->u.inst_emul.cs_base = 0;
891 vmexit->u.inst_emul.cs_d = 0;
896 * Copy the instruction bytes into 'vie' if available.
898 if (decode_assist() && !disable_npf_assist) {
899 inst_len = ctrl->inst_len;
900 inst_bytes = ctrl->inst_bytes;
905 vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
910 intrtype_to_str(int intr_type)
913 case VMCB_EVENTINJ_TYPE_INTR:
915 case VMCB_EVENTINJ_TYPE_NMI:
917 case VMCB_EVENTINJ_TYPE_INTn:
919 case VMCB_EVENTINJ_TYPE_EXCEPTION:
920 return ("exception");
922 panic("%s: unknown intr_type %d", __func__, intr_type);
928 * Inject an event to vcpu as described in section 15.20, "Event injection".
931 svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
932 uint32_t error, bool ec_valid)
934 struct vmcb_ctrl *ctrl;
936 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
938 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
939 ("%s: event already pending %#lx", __func__, ctrl->eventinj));
941 KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",
945 case VMCB_EVENTINJ_TYPE_INTR:
946 case VMCB_EVENTINJ_TYPE_NMI:
947 case VMCB_EVENTINJ_TYPE_INTn:
949 case VMCB_EVENTINJ_TYPE_EXCEPTION:
950 if (vector >= 0 && vector <= 31 && vector != 2)
954 panic("%s: invalid intr_type/vector: %d/%d", __func__,
957 ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
959 ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
960 ctrl->eventinj |= (uint64_t)error << 32;
961 VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
962 intrtype_to_str(intr_type), vector, error);
964 VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
965 intrtype_to_str(intr_type), vector);
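/*
 * Worked example of the EVENTINJ encoding above: injecting a #GP
 * (vector 13, type VMCB_EVENTINJ_TYPE_EXCEPTION == 3) with error code 0,
 * with the valid bit in bit 31 and the error-code-valid bit in bit 11
 * (APMv2, "Event Injection"), yields
 *
 *   eventinj = 13 | (3 << 8) | EC_VALID | VALID | ((uint64_t)0 << 32)
 *            = 0x80000b0d
 */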
970 svm_update_virqinfo(struct svm_softc *sc, int vcpu)
973 struct vlapic *vlapic;
974 struct vmcb_ctrl *ctrl;
977 vlapic = vm_lapic(vm, vcpu);
978 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
980 /* Update %cr8 in the emulated vlapic */
981 vlapic_set_cr8(vlapic, ctrl->v_tpr);
983 /* Virtual interrupt injection is not used. */
984 KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
985 "v_intr_vector %d", __func__, ctrl->v_intr_vector));
989 svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
991 struct vmcb_ctrl *ctrl;
994 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
995 intinfo = ctrl->exitintinfo;
996 if (!VMCB_EXITINTINFO_VALID(intinfo))
1000 * From APMv2, Section "Intercepts during IDT interrupt delivery"
1002 * If a #VMEXIT happened during event delivery then record the event
1003 * that was being delivered.
1005 VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
1006 intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
1007 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
1008 vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
1013 vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
1016 return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
1017 VMCB_INTCPT_VINTR));
1021 static __inline void
1022 enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
1024 struct vmcb_ctrl *ctrl;
1026 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1028 if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
1029 KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
1030 KASSERT(vintr_intercept_enabled(sc, vcpu),
1031 ("%s: vintr intercept should be enabled", __func__));
1035 VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
1037 ctrl->v_ign_tpr = 1;
1038 ctrl->v_intr_vector = 0;
1039 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1040 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1043 static __inline void
1044 disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
1046 struct vmcb_ctrl *ctrl;
1048 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1050 if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
1051 KASSERT(!vintr_intercept_enabled(sc, vcpu),
1052 ("%s: vintr intercept should be disabled", __func__));
1056 VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
1058 ctrl->v_intr_vector = 0;
1059 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1060 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1064 svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
1066 struct vmcb_ctrl *ctrl;
1069 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1070 oldval = ctrl->intr_shadow;
1071 newval = val ? 1 : 0;
1072 if (newval != oldval) {
1073 ctrl->intr_shadow = newval;
1074 VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
1080 svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
1082 struct vmcb_ctrl *ctrl;
1084 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1085 *val = ctrl->intr_shadow;
1090 * Once an NMI is injected it blocks delivery of further NMIs until the handler
1091 * executes an IRET. The IRET intercept is enabled when an NMI is injected
1092 * to track when the vcpu is done handling the NMI.
1095 nmi_blocked(struct svm_softc *sc, int vcpu)
1099 blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
1105 enable_nmi_blocking(struct svm_softc *sc, int vcpu)
1108 KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
1109 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
1110 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1114 clear_nmi_blocking(struct svm_softc *sc, int vcpu)
1118 KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
1119 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
1121 * When the IRET intercept is cleared the vcpu will attempt to execute
1122 * the "iret" when it runs next. However, it is possible to inject
1123 * another NMI into the vcpu before the "iret" has actually executed.
1125 * For example, if the "iret" encounters a #NPF when accessing the stack
1126 * it will trap back into the hypervisor. If an NMI is pending for
1127 * the vcpu it will be injected into the guest.
1129 * XXX this needs to be fixed
1131 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1134 * Set 'intr_shadow' to prevent an NMI from being injected on the immediate VMRUN.
1137 error = svm_modify_intr_shadow(sc, vcpu, 1);
1138 KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
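/*
 * Summary of the virtual NMI window implemented by the helpers above: when
 * an NMI is injected, enable_nmi_blocking() turns on the IRET intercept;
 * the guest's eventual "iret" causes a #VMEXIT(IRET), whose handler calls
 * clear_nmi_blocking(), dropping the intercept and setting 'intr_shadow'
 * so that the next NMI is not injected until at least one guest
 * instruction (the "iret" itself) has executed.
 */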
1141 #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL
1144 svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
1146 struct vm_exit *vme;
1147 struct vmcb_state *state;
1148 uint64_t changed, lma, oldval;
1151 state = svm_get_vmcb_state(sc, vcpu);
1153 oldval = state->efer;
1154 VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);
1156 newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */
1157 changed = oldval ^ newval;
1159 if (newval & EFER_MBZ_BITS)
1162 /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
1163 if (changed & EFER_LME) {
1164 if (state->cr0 & CR0_PG)
1168 /* EFER.LMA = EFER.LME & CR0.PG */
1169 if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)
1174 if ((newval & EFER_LMA) != lma)
1177 if (newval & EFER_NXE) {
1178 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
1183 * XXX bhyve does not enforce segment limits in 64-bit mode. Until
1184 * this is fixed, flag a guest attempt to set EFER_LMSLE as an error.
1186 if (newval & EFER_LMSLE) {
1187 vme = vm_exitinfo(sc->vm, vcpu);
1188 vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);
1193 if (newval & EFER_FFXSR) {
1194 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))
1198 if (newval & EFER_TCE) {
1199 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))
1203 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
1204 KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
1207 vm_inject_gp(sc->vm, vcpu);
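/*
 * Worked example of the consistency checks above: a guest running with
 * CR0.PG = 0 that writes EFER = 0x500 (LME | LMA) computes lma = 0 (LME is
 * set but paging is off), so (newval & EFER_LMA) != lma and the write is
 * rejected with a #GP.  Likewise, toggling EFER.LME while CR0.PG = 1 fails
 * the APMv2 Table 14-5 check and also lands here.
 */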
1212 emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
1218 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
1219 else if (num == MSR_EFER)
1220 error = svm_write_efer(sc, vcpu, val, retu);
1222 error = svm_wrmsr(sc, vcpu, num, val, retu);
1228 emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
1230 struct vmcb_state *state;
1231 struct svm_regctx *ctx;
1236 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
1238 error = svm_rdmsr(sc, vcpu, num, &result, retu);
1241 state = svm_get_vmcb_state(sc, vcpu);
1242 ctx = svm_get_guest_regctx(sc, vcpu);
1243 state->rax = result & 0xffffffff;
1244 ctx->sctx_rdx = result >> 32;
1252 exit_reason_to_str(uint64_t reason)
1255 static char reasonbuf[32];
1256 static const struct {
1260 { .reason = VMCB_EXIT_INVALID, .str = "invalvmcb" },
1261 { .reason = VMCB_EXIT_SHUTDOWN, .str = "shutdown" },
1262 { .reason = VMCB_EXIT_NPF, .str = "nptfault" },
1263 { .reason = VMCB_EXIT_PAUSE, .str = "pause" },
1264 { .reason = VMCB_EXIT_HLT, .str = "hlt" },
1265 { .reason = VMCB_EXIT_CPUID, .str = "cpuid" },
1266 { .reason = VMCB_EXIT_IO, .str = "inout" },
1267 { .reason = VMCB_EXIT_MC, .str = "mchk" },
1268 { .reason = VMCB_EXIT_INTR, .str = "extintr" },
1269 { .reason = VMCB_EXIT_NMI, .str = "nmi" },
1270 { .reason = VMCB_EXIT_VINTR, .str = "vintr" },
1271 { .reason = VMCB_EXIT_MSR, .str = "msr" },
1272 { .reason = VMCB_EXIT_IRET, .str = "iret" },
1273 { .reason = VMCB_EXIT_MONITOR, .str = "monitor" },
1274 { .reason = VMCB_EXIT_MWAIT, .str = "mwait" },
1275 { .reason = VMCB_EXIT_VMRUN, .str = "vmrun" },
1276 { .reason = VMCB_EXIT_VMMCALL, .str = "vmmcall" },
1277 { .reason = VMCB_EXIT_VMLOAD, .str = "vmload" },
1278 { .reason = VMCB_EXIT_VMSAVE, .str = "vmsave" },
1279 { .reason = VMCB_EXIT_STGI, .str = "stgi" },
1280 { .reason = VMCB_EXIT_CLGI, .str = "clgi" },
1281 { .reason = VMCB_EXIT_SKINIT, .str = "skinit" },
1282 { .reason = VMCB_EXIT_ICEBP, .str = "icebp" },
1283 { .reason = VMCB_EXIT_INVD, .str = "invd" },
1284 { .reason = VMCB_EXIT_INVLPGA, .str = "invlpga" },
1287 for (i = 0; i < nitems(reasons); i++) {
1288 if (reasons[i].reason == reason)
1289 return (reasons[i].str);
1291 snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
1297 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
1298 * that are due to instruction intercepts as well as MSR and IOIO intercepts
1299 * and exceptions caused by INT3, INTO and BOUND instructions.
1301 * Return 1 if the nRIP is valid and 0 otherwise.
1304 nrip_valid(uint64_t exitcode)
1307 case 0x00 ... 0x0F: /* read of CR0 through CR15 */
1308 case 0x10 ... 0x1F: /* write of CR0 through CR15 */
1309 case 0x20 ... 0x2F: /* read of DR0 through DR15 */
1310 case 0x30 ... 0x3F: /* write of DR0 through DR15 */
1311 case 0x43: /* INT3 */
1312 case 0x44: /* INTO */
1313 case 0x45: /* BOUND */
1314 case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
1315 case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */
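/*
 * Example of how this feeds into 'inst_length' in svm_vmexit() below: a
 * CPUID intercept (exitcode 0x72, inside the 0x65-0x7C range above) has a
 * valid nRIP, so inst_length = ctrl->nrip - state->rip = 2 (the length of
 * the cpuid instruction), while an external-interrupt exit (0x60) does
 * not, so inst_length is forced to 0 and the guest resumes at the same %rip.
 */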
1323 svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
1326 struct vmcb_state *state;
1327 struct vmcb_ctrl *ctrl;
1328 struct svm_regctx *ctx;
1329 uint64_t code, info1, info2, val;
1330 uint32_t eax, ecx, edx;
1331 int error, errcode_valid, handled, idtvec, reflect;
1334 ctx = svm_get_guest_regctx(svm_sc, vcpu);
1335 vmcb = svm_get_vmcb(svm_sc, vcpu);
1336 state = &vmcb->state;
1340 code = ctrl->exitcode;
1341 info1 = ctrl->exitinfo1;
1342 info2 = ctrl->exitinfo2;
1344 vmexit->exitcode = VM_EXITCODE_BOGUS;
1345 vmexit->rip = state->rip;
1346 vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;
1348 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);
1351 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
1352 * in an inconsistent state and can trigger assertions that would
1353 * never happen otherwise.
1355 if (code == VMCB_EXIT_INVALID) {
1356 vm_exit_svm(vmexit, code, info1, info2);
1360 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
1361 "injection valid bit is set %#lx", __func__, ctrl->eventinj));
1363 KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
1364 ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
1365 vmexit->inst_length, code, info1, info2));
1367 svm_update_virqinfo(svm_sc, vcpu);
1368 svm_save_intinfo(svm_sc, vcpu);
1371 case VMCB_EXIT_IRET:
1373 * Restart execution at "iret" but with the intercept cleared.
1375 vmexit->inst_length = 0;
1376 clear_nmi_blocking(svm_sc, vcpu);
1379 case VMCB_EXIT_VINTR: /* interrupt window exiting */
1380 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
1383 case VMCB_EXIT_INTR: /* external interrupt */
1384 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
1387 case VMCB_EXIT_NMI: /* external NMI */
1391 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
1393 idtvec = code - 0x40;
1397 * Call the machine check handler by hand. Also don't
1398 * reflect the machine check back into the guest.
1401 VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
1402 __asm __volatile("int $18");
1405 error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
1407 KASSERT(error == 0, ("%s: error %d updating cr2",
1427 * The 'nrip' field is populated for INT3, INTO and
1428 * BOUND exceptions and this also implies that
1429 * 'inst_length' is non-zero.
1431 * Reset 'inst_length' to zero so the guest %rip at
1432 * event injection is identical to what it was when
1433 * the exception originally happened.
1435 VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
1436 "to zero before injecting exception %d",
1437 vmexit->inst_length, idtvec);
1438 vmexit->inst_length = 0;
1445 KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
1446 "when reflecting exception %d into guest",
1447 vmexit->inst_length, idtvec));
1450 /* Reflect the exception back into the guest */
1451 VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
1452 "%d/%#x into the guest", idtvec, (int)info1);
1453 error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
1454 errcode_valid, info1, 0);
1455 KASSERT(error == 0, ("%s: vm_inject_exception error %d",
1460 case VMCB_EXIT_MSR: /* MSR access. */
1462 ecx = ctx->sctx_rcx;
1463 edx = ctx->sctx_rdx;
1467 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
1468 val = (uint64_t)edx << 32 | eax;
1469 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
1471 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
1472 vmexit->exitcode = VM_EXITCODE_WRMSR;
1473 vmexit->u.msr.code = ecx;
1474 vmexit->u.msr.wval = val;
1478 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1479 ("emulate_wrmsr retu with bogus exitcode"));
1482 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
1483 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
1484 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
1485 vmexit->exitcode = VM_EXITCODE_RDMSR;
1486 vmexit->u.msr.code = ecx;
1490 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1491 ("emulate_rdmsr retu with bogus exitcode"));
1496 handled = svm_handle_io(svm_sc, vcpu, vmexit);
1497 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
1499 case VMCB_EXIT_CPUID:
1500 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
1501 handled = x86_emulate_cpuid(svm_sc->vm, vcpu, &state->rax,
1502 &ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx);
1505 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
1506 vmexit->exitcode = VM_EXITCODE_HLT;
1507 vmexit->u.hlt.rflags = state->rflags;
1509 case VMCB_EXIT_PAUSE:
1510 vmexit->exitcode = VM_EXITCODE_PAUSE;
1511 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
1514 /* EXITINFO2 contains the faulting guest physical address */
1515 if (info1 & VMCB_NPF_INFO1_RSV) {
1516 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
1517 "reserved bits set: info1(%#lx) info2(%#lx)",
1519 } else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
1520 vmexit->exitcode = VM_EXITCODE_PAGING;
1521 vmexit->u.paging.gpa = info2;
1522 vmexit->u.paging.fault_type = npf_fault_type(info1);
1523 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
1524 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
1525 "on gpa %#lx/%#lx at rip %#lx",
1526 info2, info1, state->rip);
1527 } else if (svm_npf_emul_fault(info1)) {
1528 svm_handle_inst_emul(vmcb, info2, vmexit);
1529 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
1530 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
1531 "for gpa %#lx/%#lx at rip %#lx",
1532 info2, info1, state->rip);
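/*
 * Illustration of the dispatch above: a nested page fault on a GPA backed
 * by guest memory becomes VM_EXITCODE_PAGING and is serviced by mapping
 * the page, while a fault on an unbacked GPA (e.g. an emulated device
 * page) with usable fault information goes down the instruction-emulation
 * path via svm_handle_inst_emul().
 */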
1535 case VMCB_EXIT_MONITOR:
1536 vmexit->exitcode = VM_EXITCODE_MONITOR;
1538 case VMCB_EXIT_MWAIT:
1539 vmexit->exitcode = VM_EXITCODE_MWAIT;
1541 case VMCB_EXIT_SHUTDOWN:
1542 case VMCB_EXIT_VMRUN:
1543 case VMCB_EXIT_VMMCALL:
1544 case VMCB_EXIT_VMLOAD:
1545 case VMCB_EXIT_VMSAVE:
1546 case VMCB_EXIT_STGI:
1547 case VMCB_EXIT_CLGI:
1548 case VMCB_EXIT_SKINIT:
1549 case VMCB_EXIT_ICEBP:
1550 case VMCB_EXIT_INVD:
1551 case VMCB_EXIT_INVLPGA:
1552 vm_inject_ud(svm_sc->vm, vcpu);
1556 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
1560 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
1561 handled ? "handled" : "unhandled", exit_reason_to_str(code),
1562 vmexit->rip, vmexit->inst_length);
1565 vmexit->rip += vmexit->inst_length;
1566 vmexit->inst_length = 0;
1567 state->rip = vmexit->rip;
1569 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
1571 * If this VM exit was not claimed by anybody then
1572 * treat it as a generic SVM exit.
1574 vm_exit_svm(vmexit, code, info1, info2);
1577 * The exitcode and collateral have been populated.
1578 * The VM exit will be processed further in userland.
1586 svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
1590 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
1593 KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
1594 "valid: %#lx", __func__, intinfo));
1596 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
1597 VMCB_EXITINTINFO_VECTOR(intinfo),
1598 VMCB_EXITINTINFO_EC(intinfo),
1599 VMCB_EXITINTINFO_EC_VALID(intinfo));
1600 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
1601 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
1605 * Inject event to virtual cpu.
1608 svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
1610 struct vmcb_ctrl *ctrl;
1611 struct vmcb_state *state;
1612 struct svm_vcpu *vcpustate;
1614 int vector, need_intr_window;
1617 state = svm_get_vmcb_state(sc, vcpu);
1618 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1619 vcpustate = svm_get_vcpu(sc, vcpu);
1621 need_intr_window = 0;
1623 if (vcpustate->nextrip != state->rip) {
1624 ctrl->intr_shadow = 0;
1625 VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
1626 "cleared due to rip change: %#lx/%#lx",
1627 vcpustate->nextrip, state->rip);
1631 * Inject pending events or exceptions for this vcpu.
1633 * An event might be pending because the previous #VMEXIT happened
1634 * during event delivery (i.e. ctrl->exitintinfo).
1636 * An event might also be pending because an exception was injected
1637 * by the hypervisor (e.g. #PF during instruction emulation).
1639 svm_inj_intinfo(sc, vcpu);
1641 /* NMI event has priority over interrupts. */
1642 if (vm_nmi_pending(sc->vm, vcpu)) {
1643 if (nmi_blocked(sc, vcpu)) {
1645 * Can't inject another NMI if the guest has not
1646 * yet executed an "iret" after the last NMI.
1648 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
1650 } else if (ctrl->intr_shadow) {
1652 * Can't inject an NMI if the vcpu is in an intr_shadow.
1654 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
1655 "interrupt shadow");
1656 need_intr_window = 1;
1658 } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
1660 * If there is already an exception/interrupt pending
1661 * then defer the NMI until after that.
1663 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
1664 "eventinj %#lx", ctrl->eventinj);
1667 * Use self-IPI to trigger a VM-exit as soon as
1668 * possible after the event injection is completed.
1670 * This works only if the external interrupt exiting
1671 * is at a lower priority than the event injection.
1673 * Although not explicitly specified in APMv2, the
1674 * relative priorities were verified empirically.
1676 ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */
1678 vm_nmi_clear(sc->vm, vcpu);
1680 /* Inject NMI, vector number is not used */
1681 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
1684 /* virtual NMI blocking is now in effect */
1685 enable_nmi_blocking(sc, vcpu);
1687 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
1691 extint_pending = vm_extint_pending(sc->vm, vcpu);
1692 if (!extint_pending) {
1693 if (!vlapic_pending_intr(vlapic, &vector))
1695 KASSERT(vector >= 16 && vector <= 255,
1696 ("invalid vector %d from local APIC", vector));
1698 /* Ask the legacy pic for a vector to inject */
1699 vatpic_pending_intr(sc->vm, &vector);
1700 KASSERT(vector >= 0 && vector <= 255,
1701 ("invalid vector %d from INTR", vector));
1705 * If the guest has disabled interrupts or is in an interrupt shadow
1706 * then we cannot inject the pending interrupt.
1708 if ((state->rflags & PSL_I) == 0) {
1709 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
1710 "rflags %#lx", vector, state->rflags);
1711 need_intr_window = 1;
1715 if (ctrl->intr_shadow) {
1716 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
1717 "interrupt shadow", vector);
1718 need_intr_window = 1;
1722 if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
1723 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
1724 "eventinj %#lx", vector, ctrl->eventinj);
1725 need_intr_window = 1;
1729 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
1731 if (!extint_pending) {
1732 vlapic_intr_accepted(vlapic, vector);
1734 vm_extint_clear(sc->vm, vcpu);
1735 vatpic_intr_accepted(sc->vm, vector);
1739 * Force a VM-exit as soon as the vcpu is ready to accept another
1740 * interrupt. This is done because the PIC might have another vector
1741 * that it wants to inject. Also, if the APIC has a pending interrupt
1742 * that was preempted by the ExtInt then it allows us to inject the
1743 * APIC vector as soon as possible.
1745 need_intr_window = 1;
1748 * The guest can modify the TPR by writing to %CR8. In guest mode
1749 * the processor reflects this write to V_TPR without hypervisor intervention.
1752 * The guest can also modify the TPR by writing to it via the memory
1753 * mapped APIC page. In this case, the write will be emulated by the
1754 * hypervisor. For this reason V_TPR must be updated before every
1757 v_tpr = vlapic_get_cr8(vlapic);
1758 KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
1759 if (ctrl->v_tpr != v_tpr) {
1760 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
1761 ctrl->v_tpr, v_tpr);
1762 ctrl->v_tpr = v_tpr;
1763 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1766 if (need_intr_window) {
1768 * We use V_IRQ in conjunction with the VINTR intercept to
1769 * trap into the hypervisor as soon as a virtual interrupt can be delivered.
1772 * Since injected events are not subject to intercept checks
1773 * we need to ensure that the V_IRQ is not actually going to
1774 * be delivered on VM entry. The KASSERT below enforces this.
1776 KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
1777 (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
1778 ("Bogus intr_window_exiting: eventinj (%#lx), "
1779 "intr_shadow (%u), rflags (%#lx)",
1780 ctrl->eventinj, ctrl->intr_shadow, state->rflags));
1781 enable_intr_window_exiting(sc, vcpu);
1783 disable_intr_window_exiting(sc, vcpu);
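/*
 * Recap of the interrupt-window protocol used above: when an interrupt is
 * pending but cannot be injected (rflags.IF clear, interrupt shadow, or an
 * event already queued), enable_intr_window_exiting() programs a dummy
 * V_IRQ with vector 0 and enables the VINTR intercept, so the guest exits
 * with #VMEXIT(VINTR) as soon as it can take interrupts; that exit only
 * bumps a counter and the next pass through this function injects the
 * pending vector.
 */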
1787 static __inline void
1788 restore_host_tss(void)
1790 struct system_segment_descriptor *tss_sd;
1793 * The TSS descriptor was in use prior to launching the guest so it
1794 * has been marked busy.
1796 * 'ltr' requires the descriptor to be marked available so change the
1797 * type to "64-bit available TSS".
1799 tss_sd = PCPU_GET(tss);
1800 tss_sd->sd_type = SDT_SYSTSS;
1801 ltr(GSEL(GPROC0_SEL, SEL_KPL));
1805 svm_pmap_activate(struct svm_softc *sc, int vcpuid, pmap_t pmap)
1807 struct svm_vcpu *vcpustate;
1808 struct vmcb_ctrl *ctrl;
1814 CPU_SET_ATOMIC(cpu, &pmap->pm_active);
1815 smr_enter(pmap->pm_eptsmr);
1817 vcpustate = svm_get_vcpu(sc, vcpuid);
1818 ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
1821 * The TLB entries associated with the vcpu's ASID are not valid
1822 * if either of the following conditions is true:
1824 * 1. The vcpu's ASID generation is different than the host cpu's
1825 * ASID generation. This happens when the vcpu migrates to a new
1826 * host cpu. It can also happen when the number of vcpus executing
1827 * on a host cpu is greater than the number of ASIDs available.
1829 * 2. The pmap generation number is different than the value cached in
1830 * the 'vcpustate'. This happens when the host invalidates pages
1831 * belonging to the guest.
1833 * asidgen      eptgen    Action
 * mismatch     mismatch  (c) | (d)
 * mismatch     match     (c) | (d)
 * match        mismatch  (b1) | (b2)
 * match        match     (a)
1840 * (a) There is no mismatch in eptgen or ASID generation and therefore
1841 * no further action is needed.
1843 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
1844 * retained and the TLB entries associated with this ASID
1845 * are flushed by VMRUN.
1847 * (b2) If the cpu does not support FlushByAsid then a new ASID is
1850 * (c) A new ASID is allocated.
1852 * (d) A new ASID is allocated.
1856 eptgen = atomic_load_long(&pmap->pm_eptgen);
1857 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;
1859 if (vcpustate->asid.gen != asid[cpu].gen) {
1860 alloc_asid = true; /* (c) and (d) */
1861 } else if (vcpustate->eptgen != eptgen) {
1862 if (flush_by_asid())
1863 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */
1865 alloc_asid = true; /* (b2) */
1868 * This is the common case (a).
1870 KASSERT(!alloc_asid, ("ASID allocation not necessary"));
1871 KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
1872 ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
1876 if (++asid[cpu].num >= nasid) {
1878 if (++asid[cpu].gen == 0)
1881 * If this cpu does not support "flush-by-asid"
1882 * then flush the entire TLB on a generation
1883 * bump. Subsequent ASID allocation in this
1884 * generation can be done without a TLB flush.
1886 if (!flush_by_asid())
1887 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
1889 vcpustate->asid.gen = asid[cpu].gen;
1890 vcpustate->asid.num = asid[cpu].num;
1892 ctrl->asid = vcpustate->asid.num;
1893 svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
1895 * If this cpu supports "flush-by-asid" then the TLB
1896 * was not flushed after the generation bump. The TLB
1897 * is flushed selectively after every new ASID allocation.
1899 if (flush_by_asid())
1900 ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
1902 vcpustate->eptgen = eptgen;
1904 KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
1905 KASSERT(ctrl->asid == vcpustate->asid.num,
1906 ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
1910 svm_pmap_deactivate(pmap_t pmap)
1912 smr_exit(pmap->pm_eptsmr);
1913 CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);
1916 static __inline void
1920 __asm __volatile("clgi");
1923 static __inline void
1927 __asm __volatile("stgi");
1930 static __inline void
1931 svm_dr_enter_guest(struct svm_regctx *gctx)
1934 /* Save host control debug registers. */
1935 gctx->host_dr7 = rdr7();
1936 gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);
1939 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
1940 * exceptions in the host based on the guest DRx values. The
1941 * guest DR6, DR7, and DEBUGCTL are saved/restored in the VMCB.
1945 wrmsr(MSR_DEBUGCTLMSR, 0);
1947 /* Save host debug registers. */
1948 gctx->host_dr0 = rdr0();
1949 gctx->host_dr1 = rdr1();
1950 gctx->host_dr2 = rdr2();
1951 gctx->host_dr3 = rdr3();
1952 gctx->host_dr6 = rdr6();
1954 /* Restore guest debug registers. */
1955 load_dr0(gctx->sctx_dr0);
1956 load_dr1(gctx->sctx_dr1);
1957 load_dr2(gctx->sctx_dr2);
1958 load_dr3(gctx->sctx_dr3);
1961 static __inline void
1962 svm_dr_leave_guest(struct svm_regctx *gctx)
1965 /* Save guest debug registers. */
1966 gctx->sctx_dr0 = rdr0();
1967 gctx->sctx_dr1 = rdr1();
1968 gctx->sctx_dr2 = rdr2();
1969 gctx->sctx_dr3 = rdr3();
1972 * Restore host debug registers. Restore DR7 and DEBUGCTL last.
1975 load_dr0(gctx->host_dr0);
1976 load_dr1(gctx->host_dr1);
1977 load_dr2(gctx->host_dr2);
1978 load_dr3(gctx->host_dr3);
1979 load_dr6(gctx->host_dr6);
1980 wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
1981 load_dr7(gctx->host_dr7);
1985 * Start vcpu with specified RIP.
1988 svm_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
1989 struct vm_eventinfo *evinfo)
1991 struct svm_regctx *gctx;
1992 struct svm_softc *svm_sc;
1993 struct svm_vcpu *vcpustate;
1994 struct vmcb_state *state;
1995 struct vmcb_ctrl *ctrl;
1996 struct vm_exit *vmexit;
1997 struct vlapic *vlapic;
2006 vcpustate = svm_get_vcpu(svm_sc, vcpu);
2007 state = svm_get_vmcb_state(svm_sc, vcpu);
2008 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
2009 vmexit = vm_exitinfo(vm, vcpu);
2010 vlapic = vm_lapic(vm, vcpu);
2012 gctx = svm_get_guest_regctx(svm_sc, vcpu);
2013 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
2015 if (vcpustate->lastcpu != curcpu) {
2017 * Force new ASID allocation by invalidating the generation.
2019 vcpustate->asid.gen = 0;
2022 * Invalidate the VMCB state cache by marking all fields dirty.
2024 svm_set_dirty(svm_sc, vcpu, 0xffffffff);
2028 * Setting 'vcpustate->lastcpu' here is a bit premature because
2029 * we may return from this function without actually executing
2030 * the VMRUN instruction. This could happen if a rendezvous
2031 * or an AST is pending on the first time through the loop.
2033 * This works for now but any new side-effects of vcpu
2034 * migration should take this case into account.
2036 vcpustate->lastcpu = curcpu;
2037 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
2040 svm_msr_guest_enter(svm_sc, vcpu);
2042 /* Update Guest RIP */
2047 * Disable global interrupts to guarantee atomicity during
2048 * loading of guest state. This includes not only the state
2049 * loaded by the "vmrun" instruction but also software state
2050 * maintained by the hypervisor: suspended and rendezvous
2051 * state, NPT generation number, vlapic interrupts etc.
2055 if (vcpu_suspended(evinfo)) {
2057 vm_exit_suspended(vm, vcpu, state->rip);
2061 if (vcpu_rendezvous_pending(evinfo)) {
2063 vm_exit_rendezvous(vm, vcpu, state->rip);
2067 if (vcpu_reqidle(evinfo)) {
2069 vm_exit_reqidle(vm, vcpu, state->rip);
2073 /* The scheduler has asked us to give up the cpu. */
2074 if (vcpu_should_yield(vm, vcpu)) {
2076 vm_exit_astpending(vm, vcpu, state->rip);
2080 if (vcpu_debugged(vm, vcpu)) {
2082 vm_exit_debug(vm, vcpu, state->rip);
2087 * #VMEXIT resumes the host with the guest LDTR, so
2088 * save the current LDT selector so it can be restored
2089 * after an exit. The userspace hypervisor probably
2090 * doesn't use an LDT, but save and restore it to be safe.
2095 svm_inj_interrupts(svm_sc, vcpu, vlapic);
2098 * Check the pmap generation and the ASID generation to
2099 * ensure that the vcpu does not use stale TLB mappings.
2101 svm_pmap_activate(svm_sc, vcpu, pmap);
2103 ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
2104 vcpustate->dirty = 0;
2105 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
2107 /* Launch Virtual Machine. */
2108 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
2109 svm_dr_enter_guest(gctx);
2110 svm_launch(vmcb_pa, gctx, get_pcpu());
2111 svm_dr_leave_guest(gctx);
2113 svm_pmap_deactivate(pmap);
2116 * The host GDTR and IDTR are saved by VMRUN and restored
2117 * automatically on #VMEXIT. However, the host TSS needs
2118 * to be restored explicitly.
2122 /* Restore host LDTR. */
2125 /* #VMEXIT disables interrupts so re-enable them here. */
2128 /* Update 'nextrip' */
2129 vcpustate->nextrip = state->rip;
2131 /* Handle #VMEXIT and if required return to user space. */
2132 handled = svm_vmexit(svm_sc, vcpu, vmexit);
2135 svm_msr_guest_exit(svm_sc, vcpu);
2141 svm_cleanup(void *arg)
2143 struct svm_softc *sc = arg;
2145 contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
2146 contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);
2151 swctx_regptr(struct svm_regctx *regctx, int reg)
2155 case VM_REG_GUEST_RBX:
2156 return (®ctx->sctx_rbx);
2157 case VM_REG_GUEST_RCX:
2158 return (®ctx->sctx_rcx);
2159 case VM_REG_GUEST_RDX:
2160 return (®ctx->sctx_rdx);
2161 case VM_REG_GUEST_RDI:
2162 return (®ctx->sctx_rdi);
2163 case VM_REG_GUEST_RSI:
2164 return (®ctx->sctx_rsi);
2165 case VM_REG_GUEST_RBP:
2166 return (®ctx->sctx_rbp);
2167 case VM_REG_GUEST_R8:
2168 return (®ctx->sctx_r8);
2169 case VM_REG_GUEST_R9:
2170 return (®ctx->sctx_r9);
2171 case VM_REG_GUEST_R10:
2172 return (®ctx->sctx_r10);
2173 case VM_REG_GUEST_R11:
2174 return (®ctx->sctx_r11);
2175 case VM_REG_GUEST_R12:
2176 return (®ctx->sctx_r12);
2177 case VM_REG_GUEST_R13:
2178 return (®ctx->sctx_r13);
2179 case VM_REG_GUEST_R14:
2180 return (®ctx->sctx_r14);
2181 case VM_REG_GUEST_R15:
2182 return (®ctx->sctx_r15);
2183 case VM_REG_GUEST_DR0:
2184 return (®ctx->sctx_dr0);
2185 case VM_REG_GUEST_DR1:
2186 return (®ctx->sctx_dr1);
2187 case VM_REG_GUEST_DR2:
2188 return (®ctx->sctx_dr2);
2189 case VM_REG_GUEST_DR3:
2190 return (®ctx->sctx_dr3);
2197 svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
2199 struct svm_softc *svm_sc;
2204 if (ident == VM_REG_GUEST_INTR_SHADOW) {
2205 return (svm_get_intr_shadow(svm_sc, vcpu, val));
2208 if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
2212 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
2219 VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
2224 svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
2226 struct svm_softc *svm_sc;
2231 if (ident == VM_REG_GUEST_INTR_SHADOW) {
2232 return (svm_modify_intr_shadow(svm_sc, vcpu, val));
2235 /* Do not permit user write access to VMCB fields by offset. */
2236 if (!VMCB_ACCESS_OK(ident)) {
2237 if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
2242 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
2249 if (ident == VM_REG_GUEST_ENTRY_INST_LENGTH) {
2255 * XXX deal with CR3 and invalidate TLB entries tagged with the
2256 * vcpu's ASID. This needs to be treated differently depending on
2257 * whether 'running' is true/false.
2260 VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
2265 svm_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2267 return (vmcb_getdesc(arg, vcpu, reg, desc));
2271 svm_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2273 return (vmcb_setdesc(arg, vcpu, reg, desc));
2276 #ifdef BHYVE_SNAPSHOT
2278 svm_snapshot_reg(void *arg, int vcpu, int ident,
2279 struct vm_snapshot_meta *meta)
2284 if (meta->op == VM_SNAPSHOT_SAVE) {
2285 ret = svm_getreg(arg, vcpu, ident, &val);
2289 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
2290 } else if (meta->op == VM_SNAPSHOT_RESTORE) {
2291 SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
2293 ret = svm_setreg(arg, vcpu, ident, val);
2307 svm_setcap(void *arg, int vcpu, int type, int val)
2309 struct svm_softc *sc;
2315 case VM_CAP_HALT_EXIT:
2316 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2317 VMCB_INTCPT_HLT, val);
2319 case VM_CAP_PAUSE_EXIT:
2320 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2321 VMCB_INTCPT_PAUSE, val);
2323 case VM_CAP_UNRESTRICTED_GUEST:
2324 /* Unrestricted guest execution cannot be disabled in SVM */
2336 svm_getcap(void *arg, int vcpu, int type, int *retval)
2338 struct svm_softc *sc;
2345 case VM_CAP_HALT_EXIT:
2346 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2349 case VM_CAP_PAUSE_EXIT:
2350 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2353 case VM_CAP_UNRESTRICTED_GUEST:
2354 *retval = 1; /* unrestricted guest is always enabled */
2363 static struct vmspace *
2364 svm_vmspace_alloc(vm_offset_t min, vm_offset_t max)
2366 return (svm_npt_alloc(min, max));
2370 svm_vmspace_free(struct vmspace *vmspace)
2372 svm_npt_free(vmspace);
2375 static struct vlapic *
2376 svm_vlapic_init(void *arg, int vcpuid)
2378 struct svm_softc *svm_sc;
2379 struct vlapic *vlapic;
2382 vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
2383 vlapic->vm = svm_sc->vm;
2384 vlapic->vcpuid = vcpuid;
2385 vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];
2387 vlapic_init(vlapic);
2393 svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
2396 vlapic_cleanup(vlapic);
2397 free(vlapic, M_SVM_VLAPIC);
2400 #ifdef BHYVE_SNAPSHOT
svm_snapshot(void *arg, struct vm_snapshot_meta *meta)
        /* struct svm_softc is AMD's representation for SVM softc */
        struct svm_softc *sc;
        struct svm_vcpu *vcpu;

        KASSERT(sc != NULL, ("%s: arg was NULL", __func__));

        SNAPSHOT_VAR_OR_LEAVE(sc->nptp, meta, ret, done);

        for (i = 0; i < VM_MAXCPU; i++) {
                vcpu = &sc->vcpu[i];

                /* VMCB fields for virtual cpu i */
                SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.v_tpr, meta, ret, done);
                val = vmcb->ctrl.v_tpr;
                SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
                vmcb->ctrl.v_tpr = val;

                SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.asid, meta, ret, done);
                val = vmcb->ctrl.np_enable;
                SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
                vmcb->ctrl.np_enable = val;

                val = vmcb->ctrl.intr_shadow;
                SNAPSHOT_VAR_OR_LEAVE(val, meta, ret, done);
                vmcb->ctrl.intr_shadow = val;
                SNAPSHOT_VAR_OR_LEAVE(vmcb->ctrl.tlb_ctrl, meta, ret, done);
                SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad1,
                    sizeof(vmcb->state.pad1),
                    meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cpl, meta, ret, done);
                SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad2,
                    sizeof(vmcb->state.pad2),
                    meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.efer, meta, ret, done);
                SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad3,
                    sizeof(vmcb->state.pad3),
                    meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr4, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr3, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr0, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dr7, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dr6, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rflags, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rip, meta, ret, done);
                SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad4,
                    sizeof(vmcb->state.pad4),
                    meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rsp, meta, ret, done);
                SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad5,
                    sizeof(vmcb->state.pad5),
                    meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.rax, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.star, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.lstar, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cstar, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sfmask, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.kernelgsbase,
                    meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_cs, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_esp,
                    meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.sysenter_eip,
                    meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.cr2, meta, ret, done);
                SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad6,
                    sizeof(vmcb->state.pad6),
                    meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.g_pat, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.dbgctl, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.br_from, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.br_to, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.int_from, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vmcb->state.int_to, meta, ret, done);
                SNAPSHOT_BUF_OR_LEAVE(vmcb->state.pad7,
                    sizeof(vmcb->state.pad7),
                    meta, ret, done);
                /* Snapshot swctx for virtual cpu i */
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, ret, done);

                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr0, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr1, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr2, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr3, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr6, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_dr7, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.host_debugctl, meta, ret,
                    done);
                /* Restore other svm_vcpu struct fields */

                /* Restore NEXTRIP field */
                SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done);

                /* Restore lastcpu field */
                SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, ret, done);

                /* Restore EPTGEN field - EPT is Extended Page Table */
                SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, ret, done);

                SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, ret, done);
                SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, ret, done);
                /* Set all caches dirty */
                if (meta->op == VM_SNAPSHOT_RESTORE) {
                        svm_set_dirty(sc, i, VMCB_CACHE_ASID);
                        svm_set_dirty(sc, i, VMCB_CACHE_IOPM);
                        svm_set_dirty(sc, i, VMCB_CACHE_I);
                        svm_set_dirty(sc, i, VMCB_CACHE_TPR);
                        svm_set_dirty(sc, i, VMCB_CACHE_CR2);
                        svm_set_dirty(sc, i, VMCB_CACHE_CR);
                        svm_set_dirty(sc, i, VMCB_CACHE_DT);
                        svm_set_dirty(sc, i, VMCB_CACHE_SEG);
                        svm_set_dirty(sc, i, VMCB_CACHE_NP);
                }
        }

        if (meta->op == VM_SNAPSHOT_RESTORE)
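
/*
 * Per-vcpu snapshot: registers reachable through the generic register and
 * descriptor interfaces, plus VMCB fields that are only addressable by
 * offset via VMCB_ACCESS().
 */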
svm_vmcx_snapshot(void *arg, struct vm_snapshot_meta *meta, int vcpu)
        struct svm_softc *sc;
        int err, running, hostcpu;

        sc = (struct svm_softc *)arg;

        KASSERT(arg != NULL, ("%s: arg was NULL", __func__));
        vmcb = svm_get_vmcb(sc, vcpu);

        running = vcpu_is_running(sc->vm, vcpu, &hostcpu);
        if (running && hostcpu != curcpu) {
                printf("%s: %s%d is running", __func__, vm_name(sc->vm), vcpu);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR0, meta);
        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR2, meta);
        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR3, meta);
        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CR4, meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DR7, meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RAX, meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RSP, meta);
        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RIP, meta);
        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_RFLAGS, meta);

        /* Guest segments */
        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_ES, meta);
        err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_ES, meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_CS, meta);
        err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_CS, meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_SS, meta);
        err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_SS, meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_DS, meta);
        err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_DS, meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_FS, meta);
        err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_FS, meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_GS, meta);
        err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GS, meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_TR, meta);
        err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_TR, meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_LDTR, meta);
        err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_LDTR, meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_EFER, meta);

        err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_IDTR, meta);
        err += vmcb_snapshot_desc(sc, vcpu, VM_REG_GUEST_GDTR, meta);
        /* Specific AMD registers */
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_SYSENTER_CS, 8), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_SYSENTER_ESP, 8), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_SYSENTER_EIP, 8), meta);

        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_NPT_BASE, 8), meta);

        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_CR_INTERCEPT, 4), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_DR_INTERCEPT, 4), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_EXC_INTERCEPT, 4), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_INST1_INTERCEPT, 4), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_INST2_INTERCEPT, 4), meta);

        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_TLB_CTRL, 4), meta);

        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_EXITINFO1, 8), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_EXITINFO2, 8), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_EXITINTINFO, 8), meta);

        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_VIRQ, 8), meta);

        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_GUEST_PAT, 8), meta);

        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_AVIC_BAR, 8), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_AVIC_PAGE, 8), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_AVIC_LT, 8), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_AVIC_PT, 8), meta);

        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_IO_PERM, 8), meta);
        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_MSR_PERM, 8), meta);

        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_ASID, 4), meta);

        err += vmcb_snapshot_any(sc, vcpu,
            VMCB_ACCESS(VMCB_OFF_EXIT_REASON, 8), meta);

        err += svm_snapshot_reg(sc, vcpu, VM_REG_GUEST_INTR_SHADOW, meta);
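
/* Re-apply the saved TSC offset for the vcpu after a restore. */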
svm_restore_tsc(void *arg, int vcpu, uint64_t offset)
        err = svm_set_tsc_offset(arg, vcpu, offset);
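
/*
 * Table of SVM entry points consumed by the machine-independent vmm code.
 */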
const struct vmm_ops vmm_ops_amd = {
        .modinit = svm_modinit,
        .modcleanup = svm_modcleanup,
        .modresume = svm_modresume,
        .cleanup = svm_cleanup,
        .getreg = svm_getreg,
        .setreg = svm_setreg,
        .getdesc = svm_getdesc,
        .setdesc = svm_setdesc,
        .getcap = svm_getcap,
        .setcap = svm_setcap,
        .vmspace_alloc = svm_vmspace_alloc,
        .vmspace_free = svm_vmspace_free,
        .vlapic_init = svm_vlapic_init,
        .vlapic_cleanup = svm_vlapic_cleanup,
#ifdef BHYVE_SNAPSHOT
        .snapshot = svm_snapshot,
        .vmcx_snapshot = svm_vmcx_snapshot,
        .restore_tsc = svm_restore_tsc,
#endif
};