 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/md_var.h>
#include <machine/reg.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_lapic.h"
#include "vmm_ioport.h"
#include "vlapic_priv.h"
#include "svm_softc.h"
SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);

 * SVM CPUID function 0x8000_000A, edx bit decoding.
#define AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
#define AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
#define AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
#define AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
#define AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
#define AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
#define AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)  /* Flush by ASID */
#define AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
#define AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
#define AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */
#define AMD_CPUID_SVM_AVIC		BIT(13) /* AVIC present */
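
/*
 * The bits above mirror the CPUID Fn8000_000A EDX feature flags; they are
 * masked into 'svm_feature' in check_svm_features() below once the host
 * CPUID has been read.
 */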
#define VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID | \

static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,

static MALLOC_DEFINE(M_SVM, "svm", "svm");
static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");

/* Per-CPU context area. */
extern struct pcpu __pcpu[];

static uint32_t svm_feature = ~0U;	/* AMD SVM features. */
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0,
    "SVM features advertised by CPUID.8000000AH:EDX");

static int disable_npf_assist;
SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
    &disable_npf_assist, 0, NULL);

/* Maximum ASIDs supported by the processor */
static uint32_t nasid;
SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0,
    "Number of ASIDs supported by this processor");

/* Current ASID generation for each host cpu */
static struct asid asid[MAXCPU];

 * SVM host state saved area of size 4KB for each core.
static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);

static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");

static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);

    return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);

    return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
svm_disable(void *arg __unused)
    efer = rdmsr(MSR_EFER);
    wrmsr(MSR_EFER, efer);

 * Disable SVM on all CPUs.
    smp_rendezvous(NULL, svm_disable, NULL, NULL);

 * Verify that all the features required by bhyve are available.
check_svm_features(void)
    /* CPUID Fn8000_000A is for SVM */
    do_cpuid(0x8000000A, regs);
    svm_feature &= regs[3];
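    /*
     * do_cpuid() returns the results ordered as EAX, EBX, ECX, EDX, so
     * regs[3] above is EDX (the SVM feature bits) and regs[1] used below
     * is EBX (the number of ASIDs).
     */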
    * The number of ASIDs can be configured to be less than what is
    * supported by the hardware but not more.
    if (nasid == 0 || nasid > regs[1])
    KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));

    /* bhyve requires the Nested Paging feature */
    if (!(svm_feature & AMD_CPUID_SVM_NP)) {
        printf("SVM: Nested Paging feature not available.\n");

    /* bhyve requires the NRIP Save feature */
    if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
        printf("SVM: NRIP Save feature not available.\n");

svm_enable(void *arg __unused)
    efer = rdmsr(MSR_EFER);
    wrmsr(MSR_EFER, efer);
    wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));

 * Return 1 if SVM is enabled on this processor and 0 otherwise.
    /* Section 15.4 Enabling SVM from APM2. */
    if ((amd_feature2 & AMDID2_SVM) == 0) {
        printf("SVM: not available.\n");
    msr = rdmsr(MSR_VM_CR);
    if ((msr & VM_CR_SVMDIS) != 0) {
        printf("SVM: disabled by BIOS.\n");

    if (!svm_available())
    error = check_svm_features();
    vmcb_clean &= VMCB_CACHE_DEFAULT;
    for (cpu = 0; cpu < MAXCPU; cpu++) {
        * Initialize the host ASIDs to their "highest" valid values.
        * The next ASID allocation will rollover both 'gen' and 'num'
        * and start off the sequence at {1,1}.
        asid[cpu].gen = ~0UL;
        asid[cpu].num = nasid - 1;

    svm_npt_init(ipinum);

    /* Enable SVM on all CPUs */
    smp_rendezvous(NULL, svm_enable, NULL, NULL);

/* Pentium compatible MSRs */
#define MSR_PENTIUM_START	0
#define MSR_PENTIUM_END		0x1FFF
/* AMD 6th generation and Intel compatible MSRs */
#define MSR_AMD6TH_START	0xC0000000UL
#define MSR_AMD6TH_END		0xC0001FFFUL
/* AMD 7th and 8th generation compatible MSRs */
#define MSR_AMD7TH_START	0xC0010000UL
#define MSR_AMD7TH_END		0xC0011FFFUL

 * Get the index and bit position for a MSR in permission bitmap.
 * Two bits are used for each MSR: lower bit for read and higher bit for write.
svm_msr_index(uint64_t msr, int *index, int *bit)
    *bit = (msr % 4) * 2;
    if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {

    base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
    if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
        off = (msr - MSR_AMD6TH_START);
        *index = (off + base) / 4;

    base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
    if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
        off = (msr - MSR_AMD7TH_START);
        *index = (off + base) / 4;
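    /*
     * Illustrative worked example: for MSR_LSTAR (0xC0000082) the AMD6TH
     * range applies, so off = 0x82 and base = 0x2000 (the Pentium-range
     * MSRs counted above), giving *index = (0x82 + 0x2000) / 4 = 0x820
     * and *bit = (0xC0000082 % 4) * 2 = 4, i.e. bit 4 is the read bit
     * and bit 5 the write bit for that MSR.
     */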
 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
    int index, bit, error;

    error = svm_msr_index(msr, &index, &bit);
    KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
    KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
        ("%s: invalid index %d for msr %#lx", __func__, index, msr));
    KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
        "msr %#lx", __func__, bit, msr));

        perm_bitmap[index] &= ~(1UL << bit);
        perm_bitmap[index] &= ~(2UL << bit);
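    /*
     * In the MSR permission bitmap a set bit causes a #VMEXIT(MSR), so
     * clearing the read bit (1 << bit) and/or the write bit (2 << bit)
     * above is what grants the guest direct access.
     */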
svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
    svm_msr_perm(perm_bitmap, msr, true, true);

svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
    svm_msr_perm(perm_bitmap, msr, true, false);

svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
    struct vmcb_ctrl *ctrl;

    KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
    ctrl = svm_get_vmcb_ctrl(sc, vcpu);
    return (ctrl->intercept[idx] & bitmask ? 1 : 0);

svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
    struct vmcb_ctrl *ctrl;

    KASSERT(idx >= 0 && idx < 5, ("invalid intercept index %d", idx));
    ctrl = svm_get_vmcb_ctrl(sc, vcpu);
    oldval = ctrl->intercept[idx];
        ctrl->intercept[idx] |= bitmask;
        ctrl->intercept[idx] &= ~bitmask;
    if (ctrl->intercept[idx] != oldval) {
        svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
        VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
            "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);

svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
    svm_set_intercept(sc, vcpu, off, bitmask, 0);

svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
    svm_set_intercept(sc, vcpu, off, bitmask, 1);

vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
    uint64_t msrpm_base_pa, uint64_t np_pml4)
    struct vmcb_ctrl *ctrl;
    struct vmcb_state *state;

    ctrl = svm_get_vmcb_ctrl(sc, vcpu);
    state = svm_get_vmcb_state(sc, vcpu);

    ctrl->iopm_base_pa = iopm_base_pa;
    ctrl->msrpm_base_pa = msrpm_base_pa;

    /* Enable nested paging */
    ctrl->n_cr3 = np_pml4;

    * Intercept accesses to the control registers that are not shadowed
    * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
    for (n = 0; n < 16; n++) {
        mask = (BIT(n) << 16) | BIT(n);
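        /*
         * Each mask covers the pair of intercept bits for CRn: the low
         * word holds the CR read intercepts and the high word the CR
         * write intercepts, so one mask toggles both at once.
         */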
        if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
            svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
            svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);

    * Intercept everything when tracing guest exceptions otherwise
    * just intercept machine check exception.
    if (vcpu_trace_exceptions(sc->vm, vcpu)) {
        for (n = 0; n < 32; n++) {
            * Skip unimplemented vectors in the exception bitmap.
            if (n == 2 || n == 9) {
            svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
        svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));

    /* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
        VMCB_INTCPT_FERR_FREEZE);

    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);

    * From section "Canonicalization and Consistency Checks" in APMv2
    * the VMRUN intercept bit must be set to pass the consistency check.
    svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);

    * The ASID will be set to a non-zero value just before VMRUN.
    * Section 15.21.1, Interrupt Masking in EFLAGS
    * Section 15.21.2, Virtualizing APIC.TPR
    * This must be set for %rflags and %cr8 isolation of guest and host.
    ctrl->v_intr_masking = 1;

    /* Enable Last Branch Record aka LBR for debugging */
    ctrl->lbr_virt_en = 1;
    state->dbgctl = BIT(0);

    /* EFER_SVM must always be set when the guest is executing */
    state->efer = EFER_SVM;

    /* Set up the PAT to power-on state */
    state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) |
        PAT_VALUE(1, PAT_WRITE_THROUGH) |
        PAT_VALUE(2, PAT_UNCACHED) |
        PAT_VALUE(3, PAT_UNCACHEABLE) |
        PAT_VALUE(4, PAT_WRITE_BACK) |
        PAT_VALUE(5, PAT_WRITE_THROUGH) |
        PAT_VALUE(6, PAT_UNCACHED) |
        PAT_VALUE(7, PAT_UNCACHEABLE);
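    /*
     * The eight entries above encode the architectural power-on PAT value
     * (WB, WT, UC-, UC repeated), so a freshly reset guest sees the same
     * memory-type defaults it would see on real hardware.
     */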
    /* Set up DR6/7 to power-on state */
    state->dr6 = DBREG_DR6_RESERVED1;
    state->dr7 = DBREG_DR7_RESERVED1;
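    /*
     * DBREG_DR6_RESERVED1/DBREG_DR7_RESERVED1 are the must-be-one bits of
     * DR6 and DR7, which is what those registers hold after a processor
     * reset.
     */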
 * Initialize a virtual machine.
svm_vminit(struct vm *vm, pmap_t pmap)
    struct svm_softc *svm_sc;
    struct svm_vcpu *vcpu;
    vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;

    svm_sc = malloc(sizeof (*svm_sc), M_SVM, M_WAITOK | M_ZERO);
    if (((uintptr_t)svm_sc & PAGE_MASK) != 0)
        panic("malloc of svm_softc not aligned on page boundary");

    svm_sc->msr_bitmap = contigmalloc(SVM_MSR_BITMAP_SIZE, M_SVM,
        M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
    if (svm_sc->msr_bitmap == NULL)
        panic("contigmalloc of SVM MSR bitmap failed");
    svm_sc->iopm_bitmap = contigmalloc(SVM_IO_BITMAP_SIZE, M_SVM,
        M_WAITOK, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0);
    if (svm_sc->iopm_bitmap == NULL)
        panic("contigmalloc of SVM IO bitmap failed");

    svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);

    * Intercept read and write accesses to all MSRs.
    memset(svm_sc->msr_bitmap, 0xFF, SVM_MSR_BITMAP_SIZE);

    * Access to the following MSRs is redirected to the VMCB when the
    * guest is executing. Therefore it is safe to allow the guest to
    * read/write these MSRs directly without hypervisor involvement.
    svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
    svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
    svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);

    svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
    svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
    svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
    svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
    svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
    svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
    svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
    svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);

    svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);

    * Intercept writes to make sure that the EFER_SVM bit is not cleared.
    svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);

    /* Intercept access to all I/O ports. */
    memset(svm_sc->iopm_bitmap, 0xFF, SVM_IO_BITMAP_SIZE);

    iopm_pa = vtophys(svm_sc->iopm_bitmap);
    msrpm_pa = vtophys(svm_sc->msr_bitmap);
    pml4_pa = svm_sc->nptp;
    maxcpus = vm_get_maxcpus(svm_sc->vm);
    for (i = 0; i < maxcpus; i++) {
        vcpu = svm_get_vcpu(svm_sc, i);
        vcpu->lastcpu = NOCPU;
        vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
        vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
        svm_msr_guest_init(svm_sc, i);

 * Collateral for a generic SVM VM-exit.
vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
    vme->exitcode = VM_EXITCODE_SVM;
    vme->u.svm.exitcode = code;
    vme->u.svm.exitinfo1 = info1;
    vme->u.svm.exitinfo2 = info2;
svm_cpl(struct vmcb_state *state)
    * "Retrieve the CPL from the CPL field in the VMCB, not
    * from any segment DPL"

static enum vm_cpu_mode
svm_vcpu_mode(struct vmcb *vmcb)
    struct vmcb_segment seg;
    struct vmcb_state *state;

    state = &vmcb->state;

    if (state->efer & EFER_LMA) {
        error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
        KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
        * Section 4.8.1 of APM2, check if Code Segment has
        * Long attribute set in descriptor.
        if (seg.attrib & VMCB_CS_ATTRIB_L)
            return (CPU_MODE_64BIT);
            return (CPU_MODE_COMPATIBILITY);
    } else if (state->cr0 & CR0_PE) {
        return (CPU_MODE_PROTECTED);
        return (CPU_MODE_REAL);

static enum vm_paging_mode
svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
    if ((cr0 & CR0_PG) == 0)
        return (PAGING_MODE_FLAT);
    if ((cr4 & CR4_PAE) == 0)
        return (PAGING_MODE_32);
        return (PAGING_MODE_64);
    return (PAGING_MODE_PAE);

 * ins/outs utility routines
svm_inout_str_index(struct svm_regctx *regs, int in)
    val = in ? regs->sctx_rdi : regs->sctx_rsi;

svm_inout_str_count(struct svm_regctx *regs, int rep)
    val = rep ? regs->sctx_rcx : 1;

svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
    int in, struct vm_inout_str *vis)
        vis->seg_name = VM_REG_GUEST_ES;
        /* The segment field has standard encoding */
        s = (info1 >> 10) & 0x7;
        vis->seg_name = vm_segment_name(s);

    error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
    KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));

svm_inout_str_addrsize(uint64_t info1)
    size = (info1 >> 7) & 0x7;
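    /*
     * Bits 9:7 of EXITINFO1 are a one-hot encoding of the effective
     * address size (16-, 32- or 64-bit), which is why only the values
     * 1, 2 and 4 are expected here.
     */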
        return (2);	/* 16 bit */
        return (4);	/* 32 bit */
        return (8);	/* 64 bit */
        panic("%s: invalid size encoding %d", __func__, size);

svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
    struct vmcb_state *state;

    state = &vmcb->state;
    paging->cr3 = state->cr3;
    paging->cpl = svm_cpl(state);
    paging->cpu_mode = svm_vcpu_mode(vmcb);
    paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,

 * Handle guest I/O intercept.
svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
    struct vmcb_ctrl *ctrl;
    struct vmcb_state *state;
    struct svm_regctx *regs;
    struct vm_inout_str *vis;

    state = svm_get_vmcb_state(svm_sc, vcpu);
    ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
    regs = svm_get_guest_regctx(svm_sc, vcpu);

    info1 = ctrl->exitinfo1;
    inout_string = info1 & BIT(2) ? 1 : 0;

    * The effective segment number in EXITINFO1[12:10] is populated
    * only if the processor has the DecodeAssist capability.
    * XXX this is not specified explicitly in APMv2 but can be verified
    if (inout_string && !decode_assist())

    vmexit->exitcode = VM_EXITCODE_INOUT;
    vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0;
    vmexit->u.inout.string = inout_string;
    vmexit->u.inout.rep = (info1 & BIT(3)) ? 1 : 0;
    vmexit->u.inout.bytes = (info1 >> 4) & 0x7;
    vmexit->u.inout.port = (uint16_t)(info1 >> 16);
    vmexit->u.inout.eax = (uint32_t)(state->rax);

        vmexit->exitcode = VM_EXITCODE_INOUT_STR;
        vis = &vmexit->u.inout_str;
        svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
        vis->rflags = state->rflags;
        vis->cr0 = state->cr0;
        vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
        vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
        vis->addrsize = svm_inout_str_addrsize(info1);
        svm_inout_str_seginfo(svm_sc, vcpu, info1,
            vmexit->u.inout.in, vis);
npf_fault_type(uint64_t exitinfo1)
    if (exitinfo1 & VMCB_NPF_INFO1_W)
        return (VM_PROT_WRITE);
    else if (exitinfo1 & VMCB_NPF_INFO1_ID)
        return (VM_PROT_EXECUTE);
        return (VM_PROT_READ);

svm_npf_emul_fault(uint64_t exitinfo1)
    if (exitinfo1 & VMCB_NPF_INFO1_ID) {
    if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
    if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {

svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
    struct vm_guest_paging *paging;
    struct vmcb_segment seg;
    struct vmcb_ctrl *ctrl;

    paging = &vmexit->u.inst_emul.paging;

    vmexit->exitcode = VM_EXITCODE_INST_EMUL;
    vmexit->u.inst_emul.gpa = gpa;
    vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
    svm_paging_info(vmcb, paging);

    error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
    KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));

    switch (paging->cpu_mode) {
        vmexit->u.inst_emul.cs_base = seg.base;
        vmexit->u.inst_emul.cs_d = 0;
    case CPU_MODE_PROTECTED:
    case CPU_MODE_COMPATIBILITY:
        vmexit->u.inst_emul.cs_base = seg.base;
        * Section 4.8.1 of APM2, Default Operand Size or D bit.
        vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
        vmexit->u.inst_emul.cs_base = 0;
        vmexit->u.inst_emul.cs_d = 0;

    * Copy the instruction bytes into 'vie' if available.
    if (decode_assist() && !disable_npf_assist) {
        inst_len = ctrl->inst_len;
        inst_bytes = ctrl->inst_bytes;
    vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);

intrtype_to_str(int intr_type)
    case VMCB_EVENTINJ_TYPE_INTR:
    case VMCB_EVENTINJ_TYPE_NMI:
    case VMCB_EVENTINJ_TYPE_INTn:
    case VMCB_EVENTINJ_TYPE_EXCEPTION:
        return ("exception");
        panic("%s: unknown intr_type %d", __func__, intr_type);
 * Inject an event to vcpu as described in section 15.20, "Event injection".
svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
    uint32_t error, bool ec_valid)
    struct vmcb_ctrl *ctrl;

    ctrl = svm_get_vmcb_ctrl(sc, vcpu);

    KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
        ("%s: event already pending %#lx", __func__, ctrl->eventinj));

    KASSERT(vector >= 0 && vector <= 255, ("%s: invalid vector %d",

    case VMCB_EVENTINJ_TYPE_INTR:
    case VMCB_EVENTINJ_TYPE_NMI:
    case VMCB_EVENTINJ_TYPE_INTn:
    case VMCB_EVENTINJ_TYPE_EXCEPTION:
        if (vector >= 0 && vector <= 31 && vector != 2)
        panic("%s: invalid intr_type/vector: %d/%d", __func__,

    ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
        ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
        ctrl->eventinj |= (uint64_t)error << 32;
        VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
            intrtype_to_str(intr_type), vector, error);
        VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
            intrtype_to_str(intr_type), vector);

svm_update_virqinfo(struct svm_softc *sc, int vcpu)
    struct vlapic *vlapic;
    struct vmcb_ctrl *ctrl;

    vlapic = vm_lapic(vm, vcpu);
    ctrl = svm_get_vmcb_ctrl(sc, vcpu);

    /* Update %cr8 in the emulated vlapic */
    vlapic_set_cr8(vlapic, ctrl->v_tpr);

    /* Virtual interrupt injection is not used. */
    KASSERT(ctrl->v_intr_vector == 0, ("%s: invalid "
        "v_intr_vector %d", __func__, ctrl->v_intr_vector));

svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
    struct vmcb_ctrl *ctrl;

    ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
    intinfo = ctrl->exitintinfo;
    if (!VMCB_EXITINTINFO_VALID(intinfo))

    * From APMv2, Section "Intercepts during IDT interrupt delivery"
    * If a #VMEXIT happened during event delivery then record the event
    * that was being delivered.
    VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
        intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
    vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
    vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
    return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,

enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
    struct vmcb_ctrl *ctrl;

    ctrl = svm_get_vmcb_ctrl(sc, vcpu);

    if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
        KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
        KASSERT(vintr_intercept_enabled(sc, vcpu),
            ("%s: vintr intercept should be enabled", __func__));

    VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
    ctrl->v_ign_tpr = 1;
    ctrl->v_intr_vector = 0;
    svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);

static __inline void
disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
    struct vmcb_ctrl *ctrl;

    ctrl = svm_get_vmcb_ctrl(sc, vcpu);

    if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
        KASSERT(!vintr_intercept_enabled(sc, vcpu),
            ("%s: vintr intercept should be disabled", __func__));

    VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
    ctrl->v_intr_vector = 0;
    svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
    svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);

svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
    struct vmcb_ctrl *ctrl;

    ctrl = svm_get_vmcb_ctrl(sc, vcpu);
    oldval = ctrl->intr_shadow;
    newval = val ? 1 : 0;
    if (newval != oldval) {
        ctrl->intr_shadow = newval;
        VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);

svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
    struct vmcb_ctrl *ctrl;

    ctrl = svm_get_vmcb_ctrl(sc, vcpu);
    *val = ctrl->intr_shadow;
 * Once an NMI is injected it blocks delivery of further NMIs until the handler
 * executes an IRET. The IRET intercept is enabled when an NMI is injected
 * to track when the vcpu is done handling the NMI.
nmi_blocked(struct svm_softc *sc, int vcpu)
    blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,

enable_nmi_blocking(struct svm_softc *sc, int vcpu)
    KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
    VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
    svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

clear_nmi_blocking(struct svm_softc *sc, int vcpu)
    KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
    VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
    * When the IRET intercept is cleared the vcpu will attempt to execute
    * the "iret" when it runs next. However, it is possible to inject
    * another NMI into the vcpu before the "iret" has actually executed.
    * E.g. if the "iret" encounters a #NPF when accessing the stack
    * it will trap back into the hypervisor. If an NMI is pending for
    * the vcpu it will be injected into the guest.
    * XXX this needs to be fixed
    svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);

    * Set 'intr_shadow' to prevent an NMI from being injected on the
    error = svm_modify_intr_shadow(sc, vcpu, 1);
    KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
#define EFER_MBZ_BITS	0xFFFFFFFFFFFF0200UL
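/*
 * Bit 9 and bits 63:16 of EFER are reserved and must be zero; a guest
 * attempt to set any of them is rejected with #GP in svm_write_efer().
 */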
svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
    struct vm_exit *vme;
    struct vmcb_state *state;
    uint64_t changed, lma, oldval;

    state = svm_get_vmcb_state(sc, vcpu);

    oldval = state->efer;
    VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);

    newval &= ~0xFE;	/* clear the Read-As-Zero (RAZ) bits */
    changed = oldval ^ newval;

    if (newval & EFER_MBZ_BITS)

    /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */
    if (changed & EFER_LME) {
        if (state->cr0 & CR0_PG)

    /* EFER.LMA = EFER.LME & CR0.PG */
    if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0)

    if ((newval & EFER_LMA) != lma)

    if (newval & EFER_NXE) {
        if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))

    * XXX bhyve does not enforce segment limits in 64-bit mode. Until
    * this is fixed, flag a guest attempt to set EFER_LMSLE as an error.
    if (newval & EFER_LMSLE) {
        vme = vm_exitinfo(sc->vm, vcpu);
        vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0);

    if (newval & EFER_FFXSR) {
        if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))

    if (newval & EFER_TCE) {
        if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))

    error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
    KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));

    vm_inject_gp(sc->vm, vcpu);

emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
        error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
    else if (num == MSR_EFER)
        error = svm_write_efer(sc, vcpu, val, retu);
        error = svm_wrmsr(sc, vcpu, num, val, retu);

emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
    struct vmcb_state *state;
    struct svm_regctx *ctx;

        error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
        error = svm_rdmsr(sc, vcpu, num, &result, retu);

        state = svm_get_vmcb_state(sc, vcpu);
        ctx = svm_get_guest_regctx(sc, vcpu);
        state->rax = result & 0xffffffff;
        ctx->sctx_rdx = result >> 32;
exit_reason_to_str(uint64_t reason)
    static char reasonbuf[32];

    case VMCB_EXIT_INVALID:
        return ("invalvmcb");
    case VMCB_EXIT_SHUTDOWN:
        return ("shutdown");
        return ("nptfault");
    case VMCB_EXIT_PAUSE:
    case VMCB_EXIT_CPUID:
    case VMCB_EXIT_INTR:
    case VMCB_EXIT_VINTR:
    case VMCB_EXIT_IRET:
    case VMCB_EXIT_MONITOR:
    case VMCB_EXIT_MWAIT:
        snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);

 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
 * that are due to instruction intercepts as well as MSR and IOIO intercepts
 * and exceptions caused by INT3, INTO and BOUND instructions.
 * Return 1 if the nRIP is valid and 0 otherwise.
nrip_valid(uint64_t exitcode)
    case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
    case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
    case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
    case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
    case 0x43:		/* INT3 */
    case 0x44:		/* INTO */
    case 0x45:		/* BOUND */
    case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
    case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */

svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
    struct vmcb_state *state;
    struct vmcb_ctrl *ctrl;
    struct svm_regctx *ctx;
    uint64_t code, info1, info2, val;
    uint32_t eax, ecx, edx;
    int error, errcode_valid, handled, idtvec, reflect;

    ctx = svm_get_guest_regctx(svm_sc, vcpu);
    vmcb = svm_get_vmcb(svm_sc, vcpu);
    state = &vmcb->state;

    code = ctrl->exitcode;
    info1 = ctrl->exitinfo1;
    info2 = ctrl->exitinfo2;

    vmexit->exitcode = VM_EXITCODE_BOGUS;
    vmexit->rip = state->rip;
    vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;
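    /*
     * When nRIP is valid, 'nrip - rip' is the length of the intercepted
     * instruction; a value of zero tells the rest of the exit handling
     * that the length is not known from hardware.
     */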
    vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);

    * #VMEXIT(INVALID) needs to be handled early because the VMCB is
    * in an inconsistent state and can trigger assertions that would
    * never happen otherwise.
    if (code == VMCB_EXIT_INVALID) {
        vm_exit_svm(vmexit, code, info1, info2);

    KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
        "injection valid bit is set %#lx", __func__, ctrl->eventinj));

    KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
        ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
        vmexit->inst_length, code, info1, info2));

    svm_update_virqinfo(svm_sc, vcpu);
    svm_save_intinfo(svm_sc, vcpu);

    case VMCB_EXIT_IRET:
        * Restart execution at "iret" but with the intercept cleared.
        vmexit->inst_length = 0;
        clear_nmi_blocking(svm_sc, vcpu);
    case VMCB_EXIT_VINTR:	/* interrupt window exiting */
        vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
    case VMCB_EXIT_INTR:	/* external interrupt */
        vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
    case VMCB_EXIT_NMI:	/* external NMI */
        vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
        idtvec = code - 0x40;
            * Call the machine check handler by hand. Also don't
            * reflect the machine check back into the guest.
            VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
            __asm __volatile("int $18");
            error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
            KASSERT(error == 0, ("%s: error %d updating cr2",
            * The 'nrip' field is populated for INT3, INTO and
            * BOUND exceptions and this also implies that
            * 'inst_length' is non-zero.
            * Reset 'inst_length' to zero so the guest %rip at
            * event injection is identical to what it was when
            * the exception originally happened.
            VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
                "to zero before injecting exception %d",
                vmexit->inst_length, idtvec);
            vmexit->inst_length = 0;
        KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) "
            "when reflecting exception %d into guest",
            vmexit->inst_length, idtvec));
            /* Reflect the exception back into the guest */
            VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
                "%d/%#x into the guest", idtvec, (int)info1);
            error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
                errcode_valid, info1, 0);
            KASSERT(error == 0, ("%s: vm_inject_exception error %d",
    case VMCB_EXIT_MSR:	/* MSR access. */
        ecx = ctx->sctx_rcx;
        edx = ctx->sctx_rdx;
            vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
            val = (uint64_t)edx << 32 | eax;
            VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
            if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
                vmexit->exitcode = VM_EXITCODE_WRMSR;
                vmexit->u.msr.code = ecx;
                vmexit->u.msr.wval = val;
                KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
                    ("emulate_wrmsr retu with bogus exitcode"));
            VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
            vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
            if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
                vmexit->exitcode = VM_EXITCODE_RDMSR;
                vmexit->u.msr.code = ecx;
                KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
                    ("emulate_rdmsr retu with bogus exitcode"));
        handled = svm_handle_io(svm_sc, vcpu, vmexit);
        vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
    case VMCB_EXIT_CPUID:
        vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
        handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
            (uint32_t *)&state->rax,
            (uint32_t *)&ctx->sctx_rbx,
            (uint32_t *)&ctx->sctx_rcx,
            (uint32_t *)&ctx->sctx_rdx);
        vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
        vmexit->exitcode = VM_EXITCODE_HLT;
        vmexit->u.hlt.rflags = state->rflags;
    case VMCB_EXIT_PAUSE:
        vmexit->exitcode = VM_EXITCODE_PAUSE;
        vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
        /* EXITINFO2 contains the faulting guest physical address */
        if (info1 & VMCB_NPF_INFO1_RSV) {
            VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
                "reserved bits set: info1(%#lx) info2(%#lx)",
        } else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
            vmexit->exitcode = VM_EXITCODE_PAGING;
            vmexit->u.paging.gpa = info2;
            vmexit->u.paging.fault_type = npf_fault_type(info1);
            vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
            VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
                "on gpa %#lx/%#lx at rip %#lx",
                info2, info1, state->rip);
        } else if (svm_npf_emul_fault(info1)) {
            svm_handle_inst_emul(vmcb, info2, vmexit);
            vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
            VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
                "for gpa %#lx/%#lx at rip %#lx",
                info2, info1, state->rip);
    case VMCB_EXIT_MONITOR:
        vmexit->exitcode = VM_EXITCODE_MONITOR;
    case VMCB_EXIT_MWAIT:
        vmexit->exitcode = VM_EXITCODE_MWAIT;
        vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);

    VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
        handled ? "handled" : "unhandled", exit_reason_to_str(code),
        vmexit->rip, vmexit->inst_length);

        vmexit->rip += vmexit->inst_length;
        vmexit->inst_length = 0;
        state->rip = vmexit->rip;

    if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
        * If this VM exit was not claimed by anybody then
        * treat it as a generic SVM exit.
        vm_exit_svm(vmexit, code, info1, info2);
        * The exitcode and collateral have been populated.
        * The VM exit will be processed further in userland.

svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
    if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))

    KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
        "valid: %#lx", __func__, intinfo));

    svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
        VMCB_EXITINTINFO_VECTOR(intinfo),
        VMCB_EXITINTINFO_EC(intinfo),
        VMCB_EXITINTINFO_EC_VALID(intinfo));
    vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
    VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
 * Inject event to virtual cpu.
svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
    struct vmcb_ctrl *ctrl;
    struct vmcb_state *state;
    struct svm_vcpu *vcpustate;
    int vector, need_intr_window;

    state = svm_get_vmcb_state(sc, vcpu);
    ctrl = svm_get_vmcb_ctrl(sc, vcpu);
    vcpustate = svm_get_vcpu(sc, vcpu);

    need_intr_window = 0;

    if (vcpustate->nextrip != state->rip) {
        ctrl->intr_shadow = 0;
        VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
            "cleared due to rip change: %#lx/%#lx",
            vcpustate->nextrip, state->rip);

    * Inject pending events or exceptions for this vcpu.
    * An event might be pending because the previous #VMEXIT happened
    * during event delivery (i.e. ctrl->exitintinfo).
    * An event might also be pending because an exception was injected
    * by the hypervisor (e.g. #PF during instruction emulation).
    svm_inj_intinfo(sc, vcpu);

    /* NMI event has priority over interrupts. */
    if (vm_nmi_pending(sc->vm, vcpu)) {
        if (nmi_blocked(sc, vcpu)) {
            * Can't inject another NMI if the guest has not
            * yet executed an "iret" after the last NMI.
            VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
        } else if (ctrl->intr_shadow) {
            * Can't inject an NMI if the vcpu is in an intr_shadow.
            VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
                "interrupt shadow");
            need_intr_window = 1;
        } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
            * If there is already an exception/interrupt pending
            * then defer the NMI until after that.
            VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
                "eventinj %#lx", ctrl->eventinj);
            * Use self-IPI to trigger a VM-exit as soon as
            * possible after the event injection is completed.
            * This works only if the external interrupt exiting
            * is at a lower priority than the event injection.
            * Although not explicitly specified in APMv2 the
            * relative priorities were verified empirically.
            ipi_cpu(curcpu, IPI_AST);	/* XXX vmm_ipinum? */
            vm_nmi_clear(sc->vm, vcpu);

            /* Inject NMI, vector number is not used */
            svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,

            /* virtual NMI blocking is now in effect */
            enable_nmi_blocking(sc, vcpu);

            VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");

    extint_pending = vm_extint_pending(sc->vm, vcpu);
    if (!extint_pending) {
        if (!vlapic_pending_intr(vlapic, &vector))
        KASSERT(vector >= 16 && vector <= 255,
            ("invalid vector %d from local APIC", vector));
        /* Ask the legacy pic for a vector to inject */
        vatpic_pending_intr(sc->vm, &vector);
        KASSERT(vector >= 0 && vector <= 255,
            ("invalid vector %d from INTR", vector));

    * If the guest has disabled interrupts or is in an interrupt shadow
    * then we cannot inject the pending interrupt.
    if ((state->rflags & PSL_I) == 0) {
        VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
            "rflags %#lx", vector, state->rflags);
        need_intr_window = 1;

    if (ctrl->intr_shadow) {
        VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
            "interrupt shadow", vector);
        need_intr_window = 1;

    if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
        VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
            "eventinj %#lx", vector, ctrl->eventinj);
        need_intr_window = 1;

    svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);

    if (!extint_pending) {
        vlapic_intr_accepted(vlapic, vector);
        vm_extint_clear(sc->vm, vcpu);
        vatpic_intr_accepted(sc->vm, vector);

    * Force a VM-exit as soon as the vcpu is ready to accept another
    * interrupt. This is done because the PIC might have another vector
    * that it wants to inject. Also, if the APIC has a pending interrupt
    * that was preempted by the ExtInt then it allows us to inject the
    * APIC vector as soon as possible.
    need_intr_window = 1;

    * The guest can modify the TPR by writing to %CR8. In guest mode
    * the processor reflects this write to V_TPR without hypervisor
    * The guest can also modify the TPR by writing to it via the memory
    * mapped APIC page. In this case, the write will be emulated by the
    * hypervisor. For this reason V_TPR must be updated before every
    v_tpr = vlapic_get_cr8(vlapic);
    KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
    if (ctrl->v_tpr != v_tpr) {
        VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
            ctrl->v_tpr, v_tpr);
        ctrl->v_tpr = v_tpr;
        svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);

    if (need_intr_window) {
        * We use V_IRQ in conjunction with the VINTR intercept to
        * trap into the hypervisor as soon as a virtual interrupt
        * Since injected events are not subject to intercept checks
        * we need to ensure that the V_IRQ is not actually going to
        * be delivered on VM entry. The KASSERT below enforces this.
        KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
            (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
            ("Bogus intr_window_exiting: eventinj (%#lx), "
            "intr_shadow (%u), rflags (%#lx)",
            ctrl->eventinj, ctrl->intr_shadow, state->rflags));
        enable_intr_window_exiting(sc, vcpu);
        disable_intr_window_exiting(sc, vcpu);
static __inline void
restore_host_tss(void)
    struct system_segment_descriptor *tss_sd;

    * The TSS descriptor was in use prior to launching the guest so it
    * has been marked busy.
    * 'ltr' requires the descriptor to be marked available so change the
    * type to "64-bit available TSS".
    tss_sd = PCPU_GET(tss);
    tss_sd->sd_type = SDT_SYSTSS;
    ltr(GSEL(GPROC0_SEL, SEL_KPL));

check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
    struct svm_vcpu *vcpustate;
    struct vmcb_ctrl *ctrl;

    KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
        "active on cpu %u", __func__, thiscpu));

    vcpustate = svm_get_vcpu(sc, vcpuid);
    ctrl = svm_get_vmcb_ctrl(sc, vcpuid);

    * The TLB entries associated with the vcpu's ASID are not valid
    * if either of the following conditions is true:
    * 1. The vcpu's ASID generation is different than the host cpu's
    *    ASID generation. This happens when the vcpu migrates to a new
    *    host cpu. It can also happen when the number of vcpus executing
    *    on a host cpu is greater than the number of ASIDs available.
    * 2. The pmap generation number is different than the value cached in
    *    the 'vcpustate'. This happens when the host invalidates pages
    *    belonging to the guest.
    *	asidgen		eptgen	      Action
    * (a) There is no mismatch in eptgen or ASID generation and therefore
    *     no further action is needed.
    * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
    *      retained and the TLB entries associated with this ASID
    *      are flushed by VMRUN.
    * (b2) If the cpu does not support FlushByAsid then a new ASID is
    * (c) A new ASID is allocated.
    * (d) A new ASID is allocated.
    eptgen = pmap->pm_eptgen;
    ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;

    if (vcpustate->asid.gen != asid[thiscpu].gen) {
        alloc_asid = true;	/* (c) and (d) */
    } else if (vcpustate->eptgen != eptgen) {
        if (flush_by_asid())
            ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
            alloc_asid = true;	/* (b2) */
        * This is the common case (a).
        KASSERT(!alloc_asid, ("ASID allocation not necessary"));
        KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
            ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));

        if (++asid[thiscpu].num >= nasid) {
            asid[thiscpu].num = 1;
            if (++asid[thiscpu].gen == 0)
                asid[thiscpu].gen = 1;
            * If this cpu does not support "flush-by-asid"
            * then flush the entire TLB on a generation
            * bump. Subsequent ASID allocation in this
            * generation can be done without a TLB flush.
            if (!flush_by_asid())
                ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
        vcpustate->asid.gen = asid[thiscpu].gen;
        vcpustate->asid.num = asid[thiscpu].num;

        ctrl->asid = vcpustate->asid.num;
        svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
        * If this cpu supports "flush-by-asid" then the TLB
        * was not flushed after the generation bump. The TLB
        * is flushed selectively after every new ASID allocation.
        if (flush_by_asid())
            ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
    vcpustate->eptgen = eptgen;

    KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
    KASSERT(ctrl->asid == vcpustate->asid.num,
        ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
static __inline void
    __asm __volatile("clgi");

static __inline void
    __asm __volatile("stgi");

static __inline void
svm_dr_enter_guest(struct svm_regctx *gctx)
    /* Save host control debug registers. */
    gctx->host_dr7 = rdr7();
    gctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);

    * Disable debugging in DR7 and DEBUGCTL to avoid triggering
    * exceptions in the host based on the guest DRx values. The
    * guest DR6, DR7, and DEBUGCTL are saved/restored in the
    wrmsr(MSR_DEBUGCTLMSR, 0);

    /* Save host debug registers. */
    gctx->host_dr0 = rdr0();
    gctx->host_dr1 = rdr1();
    gctx->host_dr2 = rdr2();
    gctx->host_dr3 = rdr3();
    gctx->host_dr6 = rdr6();

    /* Restore guest debug registers. */
    load_dr0(gctx->sctx_dr0);
    load_dr1(gctx->sctx_dr1);
    load_dr2(gctx->sctx_dr2);
    load_dr3(gctx->sctx_dr3);

static __inline void
svm_dr_leave_guest(struct svm_regctx *gctx)
    /* Save guest debug registers. */
    gctx->sctx_dr0 = rdr0();
    gctx->sctx_dr1 = rdr1();
    gctx->sctx_dr2 = rdr2();
    gctx->sctx_dr3 = rdr3();

    * Restore host debug registers. Restore DR7 and DEBUGCTL
    load_dr0(gctx->host_dr0);
    load_dr1(gctx->host_dr1);
    load_dr2(gctx->host_dr2);
    load_dr3(gctx->host_dr3);
    load_dr6(gctx->host_dr6);
    wrmsr(MSR_DEBUGCTLMSR, gctx->host_debugctl);
    load_dr7(gctx->host_dr7);
 * Start vcpu with specified RIP.
svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
    struct vm_eventinfo *evinfo)
    struct svm_regctx *gctx;
    struct svm_softc *svm_sc;
    struct svm_vcpu *vcpustate;
    struct vmcb_state *state;
    struct vmcb_ctrl *ctrl;
    struct vm_exit *vmexit;
    struct vlapic *vlapic;

    vcpustate = svm_get_vcpu(svm_sc, vcpu);
    state = svm_get_vmcb_state(svm_sc, vcpu);
    ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
    vmexit = vm_exitinfo(vm, vcpu);
    vlapic = vm_lapic(vm, vcpu);

    gctx = svm_get_guest_regctx(svm_sc, vcpu);
    vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

    if (vcpustate->lastcpu != curcpu) {
        * Force new ASID allocation by invalidating the generation.
        vcpustate->asid.gen = 0;

        * Invalidate the VMCB state cache by marking all fields dirty.
        svm_set_dirty(svm_sc, vcpu, 0xffffffff);

        * Setting 'vcpustate->lastcpu' here is a bit premature because
        * we may return from this function without actually executing
        * the VMRUN instruction. This could happen if a rendezvous
        * or an AST is pending on the first time through the loop.
        * This works for now but any new side-effects of vcpu
        * migration should take this case into account.
        vcpustate->lastcpu = curcpu;
        vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);

    svm_msr_guest_enter(svm_sc, vcpu);

    /* Update Guest RIP */

    * Disable global interrupts to guarantee atomicity during
    * loading of guest state. This includes not only the state
    * loaded by the "vmrun" instruction but also software state
    * maintained by the hypervisor: suspended and rendezvous
    * state, NPT generation number, vlapic interrupts etc.
        if (vcpu_suspended(evinfo)) {
            vm_exit_suspended(vm, vcpu, state->rip);

        if (vcpu_rendezvous_pending(evinfo)) {
            vm_exit_rendezvous(vm, vcpu, state->rip);

        if (vcpu_reqidle(evinfo)) {
            vm_exit_reqidle(vm, vcpu, state->rip);

        /* We are asked by the scheduler to give up the cpu. */
        if (vcpu_should_yield(vm, vcpu)) {
            vm_exit_astpending(vm, vcpu, state->rip);

        if (vcpu_debugged(vm, vcpu)) {
            vm_exit_debug(vm, vcpu, state->rip);

        * #VMEXIT resumes the host with the guest LDTR, so
        * save the current LDT selector so it can be restored
        * after an exit. The userspace hypervisor probably
        * doesn't use a LDT, but save and restore it to be
        svm_inj_interrupts(svm_sc, vcpu, vlapic);

        /* Activate the nested pmap on 'curcpu' */
        CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active);

        * Check the pmap generation and the ASID generation to
        * ensure that the vcpu does not use stale TLB mappings.
        check_asid(svm_sc, vcpu, pmap, curcpu);

        ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
        vcpustate->dirty = 0;
        VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);

        /* Launch Virtual Machine. */
        VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
        svm_dr_enter_guest(gctx);
        svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]);
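        /*
         * svm_launch() is the assembly helper that loads the guest
         * register context from 'gctx' and issues VMRUN on the VMCB at
         * 'vmcb_pa'; control returns here after the next #VMEXIT.
         */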
        svm_dr_leave_guest(gctx);

        CPU_CLR_ATOMIC(curcpu, &pmap->pm_active);

        * The host GDTR and IDTR are saved by VMRUN and restored
        * automatically on #VMEXIT. However, the host TSS needs
        * to be restored explicitly.

        /* Restore host LDTR. */

        /* #VMEXIT disables interrupts so re-enable them here. */

        /* Update 'nextrip' */
        vcpustate->nextrip = state->rip;

        /* Handle #VMEXIT and if required return to user space. */
        handled = svm_vmexit(svm_sc, vcpu, vmexit);

    svm_msr_guest_exit(svm_sc, vcpu);

svm_vmcleanup(void *arg)
    struct svm_softc *sc = arg;

    contigfree(sc->iopm_bitmap, SVM_IO_BITMAP_SIZE, M_SVM);
    contigfree(sc->msr_bitmap, SVM_MSR_BITMAP_SIZE, M_SVM);

swctx_regptr(struct svm_regctx *regctx, int reg)
    case VM_REG_GUEST_RBX:
        return (&regctx->sctx_rbx);
    case VM_REG_GUEST_RCX:
        return (&regctx->sctx_rcx);
    case VM_REG_GUEST_RDX:
        return (&regctx->sctx_rdx);
    case VM_REG_GUEST_RDI:
        return (&regctx->sctx_rdi);
    case VM_REG_GUEST_RSI:
        return (&regctx->sctx_rsi);
    case VM_REG_GUEST_RBP:
        return (&regctx->sctx_rbp);
    case VM_REG_GUEST_R8:
        return (&regctx->sctx_r8);
    case VM_REG_GUEST_R9:
        return (&regctx->sctx_r9);
    case VM_REG_GUEST_R10:
        return (&regctx->sctx_r10);
    case VM_REG_GUEST_R11:
        return (&regctx->sctx_r11);
    case VM_REG_GUEST_R12:
        return (&regctx->sctx_r12);
    case VM_REG_GUEST_R13:
        return (&regctx->sctx_r13);
    case VM_REG_GUEST_R14:
        return (&regctx->sctx_r14);
    case VM_REG_GUEST_R15:
        return (&regctx->sctx_r15);
    case VM_REG_GUEST_DR0:
        return (&regctx->sctx_dr0);
    case VM_REG_GUEST_DR1:
        return (&regctx->sctx_dr1);
    case VM_REG_GUEST_DR2:
        return (&regctx->sctx_dr2);
    case VM_REG_GUEST_DR3:
        return (&regctx->sctx_dr3);
svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
    struct svm_softc *svm_sc;

    if (ident == VM_REG_GUEST_INTR_SHADOW) {
        return (svm_get_intr_shadow(svm_sc, vcpu, val));

    if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {

    reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

    VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);

svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
    struct svm_softc *svm_sc;

    if (ident == VM_REG_GUEST_INTR_SHADOW) {
        return (svm_modify_intr_shadow(svm_sc, vcpu, val));

    if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {

    reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);

    * XXX deal with CR3 and invalidate TLB entries tagged with the
    * vcpu's ASID. This needs to be treated differently depending on
    * whether 'running' is true/false.
    VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);

svm_setcap(void *arg, int vcpu, int type, int val)
    struct svm_softc *sc;

    case VM_CAP_HALT_EXIT:
        svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
            VMCB_INTCPT_HLT, val);
    case VM_CAP_PAUSE_EXIT:
        svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
            VMCB_INTCPT_PAUSE, val);
    case VM_CAP_UNRESTRICTED_GUEST:
        /* Unrestricted guest execution cannot be disabled in SVM */

svm_getcap(void *arg, int vcpu, int type, int *retval)
    struct svm_softc *sc;

    case VM_CAP_HALT_EXIT:
        *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
    case VM_CAP_PAUSE_EXIT:
        *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
    case VM_CAP_UNRESTRICTED_GUEST:
        *retval = 1;	/* unrestricted guest is always enabled */

static struct vlapic *
svm_vlapic_init(void *arg, int vcpuid)
    struct svm_softc *svm_sc;
    struct vlapic *vlapic;

    vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
    vlapic->vm = svm_sc->vm;
    vlapic->vcpuid = vcpuid;
    vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];

    vlapic_init(vlapic);

svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
    vlapic_cleanup(vlapic);
    free(vlapic, M_SVM_VLAPIC);

struct vmm_ops vmm_ops_amd = {
    .cleanup	= svm_cleanup,
    .resume		= svm_restore,
    .vminit		= svm_vminit,
    .vmcleanup	= svm_vmcleanup,
    .vmgetreg	= svm_getreg,
    .vmsetreg	= svm_setreg,
    .vmgetdesc	= vmcb_getdesc,
    .vmsetdesc	= vmcb_setdesc,
    .vmgetcap	= svm_getcap,
    .vmsetcap	= svm_setcap,
    .vmspace_alloc	= svm_npt_alloc,
    .vmspace_free	= svm_npt_free,
    .vlapic_init	= svm_vlapic_init,
    .vlapic_cleanup	= svm_vlapic_cleanup,