 * Copyright (c) 2011 NetApp, Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <machine/psl.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>

#include "vmm_lapic.h"
#include "vmx_cpufunc.h"
#include "vmx_controls.h"

#define PINBASED_CTLS_ONE_SETTING	\
        (PINBASED_EXTINT_EXITING |	\
        PINBASED_NMI_EXITING |		\
#define PINBASED_CTLS_ZERO_SETTING	0

#define PROCBASED_CTLS_WINDOW_SETTING	\
        (PROCBASED_INT_WINDOW_EXITING |	\
        PROCBASED_NMI_WINDOW_EXITING)

#define PROCBASED_CTLS_ONE_SETTING	\
        (PROCBASED_SECONDARY_CONTROLS |	\
        PROCBASED_IO_EXITING |		\
        PROCBASED_MSR_BITMAPS |		\
        PROCBASED_CTLS_WINDOW_SETTING)
#define PROCBASED_CTLS_ZERO_SETTING	\
        (PROCBASED_CR3_LOAD_EXITING |	\
        PROCBASED_CR3_STORE_EXITING |	\

#define PROCBASED_CTLS2_ONE_SETTING	PROCBASED2_ENABLE_EPT
#define PROCBASED_CTLS2_ZERO_SETTING	0

#define VM_EXIT_CTLS_ONE_SETTING_NO_PAT	\

#define VM_EXIT_CTLS_ONE_SETTING	\
        (VM_EXIT_CTLS_ONE_SETTING_NO_PAT |	\

#define VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS

#define VM_ENTRY_CTLS_ONE_SETTING_NO_PAT	VM_ENTRY_LOAD_EFER

#define VM_ENTRY_CTLS_ONE_SETTING	\
        (VM_ENTRY_CTLS_ONE_SETTING_NO_PAT |	\

#define VM_ENTRY_CTLS_ZERO_SETTING	\
        (VM_ENTRY_LOAD_DEBUG_CONTROLS |	\
        VM_ENTRY_INTO_SMM |		\
        VM_ENTRY_DEACTIVATE_DUAL_MONITOR)

#define guest_msr_rw(vmx, msr) \
        msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
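
/*
 * guest_msr_rw() grants the guest direct read/write access to 'msr':
 * with both intercept bits clear in the MSR bitmap, RDMSR and WRMSR of
 * that MSR no longer cause a VM exit.
 */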
MALLOC_DEFINE(M_VMX, "vmx", "vmx");

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);

int vmxon_enabled[MAXCPU];
static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
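/*
 * The VMXON region must be page-aligned and no larger than a page (its
 * exact size is reported by MSR_VMX_BASIC), hence the PAGE_SIZE-aligned
 * per-CPU buffers above.
 */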

static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
static uint32_t exit_ctls, entry_ctls;

static uint64_t cr0_ones_mask, cr0_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
    &cr0_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
    &cr0_zeros_mask, 0, NULL);

static uint64_t cr4_ones_mask, cr4_zeros_mask;
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
    &cr4_ones_mask, 0, NULL);
SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
    &cr4_zeros_mask, 0, NULL);

static int vmx_no_patmsr;

static int vmx_initialized;
SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
    &vmx_initialized, 0, "Intel VMX initialized");

/*
 * Virtual NMI blocking conditions.
 *
 * Some processor implementations also require NMI to be blocked if
 * the STI_BLOCKING bit is set. It is possible to detect this at runtime
 * based on the (exit_reason,exit_qual) tuple being set to
 * (EXIT_REASON_INVAL_VMCS, EXIT_QUAL_NMI_WHILE_STI_BLOCKING).
 *
 * We take the easy way out and also include STI_BLOCKING as one of the
 * gating items for vNMI injection.
 */
static uint64_t nmi_blocking_bits = VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING |
    VMCS_INTERRUPTIBILITY_NMI_BLOCKING |
    VMCS_INTERRUPTIBILITY_STI_BLOCKING;

/*
 * Optional capabilities
 */
static int cap_halt_exit;
static int cap_pause_exit;
static int cap_unrestricted_guest;
static int cap_monitor_trap;
static int cap_invpcid;

static struct unrhdr *vpid_unr;
static u_int vpid_alloc_failed;
SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
    &vpid_alloc_failed, 0, NULL);

exit_reason_to_str(int reason)
        static char reasonbuf[32];

        case EXIT_REASON_EXCEPTION:
        case EXIT_REASON_EXT_INTR:
        case EXIT_REASON_TRIPLE_FAULT:
                return "triplefault";
        case EXIT_REASON_INIT:
        case EXIT_REASON_SIPI:
        case EXIT_REASON_IO_SMI:
        case EXIT_REASON_SMI:
        case EXIT_REASON_INTR_WINDOW:
        case EXIT_REASON_NMI_WINDOW:
        case EXIT_REASON_TASK_SWITCH:
        case EXIT_REASON_CPUID:
        case EXIT_REASON_GETSEC:
        case EXIT_REASON_HLT:
        case EXIT_REASON_INVD:
        case EXIT_REASON_INVLPG:
        case EXIT_REASON_RDPMC:
        case EXIT_REASON_RDTSC:
        case EXIT_REASON_RSM:
        case EXIT_REASON_VMCALL:
        case EXIT_REASON_VMCLEAR:
        case EXIT_REASON_VMLAUNCH:
        case EXIT_REASON_VMPTRLD:
        case EXIT_REASON_VMPTRST:
        case EXIT_REASON_VMREAD:
        case EXIT_REASON_VMRESUME:
        case EXIT_REASON_VMWRITE:
        case EXIT_REASON_VMXOFF:
        case EXIT_REASON_VMXON:
        case EXIT_REASON_CR_ACCESS:
        case EXIT_REASON_DR_ACCESS:
        case EXIT_REASON_INOUT:
        case EXIT_REASON_RDMSR:
        case EXIT_REASON_WRMSR:
        case EXIT_REASON_INVAL_VMCS:
        case EXIT_REASON_INVAL_MSR:
        case EXIT_REASON_MWAIT:
        case EXIT_REASON_MTF:
        case EXIT_REASON_MONITOR:
        case EXIT_REASON_PAUSE:
        case EXIT_REASON_MCE:
        case EXIT_REASON_TPR:
        case EXIT_REASON_APIC:
        case EXIT_REASON_GDTR_IDTR:
        case EXIT_REASON_LDTR_TR:
        case EXIT_REASON_EPT_FAULT:
        case EXIT_REASON_EPT_MISCONFIG:
                return "eptmisconfig";
        case EXIT_REASON_INVEPT:
        case EXIT_REASON_RDTSCP:
        case EXIT_REASON_VMX_PREEMPT:
        case EXIT_REASON_INVVPID:
        case EXIT_REASON_WBINVD:
        case EXIT_REASON_XSETBV:
                snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);

vmx_setjmp_rc2str(int rc)
        case VMX_RETURN_DIRECT:
        case VMX_RETURN_LONGJMP:
        case VMX_RETURN_VMRESUME:
        case VMX_RETURN_VMLAUNCH:

#define SETJMP_TRACE(vmx, vcpu, vmxctx, regname)	\
        VMM_CTR1((vmx)->vm, (vcpu), "setjmp trace " #regname " 0x%016lx", \

vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)
        uint64_t host_rip, host_rsp;

        if (vmxctx != &vmx->ctx[vcpu])
                panic("vmx_setjmp_trace: invalid vmxctx %p; should be %p",
                    vmxctx, &vmx->ctx[vcpu]);

        VMM_CTR1((vmx)->vm, (vcpu), "vmxctx = %p", vmxctx);
        VMM_CTR2((vmx)->vm, (vcpu), "setjmp return code %s(%d)",
            vmx_setjmp_rc2str(rc), rc);

        host_rsp = host_rip = ~0;
        vmread(VMCS_HOST_RIP, &host_rip);
        vmread(VMCS_HOST_RSP, &host_rsp);
        VMM_CTR2((vmx)->vm, (vcpu), "vmcs host_rip 0x%016lx, host_rsp 0x%016lx",

        SETJMP_TRACE(vmx, vcpu, vmxctx, host_r15);
        SETJMP_TRACE(vmx, vcpu, vmxctx, host_r14);
        SETJMP_TRACE(vmx, vcpu, vmxctx, host_r13);
        SETJMP_TRACE(vmx, vcpu, vmxctx, host_r12);
        SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbp);
        SETJMP_TRACE(vmx, vcpu, vmxctx, host_rsp);
        SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbx);
        SETJMP_TRACE(vmx, vcpu, vmxctx, host_rip);

        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdi);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rsi);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdx);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rcx);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r8);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r9);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rax);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbx);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbp);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r10);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r11);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r12);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r13);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r14);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r15);
        SETJMP_TRACE(vmx, vcpu, vmxctx, guest_cr2);

vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)

vmx_fix_cr0(u_long cr0)

        return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);

vmx_fix_cr4(u_long cr4)

        return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
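
/*
 * Illustrative note: the masks above are derived from the
 * MSR_VMX_CR{0,4}_FIXED{0,1} MSRs in vmx_init(). For example,
 * vmx_fix_cr4(0) still returns a value with CR4_VMXE set, because VMX
 * operation requires that bit to be 1.
 */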

        if (vpid < 0 || vpid > 0xffff)
                panic("vpid_free: invalid vpid %d", vpid);

        /*
         * VPIDs [0,VM_MAXCPU] are special and are not allocated from
         * the unit number allocator.
         */
        if (vpid > VM_MAXCPU)
                free_unr(vpid_unr, vpid);

vpid_alloc(uint16_t *vpid, int num)

        if (num <= 0 || num > VM_MAXCPU)
                panic("invalid number of vpids requested: %d", num);

        /*
         * If the "enable vpid" execution control is not enabled then the
         * VPID is required to be 0 for all vcpus.
         */
        if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
                for (i = 0; i < num; i++)

        /*
         * Allocate a unique VPID for each vcpu from the unit number allocator.
         */
        for (i = 0; i < num; i++) {
                x = alloc_unr(vpid_unr);

                atomic_add_int(&vpid_alloc_failed, 1);

                /*
                 * If the unit number allocator does not have enough unique
                 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
                 *
                 * These VPIDs are not unique across VMs, but this does not
                 * affect correctness because the combined mappings are also
                 * tagged with the EP4TA, which is unique for each VM.
                 *
                 * It is still sub-optimal because the invvpid will invalidate
                 * combined mappings for a particular VPID across all EP4TAs.
                 */
                for (i = 0; i < num; i++)

        /*
         * VPID 0 is required when the "enable VPID" execution control is
         * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
         * unit number allocator does not have sufficient unique VPIDs to
         * satisfy the allocation.
         *
         * The remaining VPIDs are managed by the unit number allocator.
         */
        vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);

msr_save_area_init(struct msr_entry *g_area, int *g_count)

        static struct msr_entry guest_msrs[] = {
                { MSR_KGSBASE, 0, 0 },

        cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
        if (cnt > GUEST_MSR_MAX_ENTRIES)
                panic("guest msr save area overrun");
        bcopy(guest_msrs, g_area, sizeof(guest_msrs));

vmx_disable(void *arg __unused)
        struct invvpid_desc invvpid_desc = { 0 };
        struct invept_desc invept_desc = { 0 };

        if (vmxon_enabled[curcpu]) {
                /*
                 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
                 *
                 * VMXON or VMXOFF are not required to invalidate any TLB
                 * caching structures, so invalidate all contexts explicitly
                 * here. This prevents potential retention of cached
                 * information in the TLB between distinct VMX episodes.
                 */
                invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
                invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);

        load_cr4(rcr4() & ~CR4_VMXE);

        if (vpid_unr != NULL) {
                delete_unrhdr(vpid_unr);

        smp_rendezvous(NULL, vmx_disable, NULL, NULL);

vmx_enable(void *arg __unused)

        load_cr4(rcr4() | CR4_VMXE);
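
        /*
         * The first 32 bits of the VMXON region must contain the VMCS
         * revision identifier reported by MSR_VMX_BASIC before VMXON is
         * executed; that is what the vmx_revision() store below does.
         */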
        *(uint32_t *)vmxon_region[curcpu] = vmx_revision();
        error = vmxon(vmxon_region[curcpu]);
                vmxon_enabled[curcpu] = 1;

        uint64_t fixed0, fixed1, feature_control;

        /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
        if (!(cpu_feature2 & CPUID2_VMX)) {
                printf("vmx_init: processor does not support VMX operation\n");

        /*
         * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
         * are set (bits 0 and 2 respectively).
         */
        feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
        if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
            (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
                printf("vmx_init: VMX operation disabled by BIOS\n");

        /* Check support for primary processor-based VM-execution controls */
        error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
            MSR_VMX_TRUE_PROCBASED_CTLS,
            PROCBASED_CTLS_ONE_SETTING,
            PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
                printf("vmx_init: processor does not support desired primary "
                    "processor-based controls\n");

        /* Clear the processor-based ctl bits that are set on demand */
        procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;

        /* Check support for secondary processor-based VM-execution controls */
        error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
            MSR_VMX_PROCBASED_CTLS2,
            PROCBASED_CTLS2_ONE_SETTING,
            PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
                printf("vmx_init: processor does not support desired secondary "
                    "processor-based controls\n");

        /* Check support for VPID */
        error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
            PROCBASED2_ENABLE_VPID, 0, &tmp);
                procbased_ctls2 |= PROCBASED2_ENABLE_VPID;

        /* Check support for pin-based VM-execution controls */
        error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
            MSR_VMX_TRUE_PINBASED_CTLS,
            PINBASED_CTLS_ONE_SETTING,
            PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
                printf("vmx_init: processor does not support desired "
                    "pin-based controls\n");

        /* Check support for VM-exit controls */
        error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
            VM_EXIT_CTLS_ONE_SETTING,
            VM_EXIT_CTLS_ZERO_SETTING,

                /* Try again without the PAT MSR bits */
                error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
                    MSR_VMX_TRUE_EXIT_CTLS,
                    VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
                    VM_EXIT_CTLS_ZERO_SETTING,
                        printf("vmx_init: processor does not support desired "

                printf("vmm: PAT MSR access not supported\n");
                guest_msr_valid(MSR_PAT);

        /* Check support for VM-entry controls */
        if (!vmx_no_patmsr) {
                error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
                    MSR_VMX_TRUE_ENTRY_CTLS,
                    VM_ENTRY_CTLS_ONE_SETTING,
                    VM_ENTRY_CTLS_ZERO_SETTING,
                error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
                    MSR_VMX_TRUE_ENTRY_CTLS,
                    VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
                    VM_ENTRY_CTLS_ZERO_SETTING,

                printf("vmx_init: processor does not support desired "

        /*
         * Check support for optional features by testing them
         */
        cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
            MSR_VMX_TRUE_PROCBASED_CTLS,
            PROCBASED_HLT_EXITING, 0,

        cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
            MSR_VMX_PROCBASED_CTLS,

        cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
            MSR_VMX_TRUE_PROCBASED_CTLS,
            PROCBASED_PAUSE_EXITING, 0,

        cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
            MSR_VMX_PROCBASED_CTLS2,
            PROCBASED2_UNRESTRICTED_GUEST, 0,

        cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
            MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,

                printf("vmx_init: ept initialization failed (%d)\n", error);

        /*
         * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
         */
        fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
        fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
        cr0_ones_mask = fixed0 & fixed1;
        cr0_zeros_mask = ~fixed0 & ~fixed1;

        /*
         * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
         * if unrestricted guest execution is allowed.
         */
        if (cap_unrestricted_guest)
                cr0_ones_mask &= ~(CR0_PG | CR0_PE);

        /*
         * Do not allow the guest to set CR0_NW or CR0_CD.
         */
        cr0_zeros_mask |= (CR0_NW | CR0_CD);

        fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
        fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
        cr4_ones_mask = fixed0 & fixed1;
        cr4_zeros_mask = ~fixed0 & ~fixed1;
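
        /*
         * Note that MSR_VMX_CR4_FIXED0 forces CR4_VMXE to 1, so CR4_VMXE
         * lands in cr4_ones_mask; the CR4 read shadow set up later keeps
         * the guest from observing that the bit is actually set.
         */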

        /* enable VMX operation */
        smp_rendezvous(NULL, vmx_enable, NULL, NULL);

vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
        int error, mask_ident, shadow_ident;

        if (which != 0 && which != 4)
                panic("vmx_setup_cr_shadow: unknown cr%d", which);

                mask_ident = VMCS_CR0_MASK;
                mask_value = cr0_ones_mask | cr0_zeros_mask;
                shadow_ident = VMCS_CR0_SHADOW;
                mask_ident = VMCS_CR4_MASK;
                mask_value = cr4_ones_mask | cr4_zeros_mask;
                shadow_ident = VMCS_CR4_SHADOW;

        error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);

        error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);

#define vmx_setup_cr0_shadow(vmcs,init)	vmx_setup_cr_shadow(0, (vmcs), (init))
#define vmx_setup_cr4_shadow(vmcs,init)	vmx_setup_cr_shadow(4, (vmcs), (init))
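
/*
 * How the shadows work (Intel SDM): for CR0/CR4 bits that are set in the
 * corresponding guest/host mask, a guest "mov from cr" reads the bit from
 * the read shadow instead of the real register, and a guest attempt to
 * change such a bit causes a CR-access VM exit rather than taking effect.
 */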

vmx_vminit(struct vm *vm, pmap_t pmap)
        uint16_t vpid[VM_MAXCPU];
        int i, error, guest_msr_count;

        vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
        if ((uintptr_t)vmx & PAGE_MASK) {
                panic("malloc of struct vmx not aligned on %d byte boundary",

        vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));

        /*
         * Clean up EPTP-tagged guest physical and combined mappings
         *
         * VMX transitions are not required to invalidate any guest physical
         * mappings. So, it may be possible for stale guest physical mappings
         * to be present in the processor TLBs.
         *
         * Combined mappings for this EP4TA are also invalidated for all VPIDs.
         */
        ept_invalidate_mappings(vmx->eptp);

        msr_bitmap_initialize(vmx->msr_bitmap);

        /*
         * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
         * The guest FSBASE and GSBASE are saved and restored during
         * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
         * always restored from the vmcs host state area on vm-exit.
         *
         * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
         * how they are saved/restored so can be directly accessed by the
         *
         * Guest KGSBASE is saved and restored in the guest MSR save area.
         * Host KGSBASE is restored before returning to userland from the pcb.
         * There will be a window of time when we are executing in the host
         * kernel context with a value of KGSBASE from the guest. This is ok
         * because the value of KGSBASE is inconsequential in kernel context.
         *
         * MSR_EFER is saved and restored in the guest VMCS area on a
         * VM exit and entry respectively. It is also restored from the
         * host VMCS area on a VM exit.
         */
        if (guest_msr_rw(vmx, MSR_GSBASE) ||
            guest_msr_rw(vmx, MSR_FSBASE) ||
            guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
            guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
            guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
            guest_msr_rw(vmx, MSR_KGSBASE) ||
            guest_msr_rw(vmx, MSR_EFER))
                panic("vmx_vminit: error setting guest msr access");

        /*
         * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
         * and entry respectively. It is also restored from the host VMCS
         * area on a VM exit. However, if running on a system with no
         * MSR_PAT save/restore support, leave access disabled so accesses
         * are trapped.
         */
        if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
                panic("vmx_vminit: error setting guest pat msr access");

        vpid_alloc(vpid, VM_MAXCPU);

        for (i = 0; i < VM_MAXCPU; i++) {
                vmx->vmcs[i].identifier = vmx_revision();
                error = vmclear(&vmx->vmcs[i]);
                        panic("vmx_vminit: vmclear error %d on vcpu %d\n",

                error = vmcs_set_defaults(&vmx->vmcs[i],
                    (u_long)&vmx->ctx[i],
                    exit_ctls, entry_ctls,
                    vtophys(vmx->msr_bitmap),
                        panic("vmx_vminit: vmcs_set_defaults error %d", error);

                vmx->cap[i].proc_ctls = procbased_ctls;
                vmx->cap[i].proc_ctls2 = procbased_ctls2;

                vmx->state[i].lastcpu = -1;
                vmx->state[i].vpid = vpid[i];

                msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);

                error = vmcs_set_msr_save(&vmx->vmcs[i],
                    vtophys(vmx->guest_msrs[i]),
                        panic("vmcs_set_msr_save error %d", error);

                /*
                 * Set up the CR0/4 shadows, and init the read shadow
                 * to the power-on register value from the Intel Sys Arch.
                 */
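                /* 0x60000010 below is CR0 with the CD, NW and ET bits set. */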
                error = vmx_setup_cr0_shadow(&vmx->vmcs[i], 0x60000010);
                        panic("vmx_setup_cr0_shadow %d", error);

                error = vmx_setup_cr4_shadow(&vmx->vmcs[i], 0);
                        panic("vmx_setup_cr4_shadow %d", error);

                vmx->ctx[i].pmap = pmap;
                vmx->ctx[i].eptp = vmx->eptp;

vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)

        func = vmxctx->guest_rax;
        handled = x86_emulate_cpuid(vm, vcpu,
            (uint32_t*)(&vmxctx->guest_rax),
            (uint32_t*)(&vmxctx->guest_rbx),
            (uint32_t*)(&vmxctx->guest_rcx),
            (uint32_t*)(&vmxctx->guest_rdx));

vmx_run_trace(struct vmx *vmx, int vcpu)

        VMM_CTR1(vmx->vm, vcpu, "Resume execution at 0x%0lx", vmcs_guest_rip());

vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,

        VMM_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
            handled ? "handled" : "unhandled",
            exit_reason_to_str(exit_reason), rip);

vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)

        VMM_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);

vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
        struct vmxstate *vmxstate;
        struct invvpid_desc invvpid_desc = { 0 };

        vmxstate = &vmx->state[vcpu];
        lastcpu = vmxstate->lastcpu;
        vmxstate->lastcpu = curcpu;

        if (lastcpu == curcpu) {

        vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);

        error = vmwrite(VMCS_HOST_TR_BASE, vmm_get_host_trbase());

        error = vmwrite(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());

        error = vmwrite(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());

        /*
         * If we are using VPIDs then invalidate all mappings tagged with 'vpid'
         *
         * We do this because this vcpu was executing on a different host
         * cpu when it last ran. We do not track whether it invalidated
         * mappings associated with its 'vpid' during that run. So we must
         * assume that the mappings associated with 'vpid' on 'curcpu' are
         * stale and invalidate them.
         *
         * Note that we incur this penalty only when the scheduler chooses to
         * move the thread associated with this vcpu between host cpus.
         *
         * Note also that this will invalidate mappings tagged with 'vpid'
         */
        if (vmxstate->vpid != 0) {
                invvpid_desc.vpid = vmxstate->vpid;
                invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);

vm_exit_update_rip(struct vm_exit *vmexit)

        error = vmwrite(VMCS_GUEST_RIP, vmexit->rip + vmexit->inst_length);
                panic("vmx_run: error %d writing to VMCS_GUEST_RIP", error);

/*
 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
 */
CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);

vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)

        vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
        error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
                panic("vmx_set_int_window_exiting: vmwrite error %d", error);

vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)

        vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
        error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
                panic("vmx_clear_int_window_exiting: vmwrite error %d", error);

static void __inline
vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)

        vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
        error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
                panic("vmx_set_nmi_window_exiting: vmwrite error %d", error);

static void __inline
vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)

        vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
        error = vmwrite(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
                panic("vmx_clear_nmi_window_exiting: vmwrite error %d", error);

vmx_inject_nmi(struct vmx *vmx, int vcpu)
        uint64_t info, interruptibility;

        /* Bail out if no NMI requested */
        if (!vm_nmi_pending(vmx->vm, vcpu))

        error = vmread(VMCS_GUEST_INTERRUPTIBILITY, &interruptibility);
                panic("vmx_inject_nmi: vmread(interruptibility) %d",

        if (interruptibility & nmi_blocking_bits)

        /*
         * Inject the virtual NMI. The vector must be the NMI IDT entry
         * or the VMCS entry check will fail.
         */
        info = VMCS_INTERRUPTION_INFO_NMI | VMCS_INTERRUPTION_INFO_VALID;

        error = vmwrite(VMCS_ENTRY_INTR_INFO, info);
                panic("vmx_inject_nmi: vmwrite(intrinfo) %d", error);

        VMM_CTR0(vmx->vm, vcpu, "Injecting vNMI");

        /* Clear the request */
        vm_nmi_clear(vmx->vm, vcpu);

        /*
         * Set the NMI Window Exiting execution control so we can inject
         * the virtual NMI as soon as the blocking condition goes away.
         */
        vmx_set_nmi_window_exiting(vmx, vcpu);

        VMM_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");

vmx_inject_interrupts(struct vmx *vmx, int vcpu)
        uint64_t info, rflags, interruptibility;

        const int HWINTR_BLOCKED = VMCS_INTERRUPTIBILITY_STI_BLOCKING |
            VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING;
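
        /*
         * STI and MOV SS each inhibit interrupt delivery until the end of
         * the following instruction; injecting a hardware interrupt while
         * either blocking bit is set would fail the VM-entry checks, hence
         * the HWINTR_BLOCKED gate below.
         */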

        /*
         * If there is already an interrupt pending then just return.
         *
         * This could happen if an interrupt was injected on a prior
         * VM entry but the actual entry into guest mode was aborted
         * because of a pending AST.
         */
        error = vmread(VMCS_ENTRY_INTR_INFO, &info);
                panic("vmx_inject_interrupts: vmread(intrinfo) %d", error);
        if (info & VMCS_INTERRUPTION_INFO_VALID)

        /*
         * NMI injection has priority so deal with those first
         */
        if (vmx_inject_nmi(vmx, vcpu))

        /* Ask the local apic for a vector to inject */
        vector = lapic_pending_intr(vmx->vm, vcpu);

        if (vector < 32 || vector > 255)
                panic("vmx_inject_interrupts: invalid vector %d\n", vector);

        /* Check RFLAGS.IF and the interruptibility state of the guest */
        error = vmread(VMCS_GUEST_RFLAGS, &rflags);
                panic("vmx_inject_interrupts: vmread(rflags) %d", error);

        if ((rflags & PSL_I) == 0)

        error = vmread(VMCS_GUEST_INTERRUPTIBILITY, &interruptibility);
                panic("vmx_inject_interrupts: vmread(interruptibility) %d",

        if (interruptibility & HWINTR_BLOCKED)

        /* Inject the interrupt */
        info = VMCS_INTERRUPTION_INFO_HW_INTR | VMCS_INTERRUPTION_INFO_VALID;

        error = vmwrite(VMCS_ENTRY_INTR_INFO, info);
                panic("vmx_inject_interrupts: vmwrite(intrinfo) %d", error);

        /* Update the Local APIC ISR */
        lapic_intr_accepted(vmx->vm, vcpu, vector);

        VMM_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);

        /*
         * Set the Interrupt Window Exiting execution control so we can inject
         * the interrupt as soon as the blocking condition goes away.
         */
        vmx_set_int_window_exiting(vmx, vcpu);

        VMM_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");

vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
        int error, cr, vmcs_guest_cr, vmcs_shadow_cr;
        uint64_t crval, regval, ones_mask, zeros_mask;
        const struct vmxctx *vmxctx;

        /* We only handle mov to %cr0 or %cr4 at this time */
        if ((exitqual & 0xf0) != 0x00)

        cr = exitqual & 0xf;
        if (cr != 0 && cr != 4)

        vmxctx = &vmx->ctx[vcpu];

        /*
         * We must use vmwrite() directly here because vmcs_setreg() will
         * call vmclear(vmcs) as a side-effect which we certainly don't want.
         */
        switch ((exitqual >> 8) & 0xf) {
                regval = vmxctx->guest_rax;
                regval = vmxctx->guest_rcx;
                regval = vmxctx->guest_rdx;
                regval = vmxctx->guest_rbx;
                error = vmread(VMCS_GUEST_RSP, &regval);
                        panic("vmx_emulate_cr_access: "
                            "error %d reading guest rsp", error);
                regval = vmxctx->guest_rbp;
                regval = vmxctx->guest_rsi;
                regval = vmxctx->guest_rdi;
                regval = vmxctx->guest_r8;
                regval = vmxctx->guest_r9;
                regval = vmxctx->guest_r10;
                regval = vmxctx->guest_r11;
                regval = vmxctx->guest_r12;
                regval = vmxctx->guest_r13;
                regval = vmxctx->guest_r14;
                regval = vmxctx->guest_r15;

                ones_mask = cr0_ones_mask;
                zeros_mask = cr0_zeros_mask;
                vmcs_guest_cr = VMCS_GUEST_CR0;
                vmcs_shadow_cr = VMCS_CR0_SHADOW;
                ones_mask = cr4_ones_mask;
                zeros_mask = cr4_zeros_mask;
                vmcs_guest_cr = VMCS_GUEST_CR4;
                vmcs_shadow_cr = VMCS_CR4_SHADOW;

        error = vmwrite(vmcs_shadow_cr, regval);
                panic("vmx_emulate_cr_access: error %d writing cr%d shadow",

        crval = regval | ones_mask;
        crval &= ~zeros_mask;
        error = vmwrite(vmcs_guest_cr, crval);
                panic("vmx_emulate_cr_access: error %d writing cr%d",

        if (cr == 0 && regval & CR0_PG) {
                uint64_t efer, entry_ctls;

                /*
                 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
                 * the "IA-32e mode guest" bit in VM-entry control must be
                 */
                error = vmread(VMCS_GUEST_IA32_EFER, &efer);
                        panic("vmx_emulate_cr_access: error %d efer read",

                if (efer & EFER_LME) {
                        error = vmwrite(VMCS_GUEST_IA32_EFER, efer);
                                panic("vmx_emulate_cr_access: error %d"
                                    " efer write", error);

                        error = vmread(VMCS_ENTRY_CTLS, &entry_ctls);
                                panic("vmx_emulate_cr_access: error %d"
                                    " entry ctls read", error);

                        entry_ctls |= VM_ENTRY_GUEST_LMA;
                        error = vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
                                panic("vmx_emulate_cr_access: error %d"
                                    " entry ctls write", error);

ept_fault_type(uint64_t ept_qual)

        if (ept_qual & EPT_VIOLATION_DATA_WRITE)
                fault_type = VM_PROT_WRITE;
        else if (ept_qual & EPT_VIOLATION_INST_FETCH)
                fault_type = VM_PROT_EXECUTE;
                fault_type = VM_PROT_READ;

        return (fault_type);

ept_protection(uint64_t ept_qual)

        if (ept_qual & EPT_VIOLATION_GPA_READABLE)
                prot |= VM_PROT_READ;
        if (ept_qual & EPT_VIOLATION_GPA_WRITEABLE)
                prot |= VM_PROT_WRITE;
        if (ept_qual & EPT_VIOLATION_GPA_EXECUTABLE)
                prot |= VM_PROT_EXECUTE;

ept_emulation_fault(uint64_t ept_qual)

        /* EPT fault on an instruction fetch doesn't make sense here */
        if (ept_qual & EPT_VIOLATION_INST_FETCH)

        /* EPT fault must be a read fault or a write fault */
        read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
        write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
        if ((read | write) == 0)

        /*
         * The EPT violation must have been caused by accessing a
         * guest-physical address that is a translation of a guest-linear
         */
        if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
            (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {

vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
        struct vmxctx *vmxctx;
        uint32_t eax, ecx, edx, idtvec_info, idtvec_err, reason;

        vmcs = &vmx->vmcs[vcpu];
        vmxctx = &vmx->ctx[vcpu];
        qual = vmexit->u.vmx.exit_qualification;
        reason = vmexit->u.vmx.exit_reason;
        vmexit->exitcode = VM_EXITCODE_BOGUS;

        vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);

        /*
         * VM exits that could be triggered during event injection on the
         * previous VM entry need to be handled specially by re-injecting
         *
         * See "Information for VM Exits During Event Delivery" in Intel SDM
         */
        case EXIT_REASON_EPT_FAULT:
        case EXIT_REASON_EPT_MISCONFIG:
        case EXIT_REASON_APIC:
        case EXIT_REASON_TASK_SWITCH:
        case EXIT_REASON_EXCEPTION:
                idtvec_info = vmcs_idt_vectoring_info();
                if (idtvec_info & VMCS_IDT_VEC_VALID) {
                        idtvec_info &= ~(1 << 12); /* clear undefined bit */
                        vmwrite(VMCS_ENTRY_INTR_INFO, idtvec_info);
                        if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
                                idtvec_err = vmcs_idt_vectoring_err();
                                vmwrite(VMCS_ENTRY_EXCEPTION_ERROR, idtvec_err);
                        vmwrite(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);

        case EXIT_REASON_CR_ACCESS:
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
                handled = vmx_emulate_cr_access(vmx, vcpu, qual);
        case EXIT_REASON_RDMSR:
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
                ecx = vmxctx->guest_rcx;
                error = emulate_rdmsr(vmx->vm, vcpu, ecx);
                        vmexit->exitcode = VM_EXITCODE_RDMSR;
                        vmexit->u.msr.code = ecx;
        case EXIT_REASON_WRMSR:
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
                eax = vmxctx->guest_rax;
                ecx = vmxctx->guest_rcx;
                edx = vmxctx->guest_rdx;
                error = emulate_wrmsr(vmx->vm, vcpu, ecx,
                    (uint64_t)edx << 32 | eax);
                        vmexit->exitcode = VM_EXITCODE_WRMSR;
                        vmexit->u.msr.code = ecx;
                        vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
        case EXIT_REASON_HLT:
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
                vmexit->exitcode = VM_EXITCODE_HLT;
        case EXIT_REASON_MTF:
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
                vmexit->exitcode = VM_EXITCODE_MTRAP;
        case EXIT_REASON_PAUSE:
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
                vmexit->exitcode = VM_EXITCODE_PAUSE;
        case EXIT_REASON_INTR_WINDOW:
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
                vmx_clear_int_window_exiting(vmx, vcpu);
                VMM_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
        case EXIT_REASON_EXT_INTR:
                /*
                 * External interrupts serve only to cause VM exits and allow
                 * the host interrupt handler to run.
                 *
                 * If this external interrupt triggers a virtual interrupt
                 * to a VM, then that state will be recorded by the
                 * host interrupt handler in the VM's softc. We will inject
                 * this virtual interrupt during the subsequent VM enter.
                 */

                /*
                 * This is special. We want to treat this as a 'handled'
                 * VM-exit but not increment the instruction pointer.
                 */
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
        case EXIT_REASON_NMI_WINDOW:
                /* Exit to allow the pending virtual NMI to be injected */
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
                vmx_clear_nmi_window_exiting(vmx, vcpu);
                VMM_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
        case EXIT_REASON_INOUT:
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
                vmexit->exitcode = VM_EXITCODE_INOUT;
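                /*
                 * Decode the I/O-instruction exit qualification (Intel SDM):
                 * bits 2:0 hold the access size minus one, bit 3 the
                 * direction (1 = IN), bit 4 the string-instruction flag,
                 * bit 5 the REP prefix and bits 31:16 the port number.
                 */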
                vmexit->u.inout.bytes = (qual & 0x7) + 1;
                vmexit->u.inout.in = (qual & 0x8) ? 1 : 0;
                vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
                vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
                vmexit->u.inout.port = (uint16_t)(qual >> 16);
                vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
        case EXIT_REASON_CPUID:
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
                handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
        case EXIT_REASON_EPT_FAULT:
                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EPT_FAULT, 1);
                /*
                 * If 'gpa' lies within the address space allocated to
                 * memory then this must be a nested page fault otherwise
                 * this must be an instruction that accesses MMIO space.
                 */
                if (vm_mem_allocated(vmx->vm, gpa)) {
                        vmexit->exitcode = VM_EXITCODE_PAGING;
                        vmexit->u.paging.gpa = gpa;
                        vmexit->u.paging.fault_type = ept_fault_type(qual);
                        vmexit->u.paging.protection = ept_protection(qual);
                } else if (ept_emulation_fault(qual)) {
                        vmexit->exitcode = VM_EXITCODE_INST_EMUL;
                        vmexit->u.inst_emul.gpa = gpa;
                        vmexit->u.inst_emul.gla = vmcs_gla();
                        vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();

                vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);

                /*
                 * It is possible that control is returned to userland
                 * even though we were able to handle the VM exit in the
                 *
                 * In such a case we want to make sure that the userland
                 * restarts guest execution at the instruction *after*
                 * the one we just processed. Therefore we update the
                 * guest rip in the VMCS and in 'vmexit'.
                 */
                vm_exit_update_rip(vmexit);
                vmexit->rip += vmexit->inst_length;
                vmexit->inst_length = 0;

        if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
                /*
                 * If this VM exit was not claimed by anybody then
                 * treat it as a generic VMX exit.
                 */
                vmexit->exitcode = VM_EXITCODE_VMX;
                vmexit->u.vmx.error = 0;
                /*
                 * The exitcode and collateral have been populated.
                 * The VM exit will be processed further in userland.
                 */

vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap)
        int error, vie, rc, handled, astpending;
        uint32_t exit_reason;
        struct vmxctx *vmxctx;
        struct vm_exit *vmexit;

        vmcs = &vmx->vmcs[vcpu];
        vmxctx = &vmx->ctx[vcpu];
        vmxctx->launched = 0;

        vmexit = vm_exitinfo(vmx->vm, vcpu);

        KASSERT(vmxctx->pmap == pmap,
            ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
        KASSERT(vmxctx->eptp == vmx->eptp,
            ("eptp %p different than ctx eptp %#lx", eptp, vmxctx->eptp));

        /*
         * XXX Can we avoid doing this every time we do a vm run?
         *
         * We do this every time because we may set up the virtual machine
         * from a different process than the one that actually runs it.
         *
         * If the life of a virtual machine was spent entirely in the context
         * of a single process we could do this once in vmcs_set_defaults().
         */
        if ((error = vmwrite(VMCS_HOST_CR3, rcr3())) != 0)
                panic("vmx_run: error %d writing to VMCS_HOST_CR3", error);

        if ((error = vmwrite(VMCS_GUEST_RIP, rip)) != 0)
                panic("vmx_run: error %d writing to VMCS_GUEST_RIP", error);

        if ((error = vmx_set_pcpu_defaults(vmx, vcpu)) != 0)
                panic("vmx_run: error %d setting up pcpu defaults", error);

                lapic_timer_tick(vmx->vm, vcpu);
                vmx_inject_interrupts(vmx, vcpu);
                vmx_run_trace(vmx, vcpu);
                rc = vmx_setjmp(vmxctx);

                vmx_setjmp_trace(vmx, vcpu, vmxctx, rc);

                case VMX_RETURN_DIRECT:
                        if (vmxctx->launched == 0) {
                                vmxctx->launched = 1;

                        panic("vmx_launch/resume should not return");
                case VMX_RETURN_LONGJMP:
                        break;		/* vm exit */
                case VMX_RETURN_AST:

                case VMX_RETURN_VMRESUME:
                        vie = vmcs_instruction_error();
                        if (vmxctx->launch_error == VM_FAIL_INVALID ||
                            vie != VMRESUME_WITH_NON_LAUNCHED_VMCS) {
                                printf("vmresume error %d vmcs inst error %d\n",
                                    vmxctx->launch_error, vie);

                        vmx_launch(vmxctx);	/* try to launch the guest */
                        panic("vmx_launch should not return");
                case VMX_RETURN_VMLAUNCH:
                        vie = vmcs_instruction_error();
                        printf("vmlaunch error %d vmcs inst error %d\n",
                            vmxctx->launch_error, vie);
                case VMX_RETURN_INVEPT:
                        panic("vm %s:%d invept error %d",
                            vm_name(vmx->vm), vcpu, vmxctx->launch_error);
                        panic("vmx_setjmp returned %d", rc);

                /* enable interrupts */

                /* collect some basic information for VM exit processing */
                vmexit->rip = rip = vmcs_guest_rip();
                vmexit->inst_length = vmexit_instruction_length();
                vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
                vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();

                        vmexit->inst_length = 0;
                        vmexit->exitcode = VM_EXITCODE_BOGUS;
                        vmx_astpending_trace(vmx, vcpu, rip);
                        vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);

                handled = vmx_exit_process(vmx, vcpu, vmexit);
                vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);

        /*
         * If a VM exit has been handled then the exitcode must be BOGUS
         * If a VM exit is not handled then the exitcode must not be BOGUS
         */
        if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
            (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
                panic("Mismatch between handled (%d) and exitcode (%d)",
                    handled, vmexit->exitcode);

        vmm_stat_incr(vmx->vm, vcpu, VMEXIT_USERSPACE, 1);

        VMM_CTR1(vmx->vm, vcpu, "goto userland: exitcode %d", vmexit->exitcode);

        /*
         * We need to do this to ensure that any VMCS state cached by the
         * processor is flushed to memory. We need to do this in case the
         * VM moves to a different cpu the next time it runs.
         *
         * Can we avoid doing this?
         */

        vmexit->exitcode = VM_EXITCODE_VMX;
        vmexit->u.vmx.exit_reason = (uint32_t)-1;
        vmexit->u.vmx.exit_qualification = (uint32_t)-1;
        vmexit->u.vmx.error = vie;

vmx_vmcleanup(void *arg)
        struct vmx *vmx = arg;

        for (i = 0; i < VM_MAXCPU; i++)
                vpid_free(vmx->state[i].vpid);

        /*
         * XXXSMP we also need to clear the VMCS active on the other vcpus.
         */
        error = vmclear(&vmx->vmcs[0]);
                panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error);

vmxctx_regptr(struct vmxctx *vmxctx, int reg)

        case VM_REG_GUEST_RAX:
                return (&vmxctx->guest_rax);
        case VM_REG_GUEST_RBX:
                return (&vmxctx->guest_rbx);
        case VM_REG_GUEST_RCX:
                return (&vmxctx->guest_rcx);
        case VM_REG_GUEST_RDX:
                return (&vmxctx->guest_rdx);
        case VM_REG_GUEST_RSI:
                return (&vmxctx->guest_rsi);
        case VM_REG_GUEST_RDI:
                return (&vmxctx->guest_rdi);
        case VM_REG_GUEST_RBP:
                return (&vmxctx->guest_rbp);
        case VM_REG_GUEST_R8:
                return (&vmxctx->guest_r8);
        case VM_REG_GUEST_R9:
                return (&vmxctx->guest_r9);
        case VM_REG_GUEST_R10:
                return (&vmxctx->guest_r10);
        case VM_REG_GUEST_R11:
                return (&vmxctx->guest_r11);
        case VM_REG_GUEST_R12:
                return (&vmxctx->guest_r12);
        case VM_REG_GUEST_R13:
                return (&vmxctx->guest_r13);
        case VM_REG_GUEST_R14:
                return (&vmxctx->guest_r14);
        case VM_REG_GUEST_R15:
                return (&vmxctx->guest_r15);

vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)

        if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {

vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)

        if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {

vmx_shadow_reg(int reg)

        case VM_REG_GUEST_CR0:
                shreg = VMCS_CR0_SHADOW;
        case VM_REG_GUEST_CR4:
                shreg = VMCS_CR4_SHADOW;

vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
        int running, hostcpu;
        struct vmx *vmx = arg;

        running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
        if (running && hostcpu != curcpu)
                panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);

        if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)

        return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));

vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
        int error, hostcpu, running, shadow;
        struct vmx *vmx = arg;

        running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
        if (running && hostcpu != curcpu)
                panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);

        if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)

        error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);

                /*
                 * If the "load EFER" VM-entry control is 1 then the
                 * value of EFER.LMA must be identical to "IA-32e mode guest"
                 * bit in the VM-entry control.
                 */
                if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
                    (reg == VM_REG_GUEST_EFER)) {
                        vmcs_getreg(&vmx->vmcs[vcpu], running,
                            VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
                                ctls |= VM_ENTRY_GUEST_LMA;
                                ctls &= ~VM_ENTRY_GUEST_LMA;
                        vmcs_setreg(&vmx->vmcs[vcpu], running,
                            VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);

                shadow = vmx_shadow_reg(reg);
                        /*
                         * Store the unmodified value in the shadow
                         */
                        error = vmcs_setreg(&vmx->vmcs[vcpu], running,
                            VMCS_IDENT(shadow), val);

vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
        struct vmx *vmx = arg;

        return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc));

vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
        struct vmx *vmx = arg;

        return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc));

vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code,
        struct vmx *vmx = arg;
        struct vmcs *vmcs = &vmx->vmcs[vcpu];

        static uint32_t type_map[VM_EVENT_MAX] = {
                0x1,		/* VM_EVENT_NONE */
                0x0,		/* VM_HW_INTR */
                0x3,		/* VM_HW_EXCEPTION */
                0x4,		/* VM_SW_INTR */
                0x5,		/* VM_PRIV_SW_EXCEPTION */
                0x6,		/* VM_SW_EXCEPTION */

        /*
         * If there is already an exception pending to be delivered to the
         * vcpu then just return.
         */
        error = vmcs_getreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);

        if (info & VMCS_INTERRUPTION_INFO_VALID)

        info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0);
        info |= VMCS_INTERRUPTION_INFO_VALID;
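
        /*
         * Interruption-information format (Intel SDM): bits 7:0 hold the
         * vector, bits 10:8 the event type (hence type_map above), bit 11
         * the "deliver error code" flag and bit 31 the valid bit.
         */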
        error = vmcs_setreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);

                error = vmcs_setreg(vmcs, 0,
                    VMCS_IDENT(VMCS_ENTRY_EXCEPTION_ERROR),

vmx_getcap(void *arg, int vcpu, int type, int *retval)
        struct vmx *vmx = arg;

        vcap = vmx->cap[vcpu].set;

        case VM_CAP_HALT_EXIT:
        case VM_CAP_PAUSE_EXIT:
        case VM_CAP_MTRAP_EXIT:
                if (cap_monitor_trap)
        case VM_CAP_UNRESTRICTED_GUEST:
                if (cap_unrestricted_guest)
        case VM_CAP_ENABLE_INVPCID:

        *retval = (vcap & (1 << type)) ? 1 : 0;

vmx_setcap(void *arg, int vcpu, int type, int val)
        struct vmx *vmx = arg;
        struct vmcs *vmcs = &vmx->vmcs[vcpu];

        case VM_CAP_HALT_EXIT:
                if (cap_halt_exit) {
                        pptr = &vmx->cap[vcpu].proc_ctls;
                        flag = PROCBASED_HLT_EXITING;
                        reg = VMCS_PRI_PROC_BASED_CTLS;
        case VM_CAP_MTRAP_EXIT:
                if (cap_monitor_trap) {
                        pptr = &vmx->cap[vcpu].proc_ctls;
                        flag = PROCBASED_MTF;
                        reg = VMCS_PRI_PROC_BASED_CTLS;
        case VM_CAP_PAUSE_EXIT:
                if (cap_pause_exit) {
                        pptr = &vmx->cap[vcpu].proc_ctls;
                        flag = PROCBASED_PAUSE_EXITING;
                        reg = VMCS_PRI_PROC_BASED_CTLS;
        case VM_CAP_UNRESTRICTED_GUEST:
                if (cap_unrestricted_guest) {
                        pptr = &vmx->cap[vcpu].proc_ctls2;
                        flag = PROCBASED2_UNRESTRICTED_GUEST;
                        reg = VMCS_SEC_PROC_BASED_CTLS;
        case VM_CAP_ENABLE_INVPCID:
                        pptr = &vmx->cap[vcpu].proc_ctls2;
                        flag = PROCBASED2_ENABLE_INVPCID;
                        reg = VMCS_SEC_PROC_BASED_CTLS;

                error = vmwrite(reg, baseval);

                /*
                 * Update optional stored flags, and record
                 */
                        vmx->cap[vcpu].set |= (1 << type);
                        vmx->cap[vcpu].set &= ~(1 << type);

struct vmm_ops vmm_ops_intel = {