2 * Copyright (c) 2011 NetApp, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
39 #include <sys/sysctl.h>
44 #include <machine/psl.h>
45 #include <machine/cpufunc.h>
46 #include <machine/md_var.h>
47 #include <machine/segments.h>
48 #include <machine/smp.h>
49 #include <machine/specialreg.h>
50 #include <machine/vmparam.h>
52 #include <machine/vmm.h>
53 #include <machine/vmm_dev.h>
54 #include <machine/vmm_instruction_emul.h>
55 #include "vmm_lapic.h"
57 #include "vmm_ioport.h"
63 #include "vlapic_priv.h"
66 #include "vmx_cpufunc.h"
70 #include "vmx_controls.h"
72 #define PINBASED_CTLS_ONE_SETTING \
73 (PINBASED_EXTINT_EXITING | \
74 PINBASED_NMI_EXITING | \
76 #define PINBASED_CTLS_ZERO_SETTING 0
78 #define PROCBASED_CTLS_WINDOW_SETTING \
79 (PROCBASED_INT_WINDOW_EXITING | \
80 PROCBASED_NMI_WINDOW_EXITING)
82 #define PROCBASED_CTLS_ONE_SETTING \
83 (PROCBASED_SECONDARY_CONTROLS | \
84 PROCBASED_MWAIT_EXITING | \
85 PROCBASED_MONITOR_EXITING | \
86 PROCBASED_IO_EXITING | \
87 PROCBASED_MSR_BITMAPS | \
88 PROCBASED_CTLS_WINDOW_SETTING | \
89 PROCBASED_CR8_LOAD_EXITING | \
90 PROCBASED_CR8_STORE_EXITING)
91 #define PROCBASED_CTLS_ZERO_SETTING \
92 (PROCBASED_CR3_LOAD_EXITING | \
93 PROCBASED_CR3_STORE_EXITING | \
96 #define PROCBASED_CTLS2_ONE_SETTING PROCBASED2_ENABLE_EPT
97 #define PROCBASED_CTLS2_ZERO_SETTING 0
99 #define VM_EXIT_CTLS_ONE_SETTING \
100 (VM_EXIT_HOST_LMA | \
101 VM_EXIT_SAVE_EFER | \
102 VM_EXIT_LOAD_EFER | \
103 VM_EXIT_ACKNOWLEDGE_INTERRUPT | \
107 #define VM_EXIT_CTLS_ZERO_SETTING VM_EXIT_SAVE_DEBUG_CONTROLS
109 #define VM_ENTRY_CTLS_ONE_SETTING (VM_ENTRY_LOAD_EFER | VM_ENTRY_LOAD_PAT)
111 #define VM_ENTRY_CTLS_ZERO_SETTING \
112 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \
113 VM_ENTRY_INTO_SMM | \
114 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
119 static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
120 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
122 SYSCTL_DECL(_hw_vmm);
123 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
125 int vmxon_enabled[MAXCPU];
126 static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
128 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
129 static uint32_t exit_ctls, entry_ctls;
131 static uint64_t cr0_ones_mask, cr0_zeros_mask;
132 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
133 &cr0_ones_mask, 0, NULL);
134 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
135 &cr0_zeros_mask, 0, NULL);
137 static uint64_t cr4_ones_mask, cr4_zeros_mask;
138 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
139 &cr4_ones_mask, 0, NULL);
140 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
141 &cr4_zeros_mask, 0, NULL);
143 static int vmx_initialized;
144 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
145 &vmx_initialized, 0, "Intel VMX initialized");
148 * Optional capabilities
150 static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);
152 static int cap_halt_exit;
153 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
154 "HLT triggers a VM-exit");
156 static int cap_pause_exit;
157 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
158 0, "PAUSE triggers a VM-exit");
160 static int cap_unrestricted_guest;
161 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
162 &cap_unrestricted_guest, 0, "Unrestricted guests");
164 static int cap_monitor_trap;
165 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
166 &cap_monitor_trap, 0, "Monitor trap flag");
168 static int cap_invpcid;
169 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
170 0, "Guests are allowed to use INVPCID");
172 static int virtual_interrupt_delivery;
173 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
174 &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
176 static int posted_interrupts;
177 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
178 &posted_interrupts, 0, "APICv posted interrupt support");
181 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
182 &pirvec, 0, "APICv posted interrupt vector");
184 static struct unrhdr *vpid_unr;
185 static u_int vpid_alloc_failed;
186 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
187 &vpid_alloc_failed, 0, NULL);
190 * Use the last page below 4GB as the APIC access address. This address is
191 * occupied by the boot firmware so it is guaranteed that it will not conflict
192 * with a page in system memory.
194 #define APIC_ACCESS_ADDRESS 0xFFFFF000
196 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
197 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
198 static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
199 static void vmx_inject_pir(struct vlapic *vlapic);
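/*
 * Translate a VM-exit reason code into a short human-readable string
 * for tracing and diagnostics.
 */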
203 exit_reason_to_str(int reason)
205 static char reasonbuf[32];
208 case EXIT_REASON_EXCEPTION:
210 case EXIT_REASON_EXT_INTR:
212 case EXIT_REASON_TRIPLE_FAULT:
213 return "triplefault";
214 case EXIT_REASON_INIT:
216 case EXIT_REASON_SIPI:
218 case EXIT_REASON_IO_SMI:
220 case EXIT_REASON_SMI:
222 case EXIT_REASON_INTR_WINDOW:
224 case EXIT_REASON_NMI_WINDOW:
226 case EXIT_REASON_TASK_SWITCH:
228 case EXIT_REASON_CPUID:
230 case EXIT_REASON_GETSEC:
232 case EXIT_REASON_HLT:
234 case EXIT_REASON_INVD:
236 case EXIT_REASON_INVLPG:
238 case EXIT_REASON_RDPMC:
240 case EXIT_REASON_RDTSC:
242 case EXIT_REASON_RSM:
244 case EXIT_REASON_VMCALL:
246 case EXIT_REASON_VMCLEAR:
248 case EXIT_REASON_VMLAUNCH:
250 case EXIT_REASON_VMPTRLD:
252 case EXIT_REASON_VMPTRST:
254 case EXIT_REASON_VMREAD:
256 case EXIT_REASON_VMRESUME:
258 case EXIT_REASON_VMWRITE:
260 case EXIT_REASON_VMXOFF:
262 case EXIT_REASON_VMXON:
264 case EXIT_REASON_CR_ACCESS:
266 case EXIT_REASON_DR_ACCESS:
268 case EXIT_REASON_INOUT:
270 case EXIT_REASON_RDMSR:
272 case EXIT_REASON_WRMSR:
274 case EXIT_REASON_INVAL_VMCS:
276 case EXIT_REASON_INVAL_MSR:
278 case EXIT_REASON_MWAIT:
280 case EXIT_REASON_MTF:
282 case EXIT_REASON_MONITOR:
284 case EXIT_REASON_PAUSE:
286 case EXIT_REASON_MCE_DURING_ENTRY:
287 return "mce-during-entry";
288 case EXIT_REASON_TPR:
290 case EXIT_REASON_APIC_ACCESS:
291 return "apic-access";
292 case EXIT_REASON_GDTR_IDTR:
294 case EXIT_REASON_LDTR_TR:
296 case EXIT_REASON_EPT_FAULT:
298 case EXIT_REASON_EPT_MISCONFIG:
299 return "eptmisconfig";
300 case EXIT_REASON_INVEPT:
302 case EXIT_REASON_RDTSCP:
304 case EXIT_REASON_VMX_PREEMPT:
306 case EXIT_REASON_INVVPID:
308 case EXIT_REASON_WBINVD:
310 case EXIT_REASON_XSETBV:
312 case EXIT_REASON_APIC_WRITE:
315 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
322 vmx_allow_x2apic_msrs(struct vmx *vmx)
329 * Allow read-only access to the following x2APIC MSRs from the guest.
331 error += guest_msr_ro(vmx, MSR_APIC_ID);
332 error += guest_msr_ro(vmx, MSR_APIC_VERSION);
333 error += guest_msr_ro(vmx, MSR_APIC_LDR);
334 error += guest_msr_ro(vmx, MSR_APIC_SVR);
336 for (i = 0; i < 8; i++)
337 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
339 for (i = 0; i < 8; i++)
340 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
342 for (i = 0; i < 8; i++)
343 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
345 error += guest_msr_ro(vmx, MSR_APIC_ESR);
346 error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
347 error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
348 error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
349 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
350 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
351 error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
352 error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
353 error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
354 error += guest_msr_ro(vmx, MSR_APIC_ICR);
357 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
359 * These registers get special treatment described in the section
360 * "Virtualizing MSR-Based APIC Accesses".
362 error += guest_msr_rw(vmx, MSR_APIC_TPR);
363 error += guest_msr_rw(vmx, MSR_APIC_EOI);
364 error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
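/*
 * Force the CR0 bits that VMX requires to be fixed to 1 or 0, leaving
 * the remaining bits untouched.
 */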
370 vmx_fix_cr0(u_long cr0)
373 return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
377 vmx_fix_cr4(u_long cr4)
380 return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
386 if (vpid < 0 || vpid > 0xffff)
387 panic("vpid_free: invalid vpid %d", vpid);
390 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
391 * the unit number allocator.
394 if (vpid > VM_MAXCPU)
395 free_unr(vpid_unr, vpid);
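/*
 * Allocate 'num' VPIDs, one per vcpu. If the unit number allocator
 * cannot provide enough unique VPIDs the [1,VM_MAXCPU] range is used
 * instead.
 */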
399 vpid_alloc(uint16_t *vpid, int num)
403 if (num <= 0 || num > VM_MAXCPU)
404 panic("invalid number of vpids requested: %d", num);
407 * If the "enable vpid" execution control is not enabled then the
408 * VPID is required to be 0 for all vcpus.
410 if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
411 for (i = 0; i < num; i++)
417 * Allocate a unique VPID for each vcpu from the unit number allocator.
419 for (i = 0; i < num; i++) {
420 x = alloc_unr(vpid_unr);
428 atomic_add_int(&vpid_alloc_failed, 1);
431 * If the unit number allocator does not have enough unique
432 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
434 * These VPIDs are not unique across VMs but this does not
435 * affect correctness because the combined mappings are also
436 * tagged with the EP4TA which is unique for each VM.
438 * It is still sub-optimal because the invvpid will invalidate
439 * combined mappings for a particular VPID across all EP4TAs.
444 for (i = 0; i < num; i++)
453 * VPID 0 is required when the "enable VPID" execution control is turned off.
456 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
457 * unit number allocator does not have sufficient unique VPIDs to
458 * satisfy the allocation.
460 * The remaining VPIDs are managed by the unit number allocator.
462 vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
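/*
 * Per-cpu teardown of VMX operation, run via smp_rendezvous().
 */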
466 vmx_disable(void *arg __unused)
468 struct invvpid_desc invvpid_desc = { 0 };
469 struct invept_desc invept_desc = { 0 };
471 if (vmxon_enabled[curcpu]) {
473 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
475 * VMXON or VMXOFF are not required to invalidate any TLB caching
476 * structures, so invalidate all contexts explicitly to prevent
477 * retention of cached information between distinct VMX episodes.
479 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
480 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
483 load_cr4(rcr4() & ~CR4_VMXE);
491 vmm_ipi_free(pirvec);
493 if (vpid_unr != NULL) {
494 delete_unrhdr(vpid_unr);
498 smp_rendezvous(NULL, vmx_disable, NULL, NULL);
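/*
 * Per-cpu setup of VMX operation: enable VMX in IA32_FEATURE_CONTROL
 * if it is not already enabled and locked, set CR4.VMXE and execute
 * VMXON on this cpu's vmxon region.
 */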
504 vmx_enable(void *arg __unused)
507 uint64_t feature_control;
509 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
510 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
511 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
512 wrmsr(MSR_IA32_FEATURE_CONTROL,
513 feature_control | IA32_FEATURE_CONTROL_VMX_EN |
514 IA32_FEATURE_CONTROL_LOCK);
517 load_cr4(rcr4() | CR4_VMXE);
519 *(uint32_t *)vmxon_region[curcpu] = vmx_revision();
520 error = vmxon(vmxon_region[curcpu]);
522 vmxon_enabled[curcpu] = 1;
529 if (vmxon_enabled[curcpu])
530 vmxon(vmxon_region[curcpu]);
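/*
 * Module initialization: verify processor support, compute the pin-based,
 * processor-based, VM-entry and VM-exit control settings, probe optional
 * capabilities and enable VMX operation on all host cpus.
 */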
536 int error, use_tpr_shadow;
537 uint64_t basic, fixed0, fixed1, feature_control;
538 uint32_t tmp, procbased2_vid_bits;
540 /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
541 if (!(cpu_feature2 & CPUID2_VMX)) {
542 printf("vmx_init: processor does not support VMX operation\n");
547 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
548 * are set (bits 0 and 2 respectively).
550 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
551 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
552 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
553 printf("vmx_init: VMX operation disabled by BIOS\n");
558 * Verify capabilities MSR_VMX_BASIC:
559 * - bit 54 indicates support for INS/OUTS decoding
561 basic = rdmsr(MSR_VMX_BASIC);
562 if ((basic & (1UL << 54)) == 0) {
563 printf("vmx_init: processor does not support desired basic "
568 /* Check support for primary processor-based VM-execution controls */
569 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
570 MSR_VMX_TRUE_PROCBASED_CTLS,
571 PROCBASED_CTLS_ONE_SETTING,
572 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
574 printf("vmx_init: processor does not support desired primary "
575 "processor-based controls\n");
579 /* Clear the processor-based ctl bits that are set on demand */
580 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
582 /* Check support for secondary processor-based VM-execution controls */
583 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
584 MSR_VMX_PROCBASED_CTLS2,
585 PROCBASED_CTLS2_ONE_SETTING,
586 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
588 printf("vmx_init: processor does not support desired secondary "
589 "processor-based controls\n");
593 /* Check support for VPID */
594 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
595 PROCBASED2_ENABLE_VPID, 0, &tmp);
597 procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
599 /* Check support for pin-based VM-execution controls */
600 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
601 MSR_VMX_TRUE_PINBASED_CTLS,
602 PINBASED_CTLS_ONE_SETTING,
603 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
605 printf("vmx_init: processor does not support desired "
606 "pin-based controls\n");
610 /* Check support for VM-exit controls */
611 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
612 VM_EXIT_CTLS_ONE_SETTING,
613 VM_EXIT_CTLS_ZERO_SETTING,
616 printf("vmx_init: processor does not support desired "
621 /* Check support for VM-entry controls */
622 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
623 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
626 printf("vmx_init: processor does not support desired "
632 * Check support for optional features by testing them individually.
635 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
636 MSR_VMX_TRUE_PROCBASED_CTLS,
637 PROCBASED_HLT_EXITING, 0,
640 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
641 MSR_VMX_PROCBASED_CTLS,
645 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
646 MSR_VMX_TRUE_PROCBASED_CTLS,
647 PROCBASED_PAUSE_EXITING, 0,
650 cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
651 MSR_VMX_PROCBASED_CTLS2,
652 PROCBASED2_UNRESTRICTED_GUEST, 0,
655 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
656 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
660 * Check support for virtual interrupt delivery.
662 procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
663 PROCBASED2_VIRTUALIZE_X2APIC_MODE |
664 PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
665 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
667 use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
668 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
671 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
672 procbased2_vid_bits, 0, &tmp);
673 if (error == 0 && use_tpr_shadow) {
674 virtual_interrupt_delivery = 1;
675 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
676 &virtual_interrupt_delivery);
679 if (virtual_interrupt_delivery) {
680 procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
681 procbased_ctls2 |= procbased2_vid_bits;
682 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
685 * No need to emulate accesses to %CR8 if virtual
686 * interrupt delivery is enabled.
688 procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
689 procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
692 * Check for Posted Interrupts only if Virtual Interrupt
693 * Delivery is enabled.
695 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
696 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
699 pirvec = vmm_ipi_alloc();
702 printf("vmx_init: unable to allocate "
703 "posted interrupt vector\n");
706 posted_interrupts = 1;
707 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
713 if (posted_interrupts)
714 pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
717 error = ept_init(ipinum);
719 printf("vmx_init: ept initialization failed (%d)\n", error);
724 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
726 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
727 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
728 cr0_ones_mask = fixed0 & fixed1;
729 cr0_zeros_mask = ~fixed0 & ~fixed1;
732 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
733 * if unrestricted guest execution is allowed.
735 if (cap_unrestricted_guest)
736 cr0_ones_mask &= ~(CR0_PG | CR0_PE);
739 * Do not allow the guest to set CR0_NW or CR0_CD.
741 cr0_zeros_mask |= (CR0_NW | CR0_CD);
743 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
744 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
745 cr4_ones_mask = fixed0 & fixed1;
746 cr4_zeros_mask = ~fixed0 & ~fixed1;
752 /* enable VMX operation */
753 smp_rendezvous(NULL, vmx_enable, NULL, NULL);
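/*
 * Dispatch a host interrupt by calling through its IDT gate descriptor.
 */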
761 vmx_trigger_hostintr(int vector)
764 struct gate_descriptor *gd;
768 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
769 "invalid vector %d", vector));
770 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
772 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
773 "has invalid type %d", vector, gd->gd_type));
774 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
775 "has invalid dpl %d", vector, gd->gd_dpl));
776 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
777 "for vector %d has invalid selector %d", vector, gd->gd_selector));
778 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
779 "IST %d", vector, gd->gd_ist));
781 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
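/*
 * Program the %cr0 or %cr4 guest/host mask and read shadow in the VMCS.
 * Guest writes to the masked bits cause a VM-exit while guest reads
 * return the shadowed value.
 */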
786 vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
788 int error, mask_ident, shadow_ident;
791 if (which != 0 && which != 4)
792 panic("vmx_setup_cr_shadow: unknown cr%d", which);
795 mask_ident = VMCS_CR0_MASK;
796 mask_value = cr0_ones_mask | cr0_zeros_mask;
797 shadow_ident = VMCS_CR0_SHADOW;
799 mask_ident = VMCS_CR4_MASK;
800 mask_value = cr4_ones_mask | cr4_zeros_mask;
801 shadow_ident = VMCS_CR4_SHADOW;
804 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
808 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
814 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init))
815 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
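/*
 * Per-VM initialization: allocate the page-aligned vmx softc, set up
 * the MSR bitmap, allocate VPIDs and initialize a VMCS for each vcpu.
 */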
818 vmx_vminit(struct vm *vm, pmap_t pmap)
820 uint16_t vpid[VM_MAXCPU];
826 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
827 if ((uintptr_t)vmx & PAGE_MASK) {
828 panic("malloc of struct vmx not aligned on %d byte boundary",
833 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
836 * Clean up EPTP-tagged guest physical and combined mappings
838 * VMX transitions are not required to invalidate any guest physical
839 * mappings. So, it may be possible for stale guest physical mappings
840 * to be present in the processor TLBs.
842 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
844 ept_invalidate_mappings(vmx->eptp);
846 msr_bitmap_initialize(vmx->msr_bitmap);
849 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
850 * The guest FSBASE and GSBASE are saved and restored during
851 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
852 * always restored from the vmcs host state area on vm-exit.
854 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
855 * how they are saved/restored so they can be directly accessed by the guest.
858 * MSR_EFER is saved and restored in the guest VMCS area on a
859 * VM exit and entry respectively. It is also restored from the
860 * host VMCS area on a VM exit.
862 * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
863 * and entry respectively, and restored from the host VMCS area on a VM exit.
866 * The TSC MSR is exposed read-only. Writes are disallowed as that
867 * will impact the host TSC.
868 * XXX Writes would be implemented with a wrmsr trap, and
869 * then modifying the TSC offset in the VMCS.
871 if (guest_msr_rw(vmx, MSR_GSBASE) ||
872 guest_msr_rw(vmx, MSR_FSBASE) ||
873 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
874 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
875 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
876 guest_msr_rw(vmx, MSR_EFER) ||
877 guest_msr_rw(vmx, MSR_PAT) ||
878 guest_msr_ro(vmx, MSR_TSC))
879 panic("vmx_vminit: error setting guest msr access");
881 vpid_alloc(vpid, VM_MAXCPU);
883 if (virtual_interrupt_delivery) {
884 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
885 APIC_ACCESS_ADDRESS);
886 /* XXX this should really return an error to the caller */
887 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
890 for (i = 0; i < VM_MAXCPU; i++) {
891 vmcs = &vmx->vmcs[i];
892 vmcs->identifier = vmx_revision();
893 error = vmclear(vmcs);
895 panic("vmx_vminit: vmclear error %d on vcpu %d\n",
899 vmx_msr_guest_init(vmx, i);
901 error = vmcs_init(vmcs);
902 KASSERT(error == 0, ("vmcs_init error %d", error));
906 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
907 error += vmwrite(VMCS_EPTP, vmx->eptp);
908 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
909 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
910 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
911 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
912 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
913 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
914 error += vmwrite(VMCS_VPID, vpid[i]);
916 /* exception bitmap */
917 if (vcpu_trace_exceptions(vm, i))
918 exc_bitmap = 0xffffffff;
920 exc_bitmap = 1 << IDT_MC;
921 error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);
923 if (virtual_interrupt_delivery) {
924 error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
925 error += vmwrite(VMCS_VIRTUAL_APIC,
926 vtophys(&vmx->apic_page[i]));
927 error += vmwrite(VMCS_EOI_EXIT0, 0);
928 error += vmwrite(VMCS_EOI_EXIT1, 0);
929 error += vmwrite(VMCS_EOI_EXIT2, 0);
930 error += vmwrite(VMCS_EOI_EXIT3, 0);
932 if (posted_interrupts) {
933 error += vmwrite(VMCS_PIR_VECTOR, pirvec);
934 error += vmwrite(VMCS_PIR_DESC,
935 vtophys(&vmx->pir_desc[i]));
938 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
941 vmx->cap[i].proc_ctls = procbased_ctls;
942 vmx->cap[i].proc_ctls2 = procbased_ctls2;
944 vmx->state[i].lastcpu = NOCPU;
945 vmx->state[i].vpid = vpid[i];
948 * Set up the CR0/4 shadows, and init the read shadow
949 * to the power-on register value from the Intel Sys Arch.
953 error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
955 panic("vmx_setup_cr0_shadow %d", error);
957 error = vmx_setup_cr4_shadow(vmcs, 0);
959 panic("vmx_setup_cr4_shadow %d", error);
961 vmx->ctx[i].pmap = pmap;
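/*
 * Emulate CPUID in place using the guest %rax/%rbx/%rcx/%rdx values
 * saved in the vmxctx.
 */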
968 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
972 func = vmxctx->guest_rax;
974 handled = x86_emulate_cpuid(vm, vcpu,
975 (uint32_t*)(&vmxctx->guest_rax),
976 (uint32_t*)(&vmxctx->guest_rbx),
977 (uint32_t*)(&vmxctx->guest_rcx),
978 (uint32_t*)(&vmxctx->guest_rdx));
983 vmx_run_trace(struct vmx *vmx, int vcpu)
986 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
991 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
995 VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
996 handled ? "handled" : "unhandled",
997 exit_reason_to_str(exit_reason), rip);
1001 static __inline void
1002 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
1005 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
1009 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
1010 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
1013 * Invalidate guest mappings identified by its vpid from the TLB.
1015 static __inline void
1016 vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
1018 struct vmxstate *vmxstate;
1019 struct invvpid_desc invvpid_desc;
1021 vmxstate = &vmx->state[vcpu];
1022 if (vmxstate->vpid == 0)
1027 * Set the 'lastcpu' to an invalid host cpu.
1029 * This will invalidate TLB entries tagged with the vcpu's
1030 * vpid the next time it runs via vmx_set_pcpu_defaults().
1032 vmxstate->lastcpu = NOCPU;
1036 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
1037 "critical section", __func__, vcpu));
1040 * Invalidate all mappings tagged with 'vpid'
1042 * We do this because this vcpu was executing on a different host
1043 * cpu when it last ran. We do not track whether it invalidated
1044 * mappings associated with its 'vpid' during that run. So we must
1045 * assume that the mappings associated with 'vpid' on 'curcpu' are
1046 * stale and invalidate them.
1048 * Note that we incur this penalty only when the scheduler chooses to
1049 * move the thread associated with this vcpu between host cpus.
1051 * Note also that this will invalidate mappings tagged with 'vpid' for all EP4TAs.
1054 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
1055 invvpid_desc._res1 = 0;
1056 invvpid_desc._res2 = 0;
1057 invvpid_desc.vpid = vmxstate->vpid;
1058 invvpid_desc.linear_addr = 0;
1059 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
1060 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
1063 * The invvpid can be skipped if an invept is going to
1064 * be performed before entering the guest. The invept
1065 * will invalidate combined mappings tagged with
1066 * 'vmx->eptp' for all vpids.
1068 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
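/*
 * Refresh the per-cpu VMCS host state (TR, GDTR and GS bases) and flush
 * stale VPID-tagged mappings when a vcpu migrates to a new host cpu.
 */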
1073 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
1075 struct vmxstate *vmxstate;
1077 vmxstate = &vmx->state[vcpu];
1078 if (vmxstate->lastcpu == curcpu)
1081 vmxstate->lastcpu = curcpu;
1083 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1085 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1086 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1087 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1088 vmx_invvpid(vmx, vcpu, pmap, 1);
1092 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1094 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
1096 static void __inline
1097 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1100 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1101 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1102 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1103 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1107 static void __inline
1108 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1111 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1112 ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
1113 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1114 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1115 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1118 static void __inline
1119 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1122 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
1123 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1124 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1125 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1129 static void __inline
1130 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1133 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
1134 ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
1135 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1136 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1137 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1140 #define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \
1141 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1142 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \
1143 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1146 vmx_inject_nmi(struct vmx *vmx, int vcpu)
1150 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1151 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
1152 "interruptibility-state %#x", gi));
1154 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1155 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
1156 "VM-entry interruption information %#x", info));
1159 * Inject the virtual NMI. The vector must be the NMI IDT entry
1160 * or the VMCS entry check will fail.
1162 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
1163 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1165 VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1167 /* Clear the request */
1168 vm_nmi_clear(vmx->vm, vcpu);
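/*
 * Inject pending events (exceptions, NMIs, ExtINTs and APIC interrupts)
 * before VM-entry, falling back to "window exiting" when injection is
 * currently blocked.
 */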
1172 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
1174 int vector, need_nmi_exiting, extint_pending;
1175 uint64_t rflags, entryinfo;
1178 if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
1179 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
1180 "intinfo is not valid: %#lx", __func__, entryinfo));
1182 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1183 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
1184 "pending exception: %#lx/%#x", __func__, entryinfo, info));
1187 vector = info & 0xff;
1188 if (vector == IDT_BP || vector == IDT_OF) {
1190 * VT-x requires #BP and #OF to be injected as software
1193 info &= ~VMCS_INTR_T_MASK;
1194 info |= VMCS_INTR_T_SWEXCEPTION;
1197 if (info & VMCS_INTR_DEL_ERRCODE)
1198 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
1200 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1203 if (vm_nmi_pending(vmx->vm, vcpu)) {
1205 * If there are no conditions blocking NMI injection then
1206 * inject it directly here otherwise enable "NMI window
1207 * exiting" to inject it as soon as we can.
1209 * We also check for STI_BLOCKING because some implementations
1210 * don't allow NMI injection in this case. If we are running
1211 * on a processor that doesn't have this restriction it will
1212 * immediately exit and the NMI will be injected in the
1213 * "NMI window exiting" handler.
1215 need_nmi_exiting = 1;
1216 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1217 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1218 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1219 if ((info & VMCS_INTR_VALID) == 0) {
1220 vmx_inject_nmi(vmx, vcpu);
1221 need_nmi_exiting = 0;
1223 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
1224 "due to VM-entry intr info %#x", info);
1227 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
1228 "Guest Interruptibility-state %#x", gi);
1231 if (need_nmi_exiting)
1232 vmx_set_nmi_window_exiting(vmx, vcpu);
1235 extint_pending = vm_extint_pending(vmx->vm, vcpu);
1237 if (!extint_pending && virtual_interrupt_delivery) {
1238 vmx_inject_pir(vlapic);
1243 * If interrupt-window exiting is already in effect then don't bother
1244 * checking for pending interrupts. This is just an optimization and
1245 * not needed for correctness.
1247 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
1248 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
1249 "pending int_window_exiting");
1253 if (!extint_pending) {
1254 /* Ask the local apic for a vector to inject */
1255 if (!vlapic_pending_intr(vlapic, &vector))
1259 * From the Intel SDM, Volume 3, Section "Maskable
1260 * Hardware Interrupts":
1261 * - maskable interrupt vectors [16,255] can be delivered
1262 * through the local APIC.
1264 KASSERT(vector >= 16 && vector <= 255,
1265 ("invalid vector %d from local APIC", vector));
1267 /* Ask the legacy pic for a vector to inject */
1268 vatpic_pending_intr(vmx->vm, &vector);
1271 * From the Intel SDM, Volume 3, Section "Maskable
1272 * Hardware Interrupts":
1273 * - maskable interrupt vectors [0,255] can be delivered
1274 * through the INTR pin.
1276 KASSERT(vector >= 0 && vector <= 255,
1277 ("invalid vector %d from INTR", vector));
1280 /* Check RFLAGS.IF and the interruptibility state of the guest */
1281 rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1282 if ((rflags & PSL_I) == 0) {
1283 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1284 "rflags %#lx", vector, rflags);
1288 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1289 if (gi & HWINTR_BLOCKING) {
1290 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1291 "Guest Interruptibility-state %#x", vector, gi);
1295 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1296 if (info & VMCS_INTR_VALID) {
1298 * This is expected and could happen for multiple reasons:
1299 * - A vectoring VM-entry was aborted due to astpending
1300 * - A VM-exit happened during event injection.
1301 * - An exception was injected above.
1302 * - An NMI was injected above or after "NMI window exiting"
1304 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1305 "VM-entry intr info %#x", vector, info);
1309 /* Inject the interrupt */
1310 info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
1312 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1314 if (!extint_pending) {
1315 /* Update the Local APIC ISR */
1316 vlapic_intr_accepted(vlapic, vector);
1318 vm_extint_clear(vmx->vm, vcpu);
1319 vatpic_intr_accepted(vmx->vm, vector);
1322 * After we accepted the current ExtINT the PIC may
1323 * have posted another one. If that is the case, set
1324 * the Interrupt Window Exiting execution control so
1325 * we can inject that one too.
1327 * Also, interrupt window exiting allows us to inject any
1328 * pending APIC vector that was preempted by the ExtINT
1329 * as soon as possible. This applies both for the software
1330 * emulated vlapic and the hardware assisted virtual APIC.
1332 vmx_set_int_window_exiting(vmx, vcpu);
1335 VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1341 * Set the Interrupt Window Exiting execution control so we can inject
1342 * the interrupt as soon as the blocking condition goes away.
1344 vmx_set_int_window_exiting(vmx, vcpu);
1348 * If the Virtual NMIs execution control is '1' then the logical processor
1349 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1350 * the VMCS. An IRET instruction in VMX non-root operation will remove any
1351 * virtual-NMI blocking.
1353 * This unblocking occurs even if the IRET causes a fault. In this case the
1354 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1357 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1361 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
1362 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1363 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1364 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1368 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1372 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
1373 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1374 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1375 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1379 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
1383 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1384 KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
1385 ("NMI blocking is not in effect %#x", gi));
1389 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1391 struct vmxctx *vmxctx;
1393 const struct xsave_limits *limits;
1395 vmxctx = &vmx->ctx[vcpu];
1396 limits = vmm_get_xsave_limits();
1399 * Note that the processor raises a GP# fault on its own if
1400 * xsetbv is executed for CPL != 0, so we do not have to
1401 * emulate that fault here.
1404 /* Only xcr0 is supported. */
1405 if (vmxctx->guest_rcx != 0) {
1406 vm_inject_gp(vmx->vm, vcpu);
1410 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1411 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1412 vm_inject_ud(vmx->vm, vcpu);
1416 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1417 if ((xcrval & ~limits->xcr0_allowed) != 0) {
1418 vm_inject_gp(vmx->vm, vcpu);
1422 if (!(xcrval & XFEATURE_ENABLED_X87)) {
1423 vm_inject_gp(vmx->vm, vcpu);
1427 /* AVX (YMM_Hi128) requires SSE. */
1428 if (xcrval & XFEATURE_ENABLED_AVX &&
1429 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1430 vm_inject_gp(vmx->vm, vcpu);
1435 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1436 * ZMM_Hi256, and Hi16_ZMM.
1438 if (xcrval & XFEATURE_AVX512 &&
1439 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1440 (XFEATURE_AVX512 | XFEATURE_AVX)) {
1441 vm_inject_gp(vmx->vm, vcpu);
1446 * Intel MPX requires both bound register state flags to be set or cleared together.
1449 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1450 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1451 vm_inject_gp(vmx->vm, vcpu);
1456 * This runs "inside" vmrun() with the guest's FPU state, so
1457 * modifying xcr0 directly modifies the guest's xcr0, not the
1460 load_xcr(0, xcrval);
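/*
 * Read a guest general purpose register from the vmxctx save area,
 * or from the VMCS in the case of %rsp.
 */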
1465 vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
1467 const struct vmxctx *vmxctx;
1469 vmxctx = &vmx->ctx[vcpu];
1473 return (vmxctx->guest_rax);
1475 return (vmxctx->guest_rcx);
1477 return (vmxctx->guest_rdx);
1479 return (vmxctx->guest_rbx);
1481 return (vmcs_read(VMCS_GUEST_RSP));
1483 return (vmxctx->guest_rbp);
1485 return (vmxctx->guest_rsi);
1487 return (vmxctx->guest_rdi);
1489 return (vmxctx->guest_r8);
1491 return (vmxctx->guest_r9);
1493 return (vmxctx->guest_r10);
1495 return (vmxctx->guest_r11);
1497 return (vmxctx->guest_r12);
1499 return (vmxctx->guest_r13);
1501 return (vmxctx->guest_r14);
1503 return (vmxctx->guest_r15);
1505 panic("invalid vmx register %d", ident);
1510 vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
1512 struct vmxctx *vmxctx;
1514 vmxctx = &vmx->ctx[vcpu];
1518 vmxctx->guest_rax = regval;
1521 vmxctx->guest_rcx = regval;
1524 vmxctx->guest_rdx = regval;
1527 vmxctx->guest_rbx = regval;
1530 vmcs_write(VMCS_GUEST_RSP, regval);
1533 vmxctx->guest_rbp = regval;
1536 vmxctx->guest_rsi = regval;
1539 vmxctx->guest_rdi = regval;
1542 vmxctx->guest_r8 = regval;
1545 vmxctx->guest_r9 = regval;
1548 vmxctx->guest_r10 = regval;
1551 vmxctx->guest_r11 = regval;
1554 vmxctx->guest_r12 = regval;
1557 vmxctx->guest_r13 = regval;
1560 vmxctx->guest_r14 = regval;
1563 vmxctx->guest_r15 = regval;
1566 panic("invalid vmx register %d", ident);
1571 vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1573 uint64_t crval, regval;
1575 /* We only handle mov to %cr0 at this time */
1576 if ((exitqual & 0xf0) != 0x00)
1579 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1581 vmcs_write(VMCS_CR0_SHADOW, regval);
1583 crval = regval | cr0_ones_mask;
1584 crval &= ~cr0_zeros_mask;
1585 vmcs_write(VMCS_GUEST_CR0, crval);
1587 if (regval & CR0_PG) {
1588 uint64_t efer, entry_ctls;
1591 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1592 * the "IA-32e mode guest" bit in VM-entry control must be
1595 efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1596 if (efer & EFER_LME) {
1598 vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1599 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1600 entry_ctls |= VM_ENTRY_GUEST_LMA;
1601 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1609 vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1611 uint64_t crval, regval;
1613 /* We only handle mov to %cr4 at this time */
1614 if ((exitqual & 0xf0) != 0x00)
1617 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1619 vmcs_write(VMCS_CR4_SHADOW, regval);
1621 crval = regval | cr4_ones_mask;
1622 crval &= ~cr4_zeros_mask;
1623 vmcs_write(VMCS_GUEST_CR4, crval);
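/*
 * Emulate "mov to/from %cr8" by forwarding the TPR value to or from
 * the virtual local APIC.
 */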
1629 vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1631 struct vlapic *vlapic;
1635 /* We only handle mov %cr8 to/from a register at this time. */
1636 if ((exitqual & 0xe0) != 0x00) {
1640 vlapic = vm_lapic(vmx->vm, vcpu);
1641 regnum = (exitqual >> 8) & 0xf;
1642 if (exitqual & 0x10) {
1643 cr8 = vlapic_get_cr8(vlapic);
1644 vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
1646 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
1647 vlapic_set_cr8(vlapic, cr8);
1654 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
1661 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
1662 return ((ssar >> 5) & 0x3);
1665 static enum vm_cpu_mode
1670 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
1671 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1673 return (CPU_MODE_64BIT); /* CS.L = 1 */
1675 return (CPU_MODE_COMPATIBILITY);
1676 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
1677 return (CPU_MODE_PROTECTED);
1679 return (CPU_MODE_REAL);
1683 static enum vm_paging_mode
1684 vmx_paging_mode(void)
1687 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1688 return (PAGING_MODE_FLAT);
1689 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1690 return (PAGING_MODE_32);
1691 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1692 return (PAGING_MODE_64);
1694 return (PAGING_MODE_PAE);
1698 inout_str_index(struct vmx *vmx, int vcpuid, int in)
1702 enum vm_reg_name reg;
1704 reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
1705 error = vmx_getreg(vmx, vcpuid, reg, &val);
1706 KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
1711 inout_str_count(struct vmx *vmx, int vcpuid, int rep)
1717 error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
1718 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
1726 inout_str_addrsize(uint32_t inst_info)
1730 size = (inst_info >> 7) & 0x7;
1733 return (2); /* 16 bit */
1735 return (4); /* 32 bit */
1737 return (8); /* 64 bit */
1739 panic("%s: invalid size encoding %d", __func__, size);
1744 inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
1745 struct vm_inout_str *vis)
1750 vis->seg_name = VM_REG_GUEST_ES;
1752 s = (inst_info >> 15) & 0x7;
1753 vis->seg_name = vm_segment_name(s);
1756 error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
1757 KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
1761 vmx_paging_info(struct vm_guest_paging *paging)
1763 paging->cr3 = vmcs_guest_cr3();
1764 paging->cpl = vmx_cpl();
1765 paging->cpu_mode = vmx_cpu_mode();
1766 paging->paging_mode = vmx_paging_mode();
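/*
 * Prepare a VM_EXITCODE_INST_EMUL exit so that the instruction which
 * accessed 'gpa' can be decoded and emulated outside the critical section.
 */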
1770 vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
1772 struct vm_guest_paging *paging;
1775 paging = &vmexit->u.inst_emul.paging;
1777 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1778 vmexit->u.inst_emul.gpa = gpa;
1779 vmexit->u.inst_emul.gla = gla;
1780 vmx_paging_info(paging);
1781 switch (paging->cpu_mode) {
1782 case CPU_MODE_PROTECTED:
1783 case CPU_MODE_COMPATIBILITY:
1784 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1785 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
1788 vmexit->u.inst_emul.cs_d = 0;
1791 vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
1795 ept_fault_type(uint64_t ept_qual)
1799 if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1800 fault_type = VM_PROT_WRITE;
1801 else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1802 fault_type = VM_PROT_EXECUTE;
1804 fault_type= VM_PROT_READ;
1806 return (fault_type);
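/*
 * Determine whether an EPT violation represents an MMIO-style data
 * access that should be handled by instruction emulation.
 */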
1810 ept_emulation_fault(uint64_t ept_qual)
1814 /* EPT fault on an instruction fetch doesn't make sense here */
1815 if (ept_qual & EPT_VIOLATION_INST_FETCH)
1818 /* EPT fault must be a read fault or a write fault */
1819 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1820 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1821 if ((read | write) == 0)
1825 * The EPT violation must have been caused by accessing a
1826 * guest-physical address that is a translation of a guest-linear
1829 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1830 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1838 apic_access_virtualization(struct vmx *vmx, int vcpuid)
1840 uint32_t proc_ctls2;
1842 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1843 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1847 x2apic_virtualization(struct vmx *vmx, int vcpuid)
1849 uint32_t proc_ctls2;
1851 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1852 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
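/*
 * Handle an APIC-write VM-exit by dispatching the written register
 * offset to the corresponding vlapic write handler.
 */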
1856 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1859 int error, handled, offset;
1860 uint32_t *apic_regs, vector;
1864 offset = APIC_WRITE_OFFSET(qual);
1866 if (!apic_access_virtualization(vmx, vcpuid)) {
1868 * In general there should not be any APIC write VM-exits
1869 * unless APIC-access virtualization is enabled.
1871 * However self-IPI virtualization can legitimately trigger
1872 * an APIC-write VM-exit so treat it specially.
1874 if (x2apic_virtualization(vmx, vcpuid) &&
1875 offset == APIC_OFFSET_SELF_IPI) {
1876 apic_regs = (uint32_t *)(vlapic->apic_page);
1877 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1878 vlapic_self_ipi_handler(vlapic, vector);
1885 case APIC_OFFSET_ID:
1886 vlapic_id_write_handler(vlapic);
1888 case APIC_OFFSET_LDR:
1889 vlapic_ldr_write_handler(vlapic);
1891 case APIC_OFFSET_DFR:
1892 vlapic_dfr_write_handler(vlapic);
1894 case APIC_OFFSET_SVR:
1895 vlapic_svr_write_handler(vlapic);
1897 case APIC_OFFSET_ESR:
1898 vlapic_esr_write_handler(vlapic);
1900 case APIC_OFFSET_ICR_LOW:
1902 error = vlapic_icrlo_write_handler(vlapic, &retu);
1903 if (error != 0 || retu)
1904 handled = UNHANDLED;
1906 case APIC_OFFSET_CMCI_LVT:
1907 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1908 vlapic_lvt_write_handler(vlapic, offset);
1910 case APIC_OFFSET_TIMER_ICR:
1911 vlapic_icrtmr_write_handler(vlapic);
1913 case APIC_OFFSET_TIMER_DCR:
1914 vlapic_dcr_write_handler(vlapic);
1917 handled = UNHANDLED;
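/*
 * Return true if 'gpa' falls within the emulated APIC page while
 * APIC-access virtualization is enabled.
 */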
1924 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1927 if (apic_access_virtualization(vmx, vcpuid) &&
1928 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
1935 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
1938 int access_type, offset, allowed;
1940 if (!apic_access_virtualization(vmx, vcpuid))
1943 qual = vmexit->u.vmx.exit_qualification;
1944 access_type = APIC_ACCESS_TYPE(qual);
1945 offset = APIC_ACCESS_OFFSET(qual);
1948 if (access_type == 0) {
1950 * Read data access to the following registers is expected.
1953 case APIC_OFFSET_APR:
1954 case APIC_OFFSET_PPR:
1955 case APIC_OFFSET_RRR:
1956 case APIC_OFFSET_CMCI_LVT:
1957 case APIC_OFFSET_TIMER_CCR:
1963 } else if (access_type == 1) {
1965 * Write data access to the following registers is expected.
1968 case APIC_OFFSET_VER:
1969 case APIC_OFFSET_APR:
1970 case APIC_OFFSET_PPR:
1971 case APIC_OFFSET_RRR:
1972 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
1973 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
1974 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
1975 case APIC_OFFSET_CMCI_LVT:
1976 case APIC_OFFSET_TIMER_CCR:
1985 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
1990 * Regardless of whether the APIC-access is allowed this handler
1991 * always returns UNHANDLED:
1992 * - if the access is allowed then it is handled by emulating the
1993 * instruction that caused the VM-exit (outside the critical section)
1994 * - if the access is not allowed then it will be converted to an
1995 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
2000 static enum task_switch_reason
2001 vmx_task_switch_reason(uint64_t qual)
2005 reason = (qual >> 30) & 0x3;
2014 return (TSR_IDT_GATE);
2016 panic("%s: invalid reason %d", __func__, reason);
2021 emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2026 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2028 error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2034 emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2036 struct vmxctx *vmxctx;
2042 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2044 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2048 vmxctx = &vmx->ctx[vcpuid];
2049 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2050 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2053 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2054 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2061 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2063 int error, handled, in;
2064 struct vmxctx *vmxctx;
2065 struct vlapic *vlapic;
2066 struct vm_inout_str *vis;
2067 struct vm_task_switch *ts;
2068 struct vm_exception vmexc;
2069 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2070 uint32_t intr_type, intr_vec, reason;
2071 uint64_t exitintinfo, qual, gpa;
2074 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2075 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2077 handled = UNHANDLED;
2078 vmxctx = &vmx->ctx[vcpu];
2080 qual = vmexit->u.vmx.exit_qualification;
2081 reason = vmexit->u.vmx.exit_reason;
2082 vmexit->exitcode = VM_EXITCODE_BOGUS;
2084 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2087 * VM-entry failures during or after loading guest state.
2089 * These VM-exits are uncommon but must be handled specially
2090 * as most VM-exit fields are not populated as usual.
2092 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
2093 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
2094 __asm __volatile("int $18");
2099 * VM exits that can be triggered during event delivery need to
2100 * be handled specially by re-injecting the event if the IDT
2101 * vectoring information field's valid bit is set.
2103 * See "Information for VM Exits During Event Delivery" in Intel SDM
2106 idtvec_info = vmcs_idt_vectoring_info();
2107 if (idtvec_info & VMCS_IDT_VEC_VALID) {
2108 idtvec_info &= ~(1 << 12); /* clear undefined bit */
2109 exitintinfo = idtvec_info;
2110 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2111 idtvec_err = vmcs_idt_vectoring_err();
2112 exitintinfo |= (uint64_t)idtvec_err << 32;
2114 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2115 KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
2119 * If 'virtual NMIs' are being used and the VM-exit
2120 * happened while injecting an NMI during the previous
2121 * VM-entry, then clear "blocking by NMI" in the
2122 * Guest Interruptibility-State so the NMI can be
2123 * reinjected on the subsequent VM-entry.
2125 * However, if the NMI was being delivered through a task
2126 * gate, then the new task must start execution with NMIs
2127 * blocked so don't clear NMI blocking in this case.
2129 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2130 if (intr_type == VMCS_INTR_T_NMI) {
2131 if (reason != EXIT_REASON_TASK_SWITCH)
2132 vmx_clear_nmi_blocking(vmx, vcpu);
2134 vmx_assert_nmi_blocking(vmx, vcpu);
2138 * Update VM-entry instruction length if the event being
2139 * delivered was a software interrupt or software exception.
2141 if (intr_type == VMCS_INTR_T_SWINTR ||
2142 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2143 intr_type == VMCS_INTR_T_SWEXCEPTION) {
2144 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2149 case EXIT_REASON_TASK_SWITCH:
2150 ts = &vmexit->u.task_switch;
2151 ts->tsssel = qual & 0xffff;
2152 ts->reason = vmx_task_switch_reason(qual);
2154 ts->errcode_valid = 0;
2155 vmx_paging_info(&ts->paging);
2157 * If the task switch was due to a CALL, JMP, IRET, software
2158 * interrupt (INT n) or software exception (INT3, INTO),
2159 * then the saved %rip references the instruction that caused
2160 * the task switch. The instruction length field in the VMCS
2161 * is valid in this case.
2163 * In all other cases (e.g., NMI, hardware exception) the
2164 * saved %rip is one that would have been saved in the old TSS
2165 * had the task switch completed normally so the instruction
2166 * length field is not needed in this case and is explicitly
2169 if (ts->reason == TSR_IDT_GATE) {
2170 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2171 ("invalid idtvec_info %#x for IDT task switch",
2173 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2174 if (intr_type != VMCS_INTR_T_SWINTR &&
2175 intr_type != VMCS_INTR_T_SWEXCEPTION &&
2176 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2177 /* Task switch triggered by external event */
2179 vmexit->inst_length = 0;
2180 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2181 ts->errcode_valid = 1;
2182 ts->errcode = vmcs_idt_vectoring_err();
2186 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2187 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2188 "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2189 ts->ext ? "external" : "internal",
2190 ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2192 case EXIT_REASON_CR_ACCESS:
2193 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2194 switch (qual & 0xf) {
2196 handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2199 handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2202 handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2206 case EXIT_REASON_RDMSR:
2207 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2209 ecx = vmxctx->guest_rcx;
2210 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2211 error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
2213 vmexit->exitcode = VM_EXITCODE_RDMSR;
2214 vmexit->u.msr.code = ecx;
2218 /* Return to userspace with a valid exitcode */
2219 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2220 ("emulate_rdmsr retu with bogus exitcode"));
2223 case EXIT_REASON_WRMSR:
2224 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2226 eax = vmxctx->guest_rax;
2227 ecx = vmxctx->guest_rcx;
2228 edx = vmxctx->guest_rdx;
2229 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2230 ecx, (uint64_t)edx << 32 | eax);
2231 error = emulate_wrmsr(vmx, vcpu, ecx,
2232 (uint64_t)edx << 32 | eax, &retu);
2234 vmexit->exitcode = VM_EXITCODE_WRMSR;
2235 vmexit->u.msr.code = ecx;
2236 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2240 /* Return to userspace with a valid exitcode */
2241 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2242 ("emulate_wrmsr retu with bogus exitcode"));
2245 case EXIT_REASON_HLT:
2246 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2247 vmexit->exitcode = VM_EXITCODE_HLT;
2248 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2250 case EXIT_REASON_MTF:
2251 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2252 vmexit->exitcode = VM_EXITCODE_MTRAP;
2254 case EXIT_REASON_PAUSE:
2255 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2256 vmexit->exitcode = VM_EXITCODE_PAUSE;
2258 case EXIT_REASON_INTR_WINDOW:
2259 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2260 vmx_clear_int_window_exiting(vmx, vcpu);
2262 case EXIT_REASON_EXT_INTR:
2264 * External interrupts serve only to cause VM exits and allow
2265 * the host interrupt handler to run.
2267 * If this external interrupt triggers a virtual interrupt
2268 * to a VM, then that state will be recorded by the
2269 * host interrupt handler in the VM's softc. We will inject
2270 * this virtual interrupt during the subsequent VM enter.
2272 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2275 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2276 * This appears to be a bug in VMware Fusion?
2278 if (!(intr_info & VMCS_INTR_VALID))
2280 KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2281 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2282 ("VM exit interruption info invalid: %#x", intr_info));
2283 vmx_trigger_hostintr(intr_info & 0xff);
2286 * This is special. We want to treat this as a 'handled'
2287 * VM-exit but not increment the instruction pointer.
2289 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2291 case EXIT_REASON_NMI_WINDOW:
2292 /* Exit to allow the pending virtual NMI to be injected */
2293 if (vm_nmi_pending(vmx->vm, vcpu))
2294 vmx_inject_nmi(vmx, vcpu);
2295 vmx_clear_nmi_window_exiting(vmx, vcpu);
2296 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2298 case EXIT_REASON_INOUT:
2299 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2300 vmexit->exitcode = VM_EXITCODE_INOUT;
2301 vmexit->u.inout.bytes = (qual & 0x7) + 1;
2302 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2303 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2304 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2305 vmexit->u.inout.port = (uint16_t)(qual >> 16);
2306 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2307 if (vmexit->u.inout.string) {
2308 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2309 vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2310 vis = &vmexit->u.inout_str;
2311 vmx_paging_info(&vis->paging);
2312 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2313 vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2314 vis->index = inout_str_index(vmx, vcpu, in);
2315 vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2316 vis->addrsize = inout_str_addrsize(inst_info);
2317 inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2320 case EXIT_REASON_CPUID:
2321 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2322 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2324 case EXIT_REASON_EXCEPTION:
2325 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2326 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2327 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2328 ("VM exit interruption info invalid: %#x", intr_info));
2330 intr_vec = intr_info & 0xff;
2331 intr_type = intr_info & VMCS_INTR_T_MASK;
2334 * If Virtual NMIs control is 1 and the VM-exit is due to a
2335 * fault encountered during the execution of IRET then we must
2336 * restore the state of "virtual-NMI blocking" before resuming the guest.
2339 * See "Resuming Guest Software after Handling an Exception".
2340 * See "Information for VM Exits Due to Vectored Events".
2342 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2343 (intr_vec != IDT_DF) &&
2344 (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2345 vmx_restore_nmi_blocking(vmx, vcpu);
2348 * The NMI has already been handled in vmx_exit_handle_nmi().
2350 if (intr_type == VMCS_INTR_T_NMI)
2354 * Call the machine check handler by hand. Also don't reflect
2355 * the machine check back into the guest.
2357 if (intr_vec == IDT_MC) {
2358 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
2359 __asm __volatile("int $18");
2363 if (intr_vec == IDT_PF) {
2364 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
2365 KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
2370 * Software exceptions exhibit trap-like behavior. This in
2371 * turn requires populating the VM-entry instruction length
2372 * so that the %rip in the trap frame is past the INT3/INTO instruction.
2375 if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2376 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2378 /* Reflect all other exceptions back into the guest */
2379 bzero(&vmexc, sizeof(struct vm_exception));
2380 vmexc.vector = intr_vec;
2381 if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2382 vmexc.error_code_valid = 1;
2383 vmexc.error_code = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2385 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
2386 "the guest", vmexc.vector, vmexc.error_code);
2387 error = vm_inject_exception(vmx->vm, vcpu, &vmexc);
2388 KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2392 case EXIT_REASON_EPT_FAULT:
2394 * If 'gpa' lies within the address space allocated to
2395 * memory then this must be a nested page fault; otherwise
2396 * it must be an instruction that accesses MMIO space.
2399 if (vm_mem_allocated(vmx->vm, gpa) ||
2400 apic_access_fault(vmx, vcpu, gpa)) {
2401 vmexit->exitcode = VM_EXITCODE_PAGING;
2402 vmexit->u.paging.gpa = gpa;
2403 vmexit->u.paging.fault_type = ept_fault_type(qual);
2404 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2405 } else if (ept_emulation_fault(qual)) {
2406 vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2407 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2410 * If Virtual NMIs control is 1 and the VM-exit is due to an
2411 * EPT fault during the execution of IRET then we must restore
2412 * the state of "virtual-NMI blocking" before resuming.
2414 * See description of "NMI unblocking due to IRET" in
2415 * "Exit Qualification for EPT Violations".
2417 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2418 (qual & EXIT_QUAL_NMIUDTI) != 0)
2419 vmx_restore_nmi_blocking(vmx, vcpu);
2421 case EXIT_REASON_VIRTUALIZED_EOI:
2422 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2423 vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2424 vmexit->inst_length = 0; /* trap-like */
2426 case EXIT_REASON_APIC_ACCESS:
2427 handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2429 case EXIT_REASON_APIC_WRITE:
2431 * APIC-write VM exit is trap-like so the %rip is already
2432 * pointing to the next instruction.
2434 vmexit->inst_length = 0;
2435 vlapic = vm_lapic(vmx->vm, vcpu);
2436 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2438 case EXIT_REASON_XSETBV:
2439 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2441 case EXIT_REASON_MONITOR:
2442 vmexit->exitcode = VM_EXITCODE_MONITOR;
2444 case EXIT_REASON_MWAIT:
2445 vmexit->exitcode = VM_EXITCODE_MWAIT;
2448 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2454 * It is possible that control is returned to userland
2455 * even though we were able to handle the VM exit in the kernel.
2458 * In such a case we want to make sure that the userland
2459 * restarts guest execution at the instruction *after*
2460 * the one we just processed. Therefore we update the
2461 * guest rip in the VMCS and in 'vmexit'.
2463 vmexit->rip += vmexit->inst_length;
2464 vmexit->inst_length = 0;
2465 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2467 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2469 * If this VM exit was not claimed by anybody then
2470 * treat it as a generic VMX exit.
2472 vmexit->exitcode = VM_EXITCODE_VMX;
2473 vmexit->u.vmx.status = VM_SUCCESS;
2474 vmexit->u.vmx.inst_type = 0;
2475 vmexit->u.vmx.inst_error = 0;
2478 * The exitcode and collateral have been populated.
2479 * The VM exit will be processed further in userland.
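/*
 * Convert a failure of the VMLAUNCH, VMRESUME or INVEPT instruction itself
 * into a VM_EXITCODE_VMX exit so that userspace can see which instruction
 * failed and the VM-instruction error reported by the processor.
 */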
2486 static __inline void
2487 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2490 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2491 ("vmx_exit_inst_error: invalid inst_fail_status %d",
2492 vmxctx->inst_fail_status));
2494 vmexit->inst_length = 0;
2495 vmexit->exitcode = VM_EXITCODE_VMX;
2496 vmexit->u.vmx.status = vmxctx->inst_fail_status;
2497 vmexit->u.vmx.inst_error = vmcs_instruction_error();
2498 vmexit->u.vmx.exit_reason = ~0;
2499 vmexit->u.vmx.exit_qualification = ~0;
2502 case VMX_VMRESUME_ERROR:
2503 case VMX_VMLAUNCH_ERROR:
2504 case VMX_INVEPT_ERROR:
2505 vmexit->u.vmx.inst_type = rc;
2508 panic("vmx_exit_inst_error: vmx_enter_guest returned %d", rc);
2513 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2514 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2515 * sufficient to simply vector to the NMI handler via a software interrupt.
2516 * However, this must be done before maskable interrupts are enabled
2517 * otherwise the "iret" issued by an interrupt handler will incorrectly
2518 * clear NMI blocking.
2520 static __inline void
2521 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2525 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2527 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2530 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2531 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2532 ("VM exit interruption info invalid: %#x", intr_info));
2534 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2535 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2536 "to NMI has invalid vector: %#x", intr_info));
2537 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2538 __asm __volatile("int $2");
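/*
 * Main vcpu run loop: load the vcpu's VMCS state, inject any pending events,
 * enter the guest and process VM exits until one of them has to be completed
 * in userspace.
 */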
2543 vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
2544 void *rendezvous_cookie, void *suspend_cookie)
2546 int rc, handled, launched;
2549 struct vmxctx *vmxctx;
2551 struct vm_exit *vmexit;
2552 struct vlapic *vlapic;
2554 uint32_t exit_reason;
2558 vmcs = &vmx->vmcs[vcpu];
2559 vmxctx = &vmx->ctx[vcpu];
2560 vlapic = vm_lapic(vm, vcpu);
2561 vmexit = vm_exitinfo(vm, vcpu);
2564 KASSERT(vmxctx->pmap == pmap,
2565 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2567 vmx_msr_guest_enter(vmx, vcpu);
2573 * We do this every time because we may set up the virtual machine
2574 * from a different process than the one that actually runs it.
2576 * If the life of a virtual machine was spent entirely in the context
2577 * of a single process we could do this once in vmx_vminit().
2579 vmcs_write(VMCS_HOST_CR3, rcr3());
2581 vmcs_write(VMCS_GUEST_RIP, startrip);
2582 vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2584 handled = UNHANDLED;
2587 * Interrupts are disabled from this point on until the
2588 * guest starts executing. This is done for the following reasons:
2591 * If an AST is asserted on this thread after the check below,
2592 * then the IPI_AST notification will not be lost, because it
2593 * will cause a VM exit due to external interrupt as soon as
2594 * the guest state is loaded.
2596 * A posted interrupt after 'vmx_inject_interrupts()' will
2597 * not be "lost" because it will be held pending in the host
2598 * APIC while interrupts are disabled. The pending interrupt
2599 * will be recognized as soon as the guest state is loaded.
2601 * The same reasoning applies to the IPI generated by
2602 * pmap_invalidate_ept().
2605 vmx_inject_interrupts(vmx, vcpu, vlapic);
2608 * Check for vcpu suspension after injecting events because
2609 * vmx_inject_interrupts() can suspend the vcpu due to a triple fault.
2612 if (vcpu_suspended(suspend_cookie)) {
2614 vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
2618 if (vcpu_rendezvous_pending(rendezvous_cookie)) {
2620 vm_exit_rendezvous(vmx->vm, vcpu, vmcs_guest_rip());
2624 if (vcpu_should_yield(vm, vcpu)) {
2626 vm_exit_astpending(vmx->vm, vcpu, vmcs_guest_rip());
2627 vmx_astpending_trace(vmx, vcpu, vmexit->rip);
2632 vmx_run_trace(vmx, vcpu);
2633 rc = vmx_enter_guest(vmxctx, vmx, launched);
2635 /* Collect some information for VM exit processing */
2636 vmexit->rip = rip = vmcs_guest_rip();
2637 vmexit->inst_length = vmexit_instruction_length();
2638 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2639 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2641 if (rc == VMX_GUEST_VMEXIT) {
2642 vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2644 handled = vmx_exit_process(vmx, vcpu, vmexit);
2647 vmx_exit_inst_error(vmxctx, rc, vmexit);
2650 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2654 * If a VM exit has been handled then the exitcode must be BOGUS.
2655 * If a VM exit is not handled then the exitcode must not be BOGUS.
2657 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2658 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2659 panic("Mismatch between handled (%d) and exitcode (%d)",
2660 handled, vmexit->exitcode);
2664 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2666 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2670 vmx_msr_guest_exit(vmx, vcpu);
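/*
 * Per-VM teardown: release the APIC access page mapping (if APIC access
 * virtualization was in use) and free the vpid assigned to each vcpu.
 */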
2676 vmx_vmcleanup(void *arg)
2679 struct vmx *vmx = arg;
2681 if (apic_access_virtualization(vmx, 0))
2682 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2684 for (i = 0; i < VM_MAXCPU; i++)
2685 vpid_free(vmx->state[i].vpid);
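/*
 * Return a pointer to the saved copy of a guest general purpose register in
 * the vmxctx, or NULL if the register is not kept there (in which case it is
 * accessed through the VMCS instead).
 */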
2693 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2697 case VM_REG_GUEST_RAX:
2698 return (&vmxctx->guest_rax);
2699 case VM_REG_GUEST_RBX:
2700 return (&vmxctx->guest_rbx);
2701 case VM_REG_GUEST_RCX:
2702 return (&vmxctx->guest_rcx);
2703 case VM_REG_GUEST_RDX:
2704 return (&vmxctx->guest_rdx);
2705 case VM_REG_GUEST_RSI:
2706 return (&vmxctx->guest_rsi);
2707 case VM_REG_GUEST_RDI:
2708 return (&vmxctx->guest_rdi);
2709 case VM_REG_GUEST_RBP:
2710 return (&vmxctx->guest_rbp);
2711 case VM_REG_GUEST_R8:
2712 return (&vmxctx->guest_r8);
2713 case VM_REG_GUEST_R9:
2714 return (&vmxctx->guest_r9);
2715 case VM_REG_GUEST_R10:
2716 return (&vmxctx->guest_r10);
2717 case VM_REG_GUEST_R11:
2718 return (&vmxctx->guest_r11);
2719 case VM_REG_GUEST_R12:
2720 return (&vmxctx->guest_r12);
2721 case VM_REG_GUEST_R13:
2722 return (&vmxctx->guest_r13);
2723 case VM_REG_GUEST_R14:
2724 return (&vmxctx->guest_r14);
2725 case VM_REG_GUEST_R15:
2726 return (&vmxctx->guest_r15);
2727 case VM_REG_GUEST_CR2:
2728 return (&vmxctx->guest_cr2);
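/*
 * Read or write a guest register through its vmxctx copy. A return value of
 * 0 means the register was handled here; a nonzero return tells the callers
 * in vmx_getreg()/vmx_setreg() to fall back to the VMCS-based accessors.
 */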
2736 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2740 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2748 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2752 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
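/*
 * Report the vcpu's interrupt shadow: '1' if the guest interruptibility-state
 * field in the VMCS indicates that hardware interrupts are currently blocked.
 */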
2760 vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
2765 error = vmcs_getreg(&vmx->vmcs[vcpu], running,
2766 VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
2767 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
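/*
 * Clear the vcpu's interrupt shadow. Writing a non-zero value, i.e. forcing
 * the vcpu into an interrupt shadow, is not supported.
 */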
2772 vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
2779 * Forcing the vcpu into an interrupt shadow is not supported.
2786 vmcs = &vmx->vmcs[vcpu];
2787 ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
2788 error = vmcs_getreg(vmcs, running, ident, &gi);
2790 gi &= ~HWINTR_BLOCKING;
2791 error = vmcs_setreg(vmcs, running, ident, gi);
2794 VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
2795 error ? "failed" : "succeeded");
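/*
 * Map a guest control register to the VMCS field holding its read shadow.
 * Only %cr0 and %cr4 have shadows; other registers presumably keep the
 * initial sentinel value so that vmx_setreg() skips the shadow update.
 */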
2800 vmx_shadow_reg(int reg)
2807 case VM_REG_GUEST_CR0:
2808 shreg = VMCS_CR0_SHADOW;
2810 case VM_REG_GUEST_CR4:
2811 shreg = VMCS_CR4_SHADOW;
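/*
 * Read a guest register on behalf of the generic vmm layer: the interrupt
 * shadow pseudo-register comes from the interruptibility state, general
 * purpose registers come from the vmxctx and everything else from the VMCS.
 */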
2821 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
2823 int running, hostcpu;
2824 struct vmx *vmx = arg;
2826 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2827 if (running && hostcpu != curcpu)
2828 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
2830 if (reg == VM_REG_GUEST_INTR_SHADOW)
2831 return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
2833 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
2836 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
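/*
 * Write a guest register. Besides updating the vmxctx or VMCS this also
 * keeps the "IA-32e mode guest" entry control in sync with EFER.LMA, mirrors
 * %cr0 and %cr4 writes into their read shadows and invalidates guest TLB
 * mappings when %cr3 is changed.
 */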
2840 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
2842 int error, hostcpu, running, shadow;
2845 struct vmx *vmx = arg;
2847 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2848 if (running && hostcpu != curcpu)
2849 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
2851 if (reg == VM_REG_GUEST_INTR_SHADOW)
2852 return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
2854 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
2857 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
2861 * If the "load EFER" VM-entry control is 1 then the
2862 * value of EFER.LMA must be identical to the "IA-32e mode guest"
2863 * bit in the VM-entry controls.
2865 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
2866 (reg == VM_REG_GUEST_EFER)) {
2867 vmcs_getreg(&vmx->vmcs[vcpu], running,
2868 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
2870 ctls |= VM_ENTRY_GUEST_LMA;
2872 ctls &= ~VM_ENTRY_GUEST_LMA;
2873 vmcs_setreg(&vmx->vmcs[vcpu], running,
2874 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
2877 shadow = vmx_shadow_reg(reg);
2880 * Store the unmodified value in the shadow register.
2882 error = vmcs_setreg(&vmx->vmcs[vcpu], running,
2883 VMCS_IDENT(shadow), val);
2886 if (reg == VM_REG_GUEST_CR3) {
2888 * Invalidate the guest vcpu's TLB mappings to emulate
2889 * the behavior of updating %cr3.
2891 * XXX the processor retains global mappings when %cr3
2892 * is updated but vmx_invvpid() does not.
2894 pmap = vmx->ctx[vcpu].pmap;
2895 vmx_invvpid(vmx, vcpu, pmap, running);
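/*
 * Segment descriptor accessors. Descriptors live entirely in the VMCS so
 * these simply forward to vmcs_getdesc()/vmcs_setdesc() after checking that
 * the vcpu is not running on another host cpu.
 */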
2903 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2905 int hostcpu, running;
2906 struct vmx *vmx = arg;
2908 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2909 if (running && hostcpu != curcpu)
2910 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2912 return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
2916 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2918 int hostcpu, running;
2919 struct vmx *vmx = arg;
2921 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2922 if (running && hostcpu != curcpu)
2923 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
2925 return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
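/*
 * Report whether an optional capability is supported by the processor and
 * whether it is currently enabled for this vcpu.
 */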
2929 vmx_getcap(void *arg, int vcpu, int type, int *retval)
2931 struct vmx *vmx = arg;
2937 vcap = vmx->cap[vcpu].set;
2940 case VM_CAP_HALT_EXIT:
2944 case VM_CAP_PAUSE_EXIT:
2948 case VM_CAP_MTRAP_EXIT:
2949 if (cap_monitor_trap)
2952 case VM_CAP_UNRESTRICTED_GUEST:
2953 if (cap_unrestricted_guest)
2956 case VM_CAP_ENABLE_INVPCID:
2965 *retval = (vcap & (1 << type)) ? 1 : 0;
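/*
 * Enable or disable an optional capability for the vcpu by setting or
 * clearing the corresponding execution control bit in the VMCS and caching
 * the new value in vmx->cap[vcpu].
 */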
2971 vmx_setcap(void *arg, int vcpu, int type, int val)
2973 struct vmx *vmx = arg;
2974 struct vmcs *vmcs = &vmx->vmcs[vcpu];
2986 case VM_CAP_HALT_EXIT:
2987 if (cap_halt_exit) {
2989 pptr = &vmx->cap[vcpu].proc_ctls;
2991 flag = PROCBASED_HLT_EXITING;
2992 reg = VMCS_PRI_PROC_BASED_CTLS;
2995 case VM_CAP_MTRAP_EXIT:
2996 if (cap_monitor_trap) {
2998 pptr = &vmx->cap[vcpu].proc_ctls;
3000 flag = PROCBASED_MTF;
3001 reg = VMCS_PRI_PROC_BASED_CTLS;
3004 case VM_CAP_PAUSE_EXIT:
3005 if (cap_pause_exit) {
3007 pptr = &vmx->cap[vcpu].proc_ctls;
3009 flag = PROCBASED_PAUSE_EXITING;
3010 reg = VMCS_PRI_PROC_BASED_CTLS;
3013 case VM_CAP_UNRESTRICTED_GUEST:
3014 if (cap_unrestricted_guest) {
3016 pptr = &vmx->cap[vcpu].proc_ctls2;
3018 flag = PROCBASED2_UNRESTRICTED_GUEST;
3019 reg = VMCS_SEC_PROC_BASED_CTLS;
3022 case VM_CAP_ENABLE_INVPCID:
3025 pptr = &vmx->cap[vcpu].proc_ctls2;
3027 flag = PROCBASED2_ENABLE_INVPCID;
3028 reg = VMCS_SEC_PROC_BASED_CTLS;
3042 error = vmwrite(reg, baseval);
3049 * Update optional stored flags, and record the new setting.
3057 vmx->cap[vcpu].set |= (1 << type);
3059 vmx->cap[vcpu].set &= ~(1 << type);
3068 struct vlapic vlapic;
3069 struct pir_desc *pir_desc;
3073 #define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \
3075 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \
3076 level ? "level" : "edge", vector); \
3077 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \
3078 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \
3079 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \
3080 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \
3081 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3085 * vlapic->ops handlers that utilize the APICv hardware assist described in
3086 * Chapter 29 of the Intel SDM.
3089 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3091 struct vlapic_vtx *vlapic_vtx;
3092 struct pir_desc *pir_desc;
3096 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3097 pir_desc = vlapic_vtx->pir_desc;
3100 * Keep track of interrupt requests in the PIR descriptor. This is
3101 * because the virtual APIC page pointed to by the VMCS cannot be
3102 * modified if the vcpu is running.
3105 mask = 1UL << (vector % 64);
3106 atomic_set_long(&pir_desc->pir[idx], mask);
3107 notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
3109 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3110 level, "vmx_set_intr_ready");
3115 vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3117 struct vlapic_vtx *vlapic_vtx;
3118 struct pir_desc *pir_desc;
3119 struct LAPIC *lapic;
3120 uint64_t pending, pirval;
3125 * This function is only expected to be called from the 'HLT' exit
3126 * handler which does not care about the vector that is pending.
3128 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3130 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3131 pir_desc = vlapic_vtx->pir_desc;
3133 pending = atomic_load_acq_long(&pir_desc->pending);
3135 return (0); /* common case */
3138 * If there is an interrupt pending then it will be recognized only
3139 * if its priority is greater than the processor priority.
3141 * Special case: if the processor priority is zero then any pending
3142 * interrupt will be recognized.
3144 lapic = vlapic->apic_page;
3145 ppr = lapic->ppr & 0xf0;
3149 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3152 for (i = 3; i >= 0; i--) {
3153 pirval = pir_desc->pir[i];
3155 vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
3163 vmx_intr_accepted(struct vlapic *vlapic, int vector)
3166 panic("vmx_intr_accepted: not expected to be called");
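/*
 * Update the vector's bit in the EOI-exit bitmap kept in the VMCS. With
 * virtual interrupt delivery enabled, a set bit makes the guest's EOI for
 * that vector cause an EXIT_REASON_VIRTUALIZED_EOI exit, which is how
 * level-triggered interrupts are forwarded back to the vioapic. For example,
 * vector 0x41 selects the second 64-bit bitmap word, bit (0x41 % 64) == 1.
 */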
3170 vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3172 struct vlapic_vtx *vlapic_vtx;
3177 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3178 KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3179 ("vmx_set_tmr: vcpu cannot be running"));
3181 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3182 vmx = vlapic_vtx->vmx;
3183 vmcs = &vmx->vmcs[vlapic->vcpuid];
3184 mask = 1UL << (vector % 64);
3187 val = vmcs_read(VMCS_EOI_EXIT(vector));
3192 vmcs_write(VMCS_EOI_EXIT(vector), val);
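/*
 * Switch the vcpu from "virtualize APIC accesses" to "virtualize x2APIC
 * mode". Since the APIC access page and the MSR bitmap are shared by all
 * vcpus, the mmio unmapping and the x2APIC MSR pass-through are done only
 * once, in the context of vcpu 0.
 */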
3197 vmx_enable_x2apic_mode(struct vlapic *vlapic)
3201 uint32_t proc_ctls2;
3204 vcpuid = vlapic->vcpuid;
3205 vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3206 vmcs = &vmx->vmcs[vcpuid];
3208 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3209 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3210 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3212 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3213 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3214 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3217 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3220 if (vlapic->vcpuid == 0) {
3222 * The nested page table mappings are shared by all vcpus
3223 * so unmap the APIC access page just once.
3225 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3226 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3230 * The MSR bitmap is shared by all vcpus so modify it only
3231 * once in the context of vcpu 0.
3233 error = vmx_allow_x2apic_msrs(vmx);
3234 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
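/*
 * Notify a vcpu that is running on another host cpu about a posted interrupt
 * by sending it the posted-interrupt notification vector ('pirvec').
 */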
3240 vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3243 ipi_cpu(hostcpu, pirvec);
3247 * Transfer the pending interrupts in the PIR descriptor to the IRR
3248 * in the virtual APIC page.
3251 vmx_inject_pir(struct vlapic *vlapic)
3253 struct vlapic_vtx *vlapic_vtx;
3254 struct pir_desc *pir_desc;
3255 struct LAPIC *lapic;
3256 uint64_t val, pirval;
3257 int rvi, pirbase = -1;
3258 uint16_t intr_status_old, intr_status_new;
3260 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3261 pir_desc = vlapic_vtx->pir_desc;
3262 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3263 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3264 "no posted interrupt pending");
3270 lapic = vlapic->apic_page;
3272 val = atomic_readandclear_long(&pir_desc->pir[0]);
3275 lapic->irr1 |= val >> 32;
3280 val = atomic_readandclear_long(&pir_desc->pir[1]);
3283 lapic->irr3 |= val >> 32;
3288 val = atomic_readandclear_long(&pir_desc->pir[2]);
3291 lapic->irr5 |= val >> 32;
3296 val = atomic_readandclear_long(&pir_desc->pir[3]);
3299 lapic->irr7 |= val >> 32;
3304 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3307 * Update RVI so the processor can evaluate pending virtual
3308 * interrupts on VM-entry.
3310 * It is possible for pirval to be 0 here, even though the
3311 * pending bit has been set. The scenario is:
3312 * CPU-Y is sending a posted interrupt to CPU-X, which
3313 * is running a guest and processing posted interrupts in h/w.
3314 * CPU-X will eventually exit and the state seen in s/w is
3315 * the pending bit set, but no PIR bits set.
3318 * (vm running) (host running)
3319 * rx posted interrupt
3322 * READ/CLEAR PIR bits
3325 * pending bit set, PIR 0
3328 rvi = pirbase + flsl(pirval) - 1;
3329 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3330 intr_status_new = (intr_status_old & 0xFF00) | rvi;
3331 if (intr_status_new > intr_status_old) {
3332 vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3333 VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3334 "guest_intr_status changed from 0x%04x to 0x%04x",
3335 intr_status_old, intr_status_new);
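/*
 * Allocate and initialize the virtual local APIC for a vcpu. When the APICv
 * hardware assists are available, the vlapic ops are pointed at the vmx_*()
 * handlers above so that interrupt delivery goes through the PIR descriptor
 * and the virtual APIC page instead of software emulation.
 */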
3340 static struct vlapic *
3341 vmx_vlapic_init(void *arg, int vcpuid)
3344 struct vlapic *vlapic;
3345 struct vlapic_vtx *vlapic_vtx;
3349 vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3350 vlapic->vm = vmx->vm;
3351 vlapic->vcpuid = vcpuid;
3352 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3354 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3355 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3356 vlapic_vtx->vmx = vmx;
3358 if (virtual_interrupt_delivery) {
3359 vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3360 vlapic->ops.pending_intr = vmx_pending_intr;
3361 vlapic->ops.intr_accepted = vmx_intr_accepted;
3362 vlapic->ops.set_tmr = vmx_set_tmr;
3363 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3366 if (posted_interrupts)
3367 vlapic->ops.post_intr = vmx_post_intr;
3369 vlapic_init(vlapic);
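/*
 * Undo vmx_vlapic_init(): tear down the generic vlapic state and free it.
 */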
3375 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3378 vlapic_cleanup(vlapic);
3379 free(vlapic, M_VLAPIC);
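/*
 * Method table exported to the generic vmm layer for Intel VT-x.
 */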
3382 struct vmm_ops vmm_ops_intel = {