2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2011 NetApp, Inc.
6 * Copyright (c) 2018 Joyent, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
42 #include <sys/sysctl.h>
47 #include <machine/psl.h>
48 #include <machine/cpufunc.h>
49 #include <machine/md_var.h>
50 #include <machine/reg.h>
51 #include <machine/segments.h>
52 #include <machine/smp.h>
53 #include <machine/specialreg.h>
54 #include <machine/vmparam.h>
56 #include <machine/vmm.h>
57 #include <machine/vmm_dev.h>
58 #include <machine/vmm_instruction_emul.h>
59 #include "vmm_lapic.h"
61 #include "vmm_ioport.h"
66 #include "vlapic_priv.h"
69 #include "vmx_cpufunc.h"
73 #include "vmx_controls.h"
75 #define PINBASED_CTLS_ONE_SETTING \
76 (PINBASED_EXTINT_EXITING | \
77 PINBASED_NMI_EXITING | \
79 #define PINBASED_CTLS_ZERO_SETTING 0
81 #define PROCBASED_CTLS_WINDOW_SETTING \
82 (PROCBASED_INT_WINDOW_EXITING | \
83 PROCBASED_NMI_WINDOW_EXITING)
85 #define PROCBASED_CTLS_ONE_SETTING \
86 (PROCBASED_SECONDARY_CONTROLS | \
87 PROCBASED_MWAIT_EXITING | \
88 PROCBASED_MONITOR_EXITING | \
89 PROCBASED_IO_EXITING | \
90 PROCBASED_MSR_BITMAPS | \
91 PROCBASED_CTLS_WINDOW_SETTING | \
92 PROCBASED_CR8_LOAD_EXITING | \
93 PROCBASED_CR8_STORE_EXITING)
94 #define PROCBASED_CTLS_ZERO_SETTING \
95 (PROCBASED_CR3_LOAD_EXITING | \
96 PROCBASED_CR3_STORE_EXITING | \
99 #define PROCBASED_CTLS2_ONE_SETTING PROCBASED2_ENABLE_EPT
100 #define PROCBASED_CTLS2_ZERO_SETTING 0
102 #define VM_EXIT_CTLS_ONE_SETTING \
103 (VM_EXIT_SAVE_DEBUG_CONTROLS | \
105 VM_EXIT_SAVE_EFER | \
106 VM_EXIT_LOAD_EFER | \
107 VM_EXIT_ACKNOWLEDGE_INTERRUPT)
109 #define VM_EXIT_CTLS_ZERO_SETTING 0
111 #define VM_ENTRY_CTLS_ONE_SETTING \
112 (VM_ENTRY_LOAD_DEBUG_CONTROLS | \
115 #define VM_ENTRY_CTLS_ZERO_SETTING \
116 (VM_ENTRY_INTO_SMM | \
117 VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
122 static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
123 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
125 SYSCTL_DECL(_hw_vmm);
126 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
128 int vmxon_enabled[MAXCPU];
129 static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
131 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
132 static uint32_t exit_ctls, entry_ctls;
134 static uint64_t cr0_ones_mask, cr0_zeros_mask;
135 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
136 &cr0_ones_mask, 0, NULL);
137 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
138 &cr0_zeros_mask, 0, NULL);
140 static uint64_t cr4_ones_mask, cr4_zeros_mask;
141 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
142 &cr4_ones_mask, 0, NULL);
143 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
144 &cr4_zeros_mask, 0, NULL);
146 static int vmx_initialized;
147 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
148 &vmx_initialized, 0, "Intel VMX initialized");
151 * Optional capabilities
153 static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);
155 static int cap_halt_exit;
156 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
157 "HLT triggers a VM-exit");
159 static int cap_pause_exit;
160 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
161 0, "PAUSE triggers a VM-exit");
163 static int cap_unrestricted_guest;
164 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
165 &cap_unrestricted_guest, 0, "Unrestricted guests");
167 static int cap_monitor_trap;
168 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
169 &cap_monitor_trap, 0, "Monitor trap flag");
171 static int cap_invpcid;
172 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
173 0, "Guests are allowed to use INVPCID");
175 static int virtual_interrupt_delivery;
176 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
177 &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
179 static int posted_interrupts;
180 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
181 &posted_interrupts, 0, "APICv posted interrupt support");
183 static int pirvec = -1;
184 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
185 &pirvec, 0, "APICv posted interrupt vector");
187 static struct unrhdr *vpid_unr;
188 static u_int vpid_alloc_failed;
189 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
190 &vpid_alloc_failed, 0, NULL);
192 static int guest_l1d_flush;
193 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD,
194 &guest_l1d_flush, 0, NULL);
195 static int guest_l1d_flush_sw;
196 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RD,
197 &guest_l1d_flush_sw, 0, NULL);
199 static struct msr_entry msr_load_list[1] __aligned(16);
202 * The definitions of SDT probes for VMX.
205 SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
206 "struct vmx *", "int", "struct vm_exit *");
208 SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
209 "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");
211 SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
212 "struct vmx *", "int", "struct vm_exit *", "uint64_t");
214 SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
215 "struct vmx *", "int", "struct vm_exit *", "uint32_t");
217 SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
218 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");
220 SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
221 "struct vmx *", "int", "struct vm_exit *");
223 SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
224 "struct vmx *", "int", "struct vm_exit *");
226 SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
227 "struct vmx *", "int", "struct vm_exit *");
229 SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
230 "struct vmx *", "int", "struct vm_exit *");
232 SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
233 "struct vmx *", "int", "struct vm_exit *", "uint32_t");
235 SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
236 "struct vmx *", "int", "struct vm_exit *");
238 SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
239 "struct vmx *", "int", "struct vm_exit *");
241 SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
242 "struct vmx *", "int", "struct vm_exit *");
244 SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
245 "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");
247 SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
248 "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");
250 SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
251 "struct vmx *", "int", "struct vm_exit *", "uint64_t");
253 SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
254 "struct vmx *", "int", "struct vm_exit *");
256 SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
257 "struct vmx *", "int", "struct vm_exit *");
259 SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
260 "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");
262 SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
263 "struct vmx *", "int", "struct vm_exit *");
265 SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
266 "struct vmx *", "int", "struct vm_exit *");
268 SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
269 "struct vmx *", "int", "struct vm_exit *");
271 SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
272 "struct vmx *", "int", "struct vm_exit *");
274 SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
275 "struct vmx *", "int", "struct vm_exit *", "uint32_t");
277 SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
278 "struct vmx *", "int", "struct vm_exit *", "int");
281 * Use the last page below 4GB as the APIC access address. This address is
282 * occupied by the boot firmware so it is guaranteed that it will not conflict
283 * with a page in system memory.
285 #define APIC_ACCESS_ADDRESS 0xFFFFF000
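/*
 * When virtual interrupt delivery is enabled, vmx_vminit() maps this page at
 * DEFAULT_APIC_BASE via vm_map_mmio() and programs it into VMCS_APIC_ACCESS
 * for each vcpu.
 */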
287 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
288 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
289 static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
290 static void vmx_inject_pir(struct vlapic *vlapic);
294 exit_reason_to_str(int reason)
296 static char reasonbuf[32];
299 case EXIT_REASON_EXCEPTION:
301 case EXIT_REASON_EXT_INTR:
303 case EXIT_REASON_TRIPLE_FAULT:
304 return "triplefault";
305 case EXIT_REASON_INIT:
307 case EXIT_REASON_SIPI:
309 case EXIT_REASON_IO_SMI:
311 case EXIT_REASON_SMI:
313 case EXIT_REASON_INTR_WINDOW:
315 case EXIT_REASON_NMI_WINDOW:
317 case EXIT_REASON_TASK_SWITCH:
319 case EXIT_REASON_CPUID:
321 case EXIT_REASON_GETSEC:
323 case EXIT_REASON_HLT:
325 case EXIT_REASON_INVD:
327 case EXIT_REASON_INVLPG:
329 case EXIT_REASON_RDPMC:
331 case EXIT_REASON_RDTSC:
333 case EXIT_REASON_RSM:
335 case EXIT_REASON_VMCALL:
337 case EXIT_REASON_VMCLEAR:
339 case EXIT_REASON_VMLAUNCH:
341 case EXIT_REASON_VMPTRLD:
343 case EXIT_REASON_VMPTRST:
345 case EXIT_REASON_VMREAD:
347 case EXIT_REASON_VMRESUME:
349 case EXIT_REASON_VMWRITE:
351 case EXIT_REASON_VMXOFF:
353 case EXIT_REASON_VMXON:
355 case EXIT_REASON_CR_ACCESS:
357 case EXIT_REASON_DR_ACCESS:
359 case EXIT_REASON_INOUT:
361 case EXIT_REASON_RDMSR:
363 case EXIT_REASON_WRMSR:
365 case EXIT_REASON_INVAL_VMCS:
367 case EXIT_REASON_INVAL_MSR:
369 case EXIT_REASON_MWAIT:
371 case EXIT_REASON_MTF:
373 case EXIT_REASON_MONITOR:
375 case EXIT_REASON_PAUSE:
377 case EXIT_REASON_MCE_DURING_ENTRY:
378 return "mce-during-entry";
379 case EXIT_REASON_TPR:
381 case EXIT_REASON_APIC_ACCESS:
382 return "apic-access";
383 case EXIT_REASON_GDTR_IDTR:
385 case EXIT_REASON_LDTR_TR:
387 case EXIT_REASON_EPT_FAULT:
389 case EXIT_REASON_EPT_MISCONFIG:
390 return "eptmisconfig";
391 case EXIT_REASON_INVEPT:
393 case EXIT_REASON_RDTSCP:
395 case EXIT_REASON_VMX_PREEMPT:
397 case EXIT_REASON_INVVPID:
399 case EXIT_REASON_WBINVD:
401 case EXIT_REASON_XSETBV:
403 case EXIT_REASON_APIC_WRITE:
406 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
413 vmx_allow_x2apic_msrs(struct vmx *vmx)
420 * Allow readonly access to the following x2APIC MSRs from the guest.
422 error += guest_msr_ro(vmx, MSR_APIC_ID);
423 error += guest_msr_ro(vmx, MSR_APIC_VERSION);
424 error += guest_msr_ro(vmx, MSR_APIC_LDR);
425 error += guest_msr_ro(vmx, MSR_APIC_SVR);
427 for (i = 0; i < 8; i++)
428 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
430 for (i = 0; i < 8; i++)
431 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
433 for (i = 0; i < 8; i++)
434 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
436 error += guest_msr_ro(vmx, MSR_APIC_ESR);
437 error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
438 error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
439 error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
440 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
441 error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
442 error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
443 error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
444 error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
445 error += guest_msr_ro(vmx, MSR_APIC_ICR);
448 * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
450 * These registers get special treatment described in the section
451 * "Virtualizing MSR-Based APIC Accesses".
453 error += guest_msr_rw(vmx, MSR_APIC_TPR);
454 error += guest_msr_rw(vmx, MSR_APIC_EOI);
455 error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
461 vmx_fix_cr0(u_long cr0)
464 return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
468 vmx_fix_cr4(u_long cr4)
471 return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
477 if (vpid < 0 || vpid > 0xffff)
478 panic("vpid_free: invalid vpid %d", vpid);
481 * VPIDs [0,VM_MAXCPU] are special and are not allocated from
482 * the unit number allocator.
485 if (vpid > VM_MAXCPU)
486 free_unr(vpid_unr, vpid);
490 vpid_alloc(uint16_t *vpid, int num)
494 if (num <= 0 || num > VM_MAXCPU)
495 panic("invalid number of vpids requested: %d", num);
498 * If the "enable vpid" execution control is not enabled then the
499 * VPID is required to be 0 for all vcpus.
501 if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
502 for (i = 0; i < num; i++)
508 * Allocate a unique VPID for each vcpu from the unit number allocator.
510 for (i = 0; i < num; i++) {
511 x = alloc_unr(vpid_unr);
519 atomic_add_int(&vpid_alloc_failed, 1);
522 * If the unit number allocator does not have enough unique
523 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
525 * These VPIDs are not unique across VMs but this does not
526 * affect correctness because the combined mappings are also
527 * tagged with the EP4TA which is unique for each VM.
529 * It is still sub-optimal because the invvpid will invalidate
530 * combined mappings for a particular VPID across all EP4TAs.
535 for (i = 0; i < num; i++)
544 * VPID 0 is required when the "enable VPID" execution control is
547 * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
548 * unit number allocator does not have sufficient unique VPIDs to
549 * satisfy the allocation.
551 * The remaining VPIDs are managed by the unit number allocator.
553 vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
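	/*
	 * Worked example (hypothetical sizing): with a VM_MAXCPU of 16 the
	 * allocator above hands out VPIDs 17 through 0xffff, VPID 0 is
	 * reserved for the no-VPID case, and VPIDs 1 through 16 form the
	 * overflow namespace that vpid_alloc() falls back to when
	 * alloc_unr() runs dry.
	 */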
557 vmx_disable(void *arg __unused)
559 struct invvpid_desc invvpid_desc = { 0 };
560 struct invept_desc invept_desc = { 0 };
562 if (vmxon_enabled[curcpu]) {
564 * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
566 * VMXON or VMXOFF are not required to invalidate any TLB
567 * caching structures. This prevents potential retention of
568 * cached information in the TLB between distinct VMX episodes.
570 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
571 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
574 load_cr4(rcr4() & ~CR4_VMXE);
582 lapic_ipi_free(pirvec);
584 if (vpid_unr != NULL) {
585 delete_unrhdr(vpid_unr);
589 if (nmi_flush_l1d_sw == 1)
590 nmi_flush_l1d_sw = 0;
592 smp_rendezvous(NULL, vmx_disable, NULL, NULL);
598 vmx_enable(void *arg __unused)
601 uint64_t feature_control;
603 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
604 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
605 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
606 wrmsr(MSR_IA32_FEATURE_CONTROL,
607 feature_control | IA32_FEATURE_CONTROL_VMX_EN |
608 IA32_FEATURE_CONTROL_LOCK);
611 load_cr4(rcr4() | CR4_VMXE);
613 *(uint32_t *)vmxon_region[curcpu] = vmx_revision();
614 error = vmxon(vmxon_region[curcpu]);
616 vmxon_enabled[curcpu] = 1;
623 if (vmxon_enabled[curcpu])
624 vmxon(vmxon_region[curcpu]);
630 int error, use_tpr_shadow;
631 uint64_t basic, fixed0, fixed1, feature_control;
632 uint32_t tmp, procbased2_vid_bits;
634 /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
635 if (!(cpu_feature2 & CPUID2_VMX)) {
636 printf("vmx_init: processor does not support VMX operation\n");
641 * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
642 * are set (bits 0 and 2 respectively).
644 feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
645 if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
646 (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
647 printf("vmx_init: VMX operation disabled by BIOS\n");
652 * Verify capabilities MSR_VMX_BASIC:
653 * - bit 54 indicates support for INS/OUTS decoding
655 basic = rdmsr(MSR_VMX_BASIC);
656 if ((basic & (1UL << 54)) == 0) {
657 printf("vmx_init: processor does not support desired basic "
662 /* Check support for primary processor-based VM-execution controls */
663 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
664 MSR_VMX_TRUE_PROCBASED_CTLS,
665 PROCBASED_CTLS_ONE_SETTING,
666 PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
668 printf("vmx_init: processor does not support desired primary "
669 "processor-based controls\n");
673 /* Clear the processor-based ctl bits that are set on demand */
674 procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
676 /* Check support for secondary processor-based VM-execution controls */
677 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
678 MSR_VMX_PROCBASED_CTLS2,
679 PROCBASED_CTLS2_ONE_SETTING,
680 PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
682 printf("vmx_init: processor does not support desired secondary "
683 "processor-based controls\n");
687 /* Check support for VPID */
688 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
689 PROCBASED2_ENABLE_VPID, 0, &tmp);
691 procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
693 /* Check support for pin-based VM-execution controls */
694 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
695 MSR_VMX_TRUE_PINBASED_CTLS,
696 PINBASED_CTLS_ONE_SETTING,
697 PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
699 printf("vmx_init: processor does not support desired "
700 "pin-based controls\n");
704 /* Check support for VM-exit controls */
705 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
706 VM_EXIT_CTLS_ONE_SETTING,
707 VM_EXIT_CTLS_ZERO_SETTING,
710 printf("vmx_init: processor does not support desired "
715 /* Check support for VM-entry controls */
716 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
717 VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
720 printf("vmx_init: processor does not support desired "
726 * Check support for optional features by testing them
729 cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
730 MSR_VMX_TRUE_PROCBASED_CTLS,
731 PROCBASED_HLT_EXITING, 0,
734 cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
735 MSR_VMX_PROCBASED_CTLS,
739 cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
740 MSR_VMX_TRUE_PROCBASED_CTLS,
741 PROCBASED_PAUSE_EXITING, 0,
744 cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
745 MSR_VMX_PROCBASED_CTLS2,
746 PROCBASED2_UNRESTRICTED_GUEST, 0,
749 cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
750 MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
754 * Check support for virtual interrupt delivery.
756 procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
757 PROCBASED2_VIRTUALIZE_X2APIC_MODE |
758 PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
759 PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
761 use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
762 MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
765 error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
766 procbased2_vid_bits, 0, &tmp);
767 if (error == 0 && use_tpr_shadow) {
768 virtual_interrupt_delivery = 1;
769 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
770 &virtual_interrupt_delivery);
773 if (virtual_interrupt_delivery) {
774 procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
775 procbased_ctls2 |= procbased2_vid_bits;
776 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
779 * No need to emulate accesses to %CR8 if virtual
780 * interrupt delivery is enabled.
782 procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
783 procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
786 * Check for Posted Interrupts only if Virtual Interrupt
787 * Delivery is enabled.
789 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
790 MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
793 pirvec = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
794 &IDTVEC(justreturn));
797 printf("vmx_init: unable to allocate "
798 "posted interrupt vector\n");
801 posted_interrupts = 1;
802 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
808 if (posted_interrupts)
809 pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
812 error = ept_init(ipinum);
814 printf("vmx_init: ept initialization failed (%d)\n", error);
818 guest_l1d_flush = (cpu_ia32_arch_caps &
819 IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
820 TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);
823 * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when
824 * available. Otherwise fall back to the software flush
825 * method which loads enough data from the kernel text to
826 * flush existing L1D content, both on VMX entry and on NMI
829 if (guest_l1d_flush) {
830 if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
831 guest_l1d_flush_sw = 1;
832 TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
833 &guest_l1d_flush_sw);
835 if (guest_l1d_flush_sw) {
836 if (nmi_flush_l1d_sw <= 1)
837 nmi_flush_l1d_sw = 1;
839 msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
840 msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
845 * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
847 fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
848 fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
849 cr0_ones_mask = fixed0 & fixed1;
850 cr0_zeros_mask = ~fixed0 & ~fixed1;
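	/*
	 * Illustrative example (exact values are CPU-dependent): bits set in
	 * both FIXED0 and FIXED1, such as CR0_PE and CR0_PG on parts without
	 * unrestricted guest support, land in cr0_ones_mask and are forced on
	 * by vmx_fix_cr0(); bits clear in both land in cr0_zeros_mask and are
	 * forced off.
	 */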
853 * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
854 * if unrestricted guest execution is allowed.
856 if (cap_unrestricted_guest)
857 cr0_ones_mask &= ~(CR0_PG | CR0_PE);
860 * Do not allow the guest to set CR0_NW or CR0_CD.
862 cr0_zeros_mask |= (CR0_NW | CR0_CD);
864 fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
865 fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
866 cr4_ones_mask = fixed0 & fixed1;
867 cr4_zeros_mask = ~fixed0 & ~fixed1;
873 /* enable VMX operation */
874 smp_rendezvous(NULL, vmx_enable, NULL, NULL);
882 vmx_trigger_hostintr(int vector)
885 struct gate_descriptor *gd;
889 KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
890 "invalid vector %d", vector));
891 KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
893 KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
894 "has invalid type %d", vector, gd->gd_type));
895 KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
896 "has invalid dpl %d", vector, gd->gd_dpl));
897 KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
898 "for vector %d has invalid selector %d", vector, gd->gd_selector));
899 KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
900 "IST %d", vector, gd->gd_ist));
902 func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
907 vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
909 int error, mask_ident, shadow_ident;
912 if (which != 0 && which != 4)
913 panic("vmx_setup_cr_shadow: unknown cr%d", which);
916 mask_ident = VMCS_CR0_MASK;
917 mask_value = cr0_ones_mask | cr0_zeros_mask;
918 shadow_ident = VMCS_CR0_SHADOW;
920 mask_ident = VMCS_CR4_MASK;
921 mask_value = cr4_ones_mask | cr4_zeros_mask;
922 shadow_ident = VMCS_CR4_SHADOW;
925 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
929 error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
935 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init))
936 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
939 vmx_vminit(struct vm *vm, pmap_t pmap)
941 uint16_t vpid[VM_MAXCPU];
948 vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
949 if ((uintptr_t)vmx & PAGE_MASK) {
950 panic("malloc of struct vmx not aligned on %d byte boundary",
955 vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
958 * Clean up EPTP-tagged guest physical and combined mappings
960 * VMX transitions are not required to invalidate any guest physical
961 * mappings. So, it may be possible for stale guest physical mappings
962 * to be present in the processor TLBs.
964 * Combined mappings for this EP4TA are also invalidated for all VPIDs.
966 ept_invalidate_mappings(vmx->eptp);
968 msr_bitmap_initialize(vmx->msr_bitmap);
971 * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
972 * The guest FSBASE and GSBASE are saved and restored during
973 * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
974 * always restored from the vmcs host state area on vm-exit.
976 * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
977 * how they are saved/restored so can be directly accessed by the
980 * MSR_EFER is saved and restored in the guest VMCS area on a
981 * VM exit and entry respectively. It is also restored from the
982 * host VMCS area on a VM exit.
984 * The TSC MSR is exposed read-only. Writes are disallowed as
985 * that will impact the host TSC. If the guest does a write
986 * the "use TSC offsetting" execution control is enabled and the
987 * difference between the host TSC and the guest TSC is written
988 * into the TSC offset in the VMCS.
990 if (guest_msr_rw(vmx, MSR_GSBASE) ||
991 guest_msr_rw(vmx, MSR_FSBASE) ||
992 guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
993 guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
994 guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
995 guest_msr_rw(vmx, MSR_EFER) ||
996 guest_msr_ro(vmx, MSR_TSC))
997 panic("vmx_vminit: error setting guest msr access");
999 vpid_alloc(vpid, VM_MAXCPU);
1001 if (virtual_interrupt_delivery) {
1002 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
1003 APIC_ACCESS_ADDRESS);
1004 /* XXX this should really return an error to the caller */
1005 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
1008 maxcpus = vm_get_maxcpus(vm);
1009 for (i = 0; i < maxcpus; i++) {
1010 vmcs = &vmx->vmcs[i];
1011 vmcs->identifier = vmx_revision();
1012 error = vmclear(vmcs);
1014 panic("vmx_vminit: vmclear error %d on vcpu %d\n",
1018 vmx_msr_guest_init(vmx, i);
1020 error = vmcs_init(vmcs);
1021 KASSERT(error == 0, ("vmcs_init error %d", error));
1025 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
1026 error += vmwrite(VMCS_EPTP, vmx->eptp);
1027 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
1028 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
1029 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
1030 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
1031 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
1032 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
1033 error += vmwrite(VMCS_VPID, vpid[i]);
1035 if (guest_l1d_flush && !guest_l1d_flush_sw) {
1036 vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract(
1037 (vm_offset_t)&msr_load_list[0]));
1038 vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
1039 nitems(msr_load_list));
1040 vmcs_write(VMCS_EXIT_MSR_STORE, 0);
1041 vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
1044 /* exception bitmap */
1045 if (vcpu_trace_exceptions(vm, i))
1046 exc_bitmap = 0xffffffff;
1048 exc_bitmap = 1 << IDT_MC;
1049 error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);
1051 vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
1052 error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);
1054 if (virtual_interrupt_delivery) {
1055 error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
1056 error += vmwrite(VMCS_VIRTUAL_APIC,
1057 vtophys(&vmx->apic_page[i]));
1058 error += vmwrite(VMCS_EOI_EXIT0, 0);
1059 error += vmwrite(VMCS_EOI_EXIT1, 0);
1060 error += vmwrite(VMCS_EOI_EXIT2, 0);
1061 error += vmwrite(VMCS_EOI_EXIT3, 0);
1063 if (posted_interrupts) {
1064 error += vmwrite(VMCS_PIR_VECTOR, pirvec);
1065 error += vmwrite(VMCS_PIR_DESC,
1066 vtophys(&vmx->pir_desc[i]));
1069 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
1071 vmx->cap[i].set = 0;
1072 vmx->cap[i].proc_ctls = procbased_ctls;
1073 vmx->cap[i].proc_ctls2 = procbased_ctls2;
1075 vmx->state[i].nextrip = ~0;
1076 vmx->state[i].lastcpu = NOCPU;
1077 vmx->state[i].vpid = vpid[i];
1080 * Set up the CR0/4 shadows, and init the read shadow
1081 * to the power-on register value from the Intel Sys Arch.
1085 error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
1087 panic("vmx_setup_cr0_shadow %d", error);
1089 error = vmx_setup_cr4_shadow(vmcs, 0);
1091 panic("vmx_setup_cr4_shadow %d", error);
1093 vmx->ctx[i].pmap = pmap;
1100 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
1104 func = vmxctx->guest_rax;
1106 handled = x86_emulate_cpuid(vm, vcpu,
1107 (uint32_t*)(&vmxctx->guest_rax),
1108 (uint32_t*)(&vmxctx->guest_rbx),
1109 (uint32_t*)(&vmxctx->guest_rcx),
1110 (uint32_t*)(&vmxctx->guest_rdx));
1114 static __inline void
1115 vmx_run_trace(struct vmx *vmx, int vcpu)
1118 VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
1122 static __inline void
1123 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
1127 VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
1128 handled ? "handled" : "unhandled",
1129 exit_reason_to_str(exit_reason), rip);
1133 static __inline void
1134 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
1137 VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
1141 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
1142 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
1145 * Invalidate guest mappings identified by its vpid from the TLB.
1147 static __inline void
1148 vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
1150 struct vmxstate *vmxstate;
1151 struct invvpid_desc invvpid_desc;
1153 vmxstate = &vmx->state[vcpu];
1154 if (vmxstate->vpid == 0)
1159 * Set the 'lastcpu' to an invalid host cpu.
1161 * This will invalidate TLB entries tagged with the vcpu's
1162 * vpid the next time it runs via vmx_set_pcpu_defaults().
1164 vmxstate->lastcpu = NOCPU;
1168 KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
1169 "critical section", __func__, vcpu));
1172 * Invalidate all mappings tagged with 'vpid'
1174 * We do this because this vcpu was executing on a different host
1175 * cpu when it last ran. We do not track whether it invalidated
1176 * mappings associated with its 'vpid' during that run. So we must
1177 * assume that the mappings associated with 'vpid' on 'curcpu' are
1178 * stale and invalidate them.
1180 * Note that we incur this penalty only when the scheduler chooses to
1181 * move the thread associated with this vcpu between host cpus.
1183 * Note also that this will invalidate mappings tagged with 'vpid'
1186 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
1187 invvpid_desc._res1 = 0;
1188 invvpid_desc._res2 = 0;
1189 invvpid_desc.vpid = vmxstate->vpid;
1190 invvpid_desc.linear_addr = 0;
1191 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
1192 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
1195 * The invvpid can be skipped if an invept is going to
1196 * be performed before entering the guest. The invept
1197 * will invalidate combined mappings tagged with
1198 * 'vmx->eptp' for all vpids.
1200 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
1205 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
1207 struct vmxstate *vmxstate;
1209 vmxstate = &vmx->state[vcpu];
1210 if (vmxstate->lastcpu == curcpu)
1213 vmxstate->lastcpu = curcpu;
1215 vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1217 vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1218 vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1219 vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1220 vmx_invvpid(vmx, vcpu, pmap, 1);
1224 * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1226 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
1228 static void __inline
1229 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1232 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1233 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1234 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1235 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1239 static void __inline
1240 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1243 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1244 ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
1245 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1246 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1247 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1250 static void __inline
1251 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1254 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
1255 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1256 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1257 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1261 static void __inline
1262 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1265 KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
1266 ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
1267 vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1268 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1269 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1273 vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset)
1277 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
1278 vmx->cap[vcpu].proc_ctls |= PROCBASED_TSC_OFFSET;
1279 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1280 VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting");
1283 error = vmwrite(VMCS_TSC_OFFSET, offset);
1288 #define NMI_BLOCKING (VMCS_INTERRUPTIBILITY_NMI_BLOCKING | \
1289 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1290 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING | \
1291 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1294 vmx_inject_nmi(struct vmx *vmx, int vcpu)
1298 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1299 KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
1300 "interruptibility-state %#x", gi));
1302 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1303 KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
1304 "VM-entry interruption information %#x", info));
1307 * Inject the virtual NMI. The vector must be the NMI IDT entry
1308 * or the VMCS entry check will fail.
1310 info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
1311 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1313 VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1315 /* Clear the request */
1316 vm_nmi_clear(vmx->vm, vcpu);
1320 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
1323 int vector, need_nmi_exiting, extint_pending;
1324 uint64_t rflags, entryinfo;
1327 if (vmx->state[vcpu].nextrip != guestrip) {
1328 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1329 if (gi & HWINTR_BLOCKING) {
1330 VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
1331 "cleared due to rip change: %#lx/%#lx",
1332 vmx->state[vcpu].nextrip, guestrip);
1333 gi &= ~HWINTR_BLOCKING;
1334 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1338 if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
1339 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
1340 "intinfo is not valid: %#lx", __func__, entryinfo));
1342 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1343 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
1344 "pending exception: %#lx/%#x", __func__, entryinfo, info));
1347 vector = info & 0xff;
1348 if (vector == IDT_BP || vector == IDT_OF) {
1350 * VT-x requires #BP and #OF to be injected as software
1353 info &= ~VMCS_INTR_T_MASK;
1354 info |= VMCS_INTR_T_SWEXCEPTION;
1357 if (info & VMCS_INTR_DEL_ERRCODE)
1358 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
1360 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1363 if (vm_nmi_pending(vmx->vm, vcpu)) {
1365 * If there are no conditions blocking NMI injection then
1366 * inject it directly here otherwise enable "NMI window
1367 * exiting" to inject it as soon as we can.
1369 * We also check for STI_BLOCKING because some implementations
1370 * don't allow NMI injection in this case. If we are running
1371 * on a processor that doesn't have this restriction it will
1372 * immediately exit and the NMI will be injected in the
1373 * "NMI window exiting" handler.
1375 need_nmi_exiting = 1;
1376 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1377 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1378 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1379 if ((info & VMCS_INTR_VALID) == 0) {
1380 vmx_inject_nmi(vmx, vcpu);
1381 need_nmi_exiting = 0;
1383 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
1384 "due to VM-entry intr info %#x", info);
1387 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
1388 "Guest Interruptibility-state %#x", gi);
1391 if (need_nmi_exiting)
1392 vmx_set_nmi_window_exiting(vmx, vcpu);
1395 extint_pending = vm_extint_pending(vmx->vm, vcpu);
1397 if (!extint_pending && virtual_interrupt_delivery) {
1398 vmx_inject_pir(vlapic);
1403 * If interrupt-window exiting is already in effect then don't bother
1404 * checking for pending interrupts. This is just an optimization and
1405 * not needed for correctness.
1407 if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
1408 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
1409 "pending int_window_exiting");
1413 if (!extint_pending) {
1414 /* Ask the local apic for a vector to inject */
1415 if (!vlapic_pending_intr(vlapic, &vector))
1419 * From the Intel SDM, Volume 3, Section "Maskable
1420 * Hardware Interrupts":
1421 * - maskable interrupt vectors [16,255] can be delivered
1422 * through the local APIC.
1424 KASSERT(vector >= 16 && vector <= 255,
1425 ("invalid vector %d from local APIC", vector));
1427 /* Ask the legacy pic for a vector to inject */
1428 vatpic_pending_intr(vmx->vm, &vector);
1431 * From the Intel SDM, Volume 3, Section "Maskable
1432 * Hardware Interrupts":
1433 * - maskable interrupt vectors [0,255] can be delivered
1434 * through the INTR pin.
1436 KASSERT(vector >= 0 && vector <= 255,
1437 ("invalid vector %d from INTR", vector));
1440 /* Check RFLAGS.IF and the interruptibility state of the guest */
1441 rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1442 if ((rflags & PSL_I) == 0) {
1443 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1444 "rflags %#lx", vector, rflags);
1448 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1449 if (gi & HWINTR_BLOCKING) {
1450 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1451 "Guest Interruptibility-state %#x", vector, gi);
1455 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1456 if (info & VMCS_INTR_VALID) {
1458 * This is expected and could happen for multiple reasons:
1459 * - A vectoring VM-entry was aborted due to astpending
1460 * - A VM-exit happened during event injection.
1461 * - An exception was injected above.
1462 * - An NMI was injected above or after "NMI window exiting"
1464 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1465 "VM-entry intr info %#x", vector, info);
1469 /* Inject the interrupt */
1470 info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
1472 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1474 if (!extint_pending) {
1475 /* Update the Local APIC ISR */
1476 vlapic_intr_accepted(vlapic, vector);
1478 vm_extint_clear(vmx->vm, vcpu);
1479 vatpic_intr_accepted(vmx->vm, vector);
1482 * After we accepted the current ExtINT the PIC may
1483 * have posted another one. If that is the case, set
1484 * the Interrupt Window Exiting execution control so
1485 * we can inject that one too.
1487 * Also, interrupt window exiting allows us to inject any
1488 * pending APIC vector that was preempted by the ExtINT
1489 * as soon as possible. This applies both for the software
1490 * emulated vlapic and the hardware assisted virtual APIC.
1492 vmx_set_int_window_exiting(vmx, vcpu);
1495 VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1501 * Set the Interrupt Window Exiting execution control so we can inject
1502 * the interrupt as soon as the blocking condition goes away.
1504 vmx_set_int_window_exiting(vmx, vcpu);
1508 * If the Virtual NMIs execution control is '1' then the logical processor
1509 * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1510 * the VMCS. An IRET instruction in VMX non-root operation will remove any
1511 * virtual-NMI blocking.
1513 * This unblocking occurs even if the IRET causes a fault. In this case the
1514 * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1517 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1521 VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
1522 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1523 gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1524 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1528 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1532 VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
1533 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1534 gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1535 vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1539 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
1543 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1544 KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
1545 ("NMI blocking is not in effect %#x", gi));
1549 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1551 struct vmxctx *vmxctx;
1553 const struct xsave_limits *limits;
1555 vmxctx = &vmx->ctx[vcpu];
1556 limits = vmm_get_xsave_limits();
1559 * Note that the processor raises a #GP fault on its own if
1560 * xsetbv is executed for CPL != 0, so we do not have to
1561 * emulate that fault here.
1564 /* Only xcr0 is supported. */
1565 if (vmxctx->guest_rcx != 0) {
1566 vm_inject_gp(vmx->vm, vcpu);
1570 /* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1571 if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1572 vm_inject_ud(vmx->vm, vcpu);
1576 xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1577 if ((xcrval & ~limits->xcr0_allowed) != 0) {
1578 vm_inject_gp(vmx->vm, vcpu);
1582 if (!(xcrval & XFEATURE_ENABLED_X87)) {
1583 vm_inject_gp(vmx->vm, vcpu);
1587 /* AVX (YMM_Hi128) requires SSE. */
1588 if (xcrval & XFEATURE_ENABLED_AVX &&
1589 (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1590 vm_inject_gp(vmx->vm, vcpu);
1595 * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1596 * ZMM_Hi256, and Hi16_ZMM.
1598 if (xcrval & XFEATURE_AVX512 &&
1599 (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1600 (XFEATURE_AVX512 | XFEATURE_AVX)) {
1601 vm_inject_gp(vmx->vm, vcpu);
1606 * Intel MPX requires both bound register state flags to be
1609 if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1610 ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1611 vm_inject_gp(vmx->vm, vcpu);
1616 * This runs "inside" vmrun() with the guest's FPU state, so
1617 * modifying xcr0 directly modifies the guest's xcr0, not the
1620 load_xcr(0, xcrval);
1625 vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
1627 const struct vmxctx *vmxctx;
1629 vmxctx = &vmx->ctx[vcpu];
1633 return (vmxctx->guest_rax);
1635 return (vmxctx->guest_rcx);
1637 return (vmxctx->guest_rdx);
1639 return (vmxctx->guest_rbx);
1641 return (vmcs_read(VMCS_GUEST_RSP));
1643 return (vmxctx->guest_rbp);
1645 return (vmxctx->guest_rsi);
1647 return (vmxctx->guest_rdi);
1649 return (vmxctx->guest_r8);
1651 return (vmxctx->guest_r9);
1653 return (vmxctx->guest_r10);
1655 return (vmxctx->guest_r11);
1657 return (vmxctx->guest_r12);
1659 return (vmxctx->guest_r13);
1661 return (vmxctx->guest_r14);
1663 return (vmxctx->guest_r15);
1665 panic("invalid vmx register %d", ident);
1670 vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
1672 struct vmxctx *vmxctx;
1674 vmxctx = &vmx->ctx[vcpu];
1678 vmxctx->guest_rax = regval;
1681 vmxctx->guest_rcx = regval;
1684 vmxctx->guest_rdx = regval;
1687 vmxctx->guest_rbx = regval;
1690 vmcs_write(VMCS_GUEST_RSP, regval);
1693 vmxctx->guest_rbp = regval;
1696 vmxctx->guest_rsi = regval;
1699 vmxctx->guest_rdi = regval;
1702 vmxctx->guest_r8 = regval;
1705 vmxctx->guest_r9 = regval;
1708 vmxctx->guest_r10 = regval;
1711 vmxctx->guest_r11 = regval;
1714 vmxctx->guest_r12 = regval;
1717 vmxctx->guest_r13 = regval;
1720 vmxctx->guest_r14 = regval;
1723 vmxctx->guest_r15 = regval;
1726 panic("invalid vmx register %d", ident);
1731 vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1733 uint64_t crval, regval;
1735 /* We only handle mov to %cr0 at this time */
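	/* The access type is in bits 5:4 of the exit qualification; 0 means MOV to CR. */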
1736 if ((exitqual & 0xf0) != 0x00)
1739 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1741 vmcs_write(VMCS_CR0_SHADOW, regval);
1743 crval = regval | cr0_ones_mask;
1744 crval &= ~cr0_zeros_mask;
1745 vmcs_write(VMCS_GUEST_CR0, crval);
1747 if (regval & CR0_PG) {
1748 uint64_t efer, entry_ctls;
1751 * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1752 * the "IA-32e mode guest" bit in VM-entry control must be
1755 efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1756 if (efer & EFER_LME) {
1758 vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1759 entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1760 entry_ctls |= VM_ENTRY_GUEST_LMA;
1761 vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1769 vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1771 uint64_t crval, regval;
1773 /* We only handle mov to %cr4 at this time */
1774 if ((exitqual & 0xf0) != 0x00)
1777 regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1779 vmcs_write(VMCS_CR4_SHADOW, regval);
1781 crval = regval | cr4_ones_mask;
1782 crval &= ~cr4_zeros_mask;
1783 vmcs_write(VMCS_GUEST_CR4, crval);
1789 vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1791 struct vlapic *vlapic;
1795 /* We only handle mov %cr8 to/from a register at this time. */
1796 if ((exitqual & 0xe0) != 0x00) {
1800 vlapic = vm_lapic(vmx->vm, vcpu);
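	/*
	 * Exit qualification for CR accesses: bits 11:8 identify the GPR and
	 * bit 4 (access type) distinguishes a read of %cr8 (MOV from CR)
	 * from a write to it (MOV to CR).
	 */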
1801 regnum = (exitqual >> 8) & 0xf;
1802 if (exitqual & 0x10) {
1803 cr8 = vlapic_get_cr8(vlapic);
1804 vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
1806 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
1807 vlapic_set_cr8(vlapic, cr8);
1814 * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
1821 ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
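	/* DPL is encoded in bits 6:5 of the segment access-rights field. */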
1822 return ((ssar >> 5) & 0x3);
1825 static enum vm_cpu_mode
1830 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
1831 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1833 return (CPU_MODE_64BIT); /* CS.L = 1 */
1835 return (CPU_MODE_COMPATIBILITY);
1836 } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
1837 return (CPU_MODE_PROTECTED);
1839 return (CPU_MODE_REAL);
1843 static enum vm_paging_mode
1844 vmx_paging_mode(void)
1847 if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1848 return (PAGING_MODE_FLAT);
1849 if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1850 return (PAGING_MODE_32);
1851 if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1852 return (PAGING_MODE_64);
1854 return (PAGING_MODE_PAE);
1858 inout_str_index(struct vmx *vmx, int vcpuid, int in)
1862 enum vm_reg_name reg;
1864 reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
1865 error = vmx_getreg(vmx, vcpuid, reg, &val);
1866 KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
1871 inout_str_count(struct vmx *vmx, int vcpuid, int rep)
1877 error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
1878 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
1886 inout_str_addrsize(uint32_t inst_info)
1890 size = (inst_info >> 7) & 0x7;
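	/* Bits 9:7 of the VM-exit instruction information encode the address size. */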
1893 return (2); /* 16 bit */
1895 return (4); /* 32 bit */
1897 return (8); /* 64 bit */
1899 panic("%s: invalid size encoding %d", __func__, size);
1904 inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
1905 struct vm_inout_str *vis)
1910 vis->seg_name = VM_REG_GUEST_ES;
1912 s = (inst_info >> 15) & 0x7;
1913 vis->seg_name = vm_segment_name(s);
1916 error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
1917 KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
1921 vmx_paging_info(struct vm_guest_paging *paging)
1923 paging->cr3 = vmcs_guest_cr3();
1924 paging->cpl = vmx_cpl();
1925 paging->cpu_mode = vmx_cpu_mode();
1926 paging->paging_mode = vmx_paging_mode();
1930 vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
1932 struct vm_guest_paging *paging;
1935 paging = &vmexit->u.inst_emul.paging;
1937 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1938 vmexit->inst_length = 0;
1939 vmexit->u.inst_emul.gpa = gpa;
1940 vmexit->u.inst_emul.gla = gla;
1941 vmx_paging_info(paging);
1942 switch (paging->cpu_mode) {
1944 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1945 vmexit->u.inst_emul.cs_d = 0;
1947 case CPU_MODE_PROTECTED:
1948 case CPU_MODE_COMPATIBILITY:
1949 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1950 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1951 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
1954 vmexit->u.inst_emul.cs_base = 0;
1955 vmexit->u.inst_emul.cs_d = 0;
1958 vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
1962 ept_fault_type(uint64_t ept_qual)
1966 if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1967 fault_type = VM_PROT_WRITE;
1968 else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1969 fault_type = VM_PROT_EXECUTE;
1971 fault_type = VM_PROT_READ;
1973 return (fault_type);
1977 ept_emulation_fault(uint64_t ept_qual)
1981 /* EPT fault on an instruction fetch doesn't make sense here */
1982 if (ept_qual & EPT_VIOLATION_INST_FETCH)
1985 /* EPT fault must be a read fault or a write fault */
1986 read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1987 write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1988 if ((read | write) == 0)
1992 * The EPT violation must have been caused by accessing a
1993 * guest-physical address that is a translation of a guest-linear
1996 if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1997 (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
2005 apic_access_virtualization(struct vmx *vmx, int vcpuid)
2007 uint32_t proc_ctls2;
2009 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
2010 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
2014 x2apic_virtualization(struct vmx *vmx, int vcpuid)
2016 uint32_t proc_ctls2;
2018 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
2019 return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
2023 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
2026 int error, handled, offset;
2027 uint32_t *apic_regs, vector;
2031 offset = APIC_WRITE_OFFSET(qual);
2033 if (!apic_access_virtualization(vmx, vcpuid)) {
2035 * In general there should not be any APIC write VM-exits
2036 * unless APIC-access virtualization is enabled.
2038 * However self-IPI virtualization can legitimately trigger
2039 * an APIC-write VM-exit so treat it specially.
2041 if (x2apic_virtualization(vmx, vcpuid) &&
2042 offset == APIC_OFFSET_SELF_IPI) {
2043 apic_regs = (uint32_t *)(vlapic->apic_page);
2044 vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
2045 vlapic_self_ipi_handler(vlapic, vector);
2052 case APIC_OFFSET_ID:
2053 vlapic_id_write_handler(vlapic);
2055 case APIC_OFFSET_LDR:
2056 vlapic_ldr_write_handler(vlapic);
2058 case APIC_OFFSET_DFR:
2059 vlapic_dfr_write_handler(vlapic);
2061 case APIC_OFFSET_SVR:
2062 vlapic_svr_write_handler(vlapic);
2064 case APIC_OFFSET_ESR:
2065 vlapic_esr_write_handler(vlapic);
2067 case APIC_OFFSET_ICR_LOW:
2069 error = vlapic_icrlo_write_handler(vlapic, &retu);
2070 if (error != 0 || retu)
2071 handled = UNHANDLED;
2073 case APIC_OFFSET_CMCI_LVT:
2074 case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
2075 vlapic_lvt_write_handler(vlapic, offset);
2077 case APIC_OFFSET_TIMER_ICR:
2078 vlapic_icrtmr_write_handler(vlapic);
2080 case APIC_OFFSET_TIMER_DCR:
2081 vlapic_dcr_write_handler(vlapic);
2084 handled = UNHANDLED;
2091 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
2094 if (apic_access_virtualization(vmx, vcpuid) &&
2095 (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
2102 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2105 int access_type, offset, allowed;
2107 if (!apic_access_virtualization(vmx, vcpuid))
2110 qual = vmexit->u.vmx.exit_qualification;
2111 access_type = APIC_ACCESS_TYPE(qual);
2112 offset = APIC_ACCESS_OFFSET(qual);
2115 if (access_type == 0) {
2117 * Read data access to the following registers is expected.
2120 case APIC_OFFSET_APR:
2121 case APIC_OFFSET_PPR:
2122 case APIC_OFFSET_RRR:
2123 case APIC_OFFSET_CMCI_LVT:
2124 case APIC_OFFSET_TIMER_CCR:
2130 } else if (access_type == 1) {
2132 * Write data access to the following registers is expected.
2135 case APIC_OFFSET_VER:
2136 case APIC_OFFSET_APR:
2137 case APIC_OFFSET_PPR:
2138 case APIC_OFFSET_RRR:
2139 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
2140 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
2141 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
2142 case APIC_OFFSET_CMCI_LVT:
2143 case APIC_OFFSET_TIMER_CCR:
2152 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
2157 * Regardless of whether the APIC-access is allowed this handler
2158 * always returns UNHANDLED:
2159 * - if the access is allowed then it is handled by emulating the
2160 * instruction that caused the VM-exit (outside the critical section)
2161 * - if the access is not allowed then it will be converted to an
2162 * exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
2167 static enum task_switch_reason
2168 vmx_task_switch_reason(uint64_t qual)
2172 reason = (qual >> 30) & 0x3;
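	/* Bits 31:30 of the exit qualification encode the task-switch source. */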
2181 return (TSR_IDT_GATE);
2183 panic("%s: invalid reason %d", __func__, reason);
2188 emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2193 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2195 error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2201 emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2203 struct vmxctx *vmxctx;
2209 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2211 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2215 vmxctx = &vmx->ctx[vcpuid];
2216 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2217 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2220 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2221 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2228 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2230 int error, errcode, errcode_valid, handled, in;
2231 struct vmxctx *vmxctx;
2232 struct vlapic *vlapic;
2233 struct vm_inout_str *vis;
2234 struct vm_task_switch *ts;
2235 uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2236 uint32_t intr_type, intr_vec, reason;
2237 uint64_t exitintinfo, qual, gpa;
2240 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2241 CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2243 handled = UNHANDLED;
2244 vmxctx = &vmx->ctx[vcpu];
2246 qual = vmexit->u.vmx.exit_qualification;
2247 reason = vmexit->u.vmx.exit_reason;
2248 vmexit->exitcode = VM_EXITCODE_BOGUS;
2250 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2251 SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit);
2254 * VM-entry failures during or after loading guest state.
2256 * These VM-exits are uncommon but must be handled specially
2257 * as most VM-exit fields are not populated as usual.
2259 if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
2260 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
2261 __asm __volatile("int $18");
2266 * VM exits that can be triggered during event delivery need to
2267 * be handled specially by re-injecting the event if the IDT
2268 * vectoring information field's valid bit is set.
2270 * See "Information for VM Exits During Event Delivery" in Intel SDM
2273 idtvec_info = vmcs_idt_vectoring_info();
2274 if (idtvec_info & VMCS_IDT_VEC_VALID) {
2275 idtvec_info &= ~(1 << 12); /* clear undefined bit */
2276 exitintinfo = idtvec_info;
2277 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2278 idtvec_err = vmcs_idt_vectoring_err();
2279 exitintinfo |= (uint64_t)idtvec_err << 32;
2281 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2282 KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
2286 * If 'virtual NMIs' are being used and the VM-exit
2287 * happened while injecting an NMI during the previous
2288 * VM-entry, then clear "blocking by NMI" in the
2289 * Guest Interruptibility-State so the NMI can be
2290 * reinjected on the subsequent VM-entry.
2292 * However, if the NMI was being delivered through a task
2293 * gate, then the new task must start execution with NMIs
2294 * blocked so don't clear NMI blocking in this case.
2296 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2297 if (intr_type == VMCS_INTR_T_NMI) {
2298 if (reason != EXIT_REASON_TASK_SWITCH)
2299 vmx_clear_nmi_blocking(vmx, vcpu);
2301 vmx_assert_nmi_blocking(vmx, vcpu);
2305 * Update VM-entry instruction length if the event being
2306 * delivered was a software interrupt or software exception.
2308 if (intr_type == VMCS_INTR_T_SWINTR ||
2309 intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2310 intr_type == VMCS_INTR_T_SWEXCEPTION) {
2311 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2316 case EXIT_REASON_TASK_SWITCH:
2317 ts = &vmexit->u.task_switch;
2318 ts->tsssel = qual & 0xffff;
2319 ts->reason = vmx_task_switch_reason(qual);
2321 ts->errcode_valid = 0;
2322 vmx_paging_info(&ts->paging);
2324 * If the task switch was due to a CALL, JMP, IRET, software
2325 * interrupt (INT n) or software exception (INT3, INTO),
2326 * then the saved %rip references the instruction that caused
2327 * the task switch. The instruction length field in the VMCS
2328 * is valid in this case.
2330 * In all other cases (e.g., NMI, hardware exception) the
2331 * saved %rip is one that would have been saved in the old TSS
2332 * had the task switch completed normally so the instruction
2333 * length field is not needed in this case and is explicitly set to 0.
2336 if (ts->reason == TSR_IDT_GATE) {
2337 KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2338 ("invalid idtvec_info %#x for IDT task switch",
2340 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2341 if (intr_type != VMCS_INTR_T_SWINTR &&
2342 intr_type != VMCS_INTR_T_SWEXCEPTION &&
2343 intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2344 /* Task switch triggered by external event */
2346 vmexit->inst_length = 0;
2347 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2348 ts->errcode_valid = 1;
2349 ts->errcode = vmcs_idt_vectoring_err();
2353 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2354 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts);
2355 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2356 "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2357 ts->ext ? "external" : "internal",
2358 ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2360 case EXIT_REASON_CR_ACCESS:
2361 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2362 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual);
2363 switch (qual & 0xf) {
2365 handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2368 handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2371 handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2375 case EXIT_REASON_RDMSR:
2376 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2378 ecx = vmxctx->guest_rcx;
2379 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2380 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx);
2381 error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
2383 vmexit->exitcode = VM_EXITCODE_RDMSR;
2384 vmexit->u.msr.code = ecx;
2388 /* Return to userspace with a valid exitcode */
2389 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2390 ("emulate_rdmsr retu with bogus exitcode"));
2393 case EXIT_REASON_WRMSR:
2394 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2396 eax = vmxctx->guest_rax;
2397 ecx = vmxctx->guest_rcx;
2398 edx = vmxctx->guest_rdx;
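/*
 * WRMSR takes the MSR index in %ecx and the 64-bit value to write in
 * %edx:%eax, so reassemble the value before emulating the write.
 */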
2399 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2400 ecx, (uint64_t)edx << 32 | eax);
2401 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx,
2402 (uint64_t)edx << 32 | eax);
2403 error = emulate_wrmsr(vmx, vcpu, ecx,
2404 (uint64_t)edx << 32 | eax, &retu);
2406 vmexit->exitcode = VM_EXITCODE_WRMSR;
2407 vmexit->u.msr.code = ecx;
2408 vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2412 /* Return to userspace with a valid exitcode */
2413 KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2414 ("emulate_wrmsr retu with bogus exitcode"));
2417 case EXIT_REASON_HLT:
2418 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2419 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit);
2420 vmexit->exitcode = VM_EXITCODE_HLT;
2421 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2422 if (virtual_interrupt_delivery)
2423 vmexit->u.hlt.intr_status =
2424 vmcs_read(VMCS_GUEST_INTR_STATUS);
2426 vmexit->u.hlt.intr_status = 0;
2428 case EXIT_REASON_MTF:
2429 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2430 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit);
2431 vmexit->exitcode = VM_EXITCODE_MTRAP;
2432 vmexit->inst_length = 0;
2434 case EXIT_REASON_PAUSE:
2435 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2436 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit);
2437 vmexit->exitcode = VM_EXITCODE_PAUSE;
2439 case EXIT_REASON_INTR_WINDOW:
2440 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2441 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit);
2442 vmx_clear_int_window_exiting(vmx, vcpu);
2444 case EXIT_REASON_EXT_INTR:
2446 * External interrupts serve only to cause VM exits and allow
2447 * the host interrupt handler to run.
2449 * If this external interrupt triggers a virtual interrupt
2450 * to a VM, then that state will be recorded by the
2451 * host interrupt handler in the VM's softc. We will inject
2452 * this virtual interrupt during the subsequent VM enter.
2454 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2455 SDT_PROBE4(vmm, vmx, exit, interrupt,
2456 vmx, vcpu, vmexit, intr_info);
2459 * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2460 * This appears to be a bug in VMware Fusion?
2462 if (!(intr_info & VMCS_INTR_VALID))
2464 KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2465 (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2466 ("VM exit interruption info invalid: %#x", intr_info));
2467 vmx_trigger_hostintr(intr_info & 0xff);
2470 * This is special. We want to treat this as a 'handled'
2471 * VM-exit but not increment the instruction pointer.
2473 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2475 case EXIT_REASON_NMI_WINDOW:
2476 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit);
2477 /* Exit to allow the pending virtual NMI to be injected */
2478 if (vm_nmi_pending(vmx->vm, vcpu))
2479 vmx_inject_nmi(vmx, vcpu);
2480 vmx_clear_nmi_window_exiting(vmx, vcpu);
2481 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2483 case EXIT_REASON_INOUT:
2484 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2485 vmexit->exitcode = VM_EXITCODE_INOUT;
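/*
 * Decode the I/O exit qualification: bits 2:0 hold the access size
 * minus one, bit 3 the direction (1 = IN), bit 4 the string flag,
 * bit 5 the REP prefix and bits 31:16 the port number.
 */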
2486 vmexit->u.inout.bytes = (qual & 0x7) + 1;
2487 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2488 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2489 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2490 vmexit->u.inout.port = (uint16_t)(qual >> 16);
2491 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2492 if (vmexit->u.inout.string) {
2493 inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2494 vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2495 vis = &vmexit->u.inout_str;
2496 vmx_paging_info(&vis->paging);
2497 vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2498 vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2499 vis->index = inout_str_index(vmx, vcpu, in);
2500 vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2501 vis->addrsize = inout_str_addrsize(inst_info);
2502 inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2504 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit);
2506 case EXIT_REASON_CPUID:
2507 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2508 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit);
2509 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2511 case EXIT_REASON_EXCEPTION:
2512 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2513 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2514 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2515 ("VM exit interruption info invalid: %#x", intr_info));
2517 intr_vec = intr_info & 0xff;
2518 intr_type = intr_info & VMCS_INTR_T_MASK;
2521 * If Virtual NMIs control is 1 and the VM-exit is due to a
2522 * fault encountered during the execution of IRET then we must
2523 * restore the state of "virtual-NMI blocking" before resuming the guest.
2526 * See "Resuming Guest Software after Handling an Exception".
2527 * See "Information for VM Exits Due to Vectored Events".
2529 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2530 (intr_vec != IDT_DF) &&
2531 (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2532 vmx_restore_nmi_blocking(vmx, vcpu);
2535 * The NMI has already been handled in vmx_exit_handle_nmi().
2537 if (intr_type == VMCS_INTR_T_NMI)
2541 * Call the machine check handler by hand. Also don't reflect
2542 * the machine check back into the guest.
2544 if (intr_vec == IDT_MC) {
2545 VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
2546 __asm __volatile("int $18");
2550 if (intr_vec == IDT_PF) {
2551 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
2552 KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
2557 * Software exceptions exhibit trap-like behavior. This in
2558 * turn requires populating the VM-entry instruction length
2559 * so that the %rip in the trap frame is past the INT3/INTO instruction.
2562 if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2563 vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2565 /* Reflect all other exceptions back into the guest */
2566 errcode_valid = errcode = 0;
2567 if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2569 errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2571 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
2572 "the guest", intr_vec, errcode);
2573 SDT_PROBE5(vmm, vmx, exit, exception,
2574 vmx, vcpu, vmexit, intr_vec, errcode);
2575 error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
2576 errcode_valid, errcode, 0);
2577 KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2581 case EXIT_REASON_EPT_FAULT:
2583 * If 'gpa' lies within the address space allocated to
2584 * memory, then this must be a nested page fault; otherwise
2585 * this must be an instruction that accesses MMIO space.
2588 if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
2589 apic_access_fault(vmx, vcpu, gpa)) {
2590 vmexit->exitcode = VM_EXITCODE_PAGING;
2591 vmexit->inst_length = 0;
2592 vmexit->u.paging.gpa = gpa;
2593 vmexit->u.paging.fault_type = ept_fault_type(qual);
2594 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2595 SDT_PROBE5(vmm, vmx, exit, nestedfault,
2596 vmx, vcpu, vmexit, gpa, qual);
2597 } else if (ept_emulation_fault(qual)) {
2598 vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2599 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2600 SDT_PROBE4(vmm, vmx, exit, mmiofault,
2601 vmx, vcpu, vmexit, gpa);
2604 * If Virtual NMIs control is 1 and the VM-exit is due to an
2605 * EPT fault during the execution of IRET then we must restore
2606 * the state of "virtual-NMI blocking" before resuming.
2608 * See description of "NMI unblocking due to IRET" in
2609 * "Exit Qualification for EPT Violations".
2611 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2612 (qual & EXIT_QUAL_NMIUDTI) != 0)
2613 vmx_restore_nmi_blocking(vmx, vcpu);
2615 case EXIT_REASON_VIRTUALIZED_EOI:
2616 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2617 vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2618 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit);
2619 vmexit->inst_length = 0; /* trap-like */
2621 case EXIT_REASON_APIC_ACCESS:
2622 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit);
2623 handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2625 case EXIT_REASON_APIC_WRITE:
2627 * APIC-write VM exit is trap-like so the %rip is already
2628 * pointing to the next instruction.
2630 vmexit->inst_length = 0;
2631 vlapic = vm_lapic(vmx->vm, vcpu);
2632 SDT_PROBE4(vmm, vmx, exit, apicwrite,
2633 vmx, vcpu, vmexit, vlapic);
2634 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2636 case EXIT_REASON_XSETBV:
2637 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit);
2638 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2640 case EXIT_REASON_MONITOR:
2641 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit);
2642 vmexit->exitcode = VM_EXITCODE_MONITOR;
2644 case EXIT_REASON_MWAIT:
2645 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit);
2646 vmexit->exitcode = VM_EXITCODE_MWAIT;
2648 case EXIT_REASON_VMCALL:
2649 case EXIT_REASON_VMCLEAR:
2650 case EXIT_REASON_VMLAUNCH:
2651 case EXIT_REASON_VMPTRLD:
2652 case EXIT_REASON_VMPTRST:
2653 case EXIT_REASON_VMREAD:
2654 case EXIT_REASON_VMRESUME:
2655 case EXIT_REASON_VMWRITE:
2656 case EXIT_REASON_VMXOFF:
2657 case EXIT_REASON_VMXON:
2658 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit);
2659 vmexit->exitcode = VM_EXITCODE_VMINSN;
2662 SDT_PROBE4(vmm, vmx, exit, unknown,
2663 vmx, vcpu, vmexit, reason);
2664 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2670 * It is possible that control is returned to userland
2671 * even though we were able to handle the VM exit in the kernel.
2674 * In such a case we want to make sure that the userland
2675 * restarts guest execution at the instruction *after*
2676 * the one we just processed. Therefore we update the
2677 * guest rip in the VMCS and in 'vmexit'.
2679 vmexit->rip += vmexit->inst_length;
2680 vmexit->inst_length = 0;
2681 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2683 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2685 * If this VM exit was not claimed by anybody then
2686 * treat it as a generic VMX exit.
2688 vmexit->exitcode = VM_EXITCODE_VMX;
2689 vmexit->u.vmx.status = VM_SUCCESS;
2690 vmexit->u.vmx.inst_type = 0;
2691 vmexit->u.vmx.inst_error = 0;
2694 * The exitcode and collateral have been populated.
2695 * The VM exit will be processed further in userland.
2700 SDT_PROBE4(vmm, vmx, exit, return,
2701 vmx, vcpu, vmexit, handled);
2705 static __inline void
2706 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2709 KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2710 ("vmx_exit_inst_error: invalid inst_fail_status %d",
2711 vmxctx->inst_fail_status));
2713 vmexit->inst_length = 0;
2714 vmexit->exitcode = VM_EXITCODE_VMX;
2715 vmexit->u.vmx.status = vmxctx->inst_fail_status;
2716 vmexit->u.vmx.inst_error = vmcs_instruction_error();
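/*
 * The VMX instruction failed before the guest ever ran, so there is no
 * meaningful exit reason or qualification to report.
 */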
2717 vmexit->u.vmx.exit_reason = ~0;
2718 vmexit->u.vmx.exit_qualification = ~0;
2721 case VMX_VMRESUME_ERROR:
2722 case VMX_VMLAUNCH_ERROR:
2723 case VMX_INVEPT_ERROR:
2724 vmexit->u.vmx.inst_type = rc;
2727 panic("vmx_exit_inst_error: vmx_enter_guest returned %d", rc);
2732 * If the NMI-exiting VM execution control is set to '1' then an NMI in
2733 * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2734 * sufficient to simply vector to the NMI handler via a software interrupt.
2735 * However, this must be done before maskable interrupts are enabled
2736 * otherwise the "iret" issued by an interrupt handler will incorrectly
2737 * clear NMI blocking.
2739 static __inline void
2740 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2744 KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2746 if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2749 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2750 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2751 ("VM exit interruption info invalid: %#x", intr_info));
2753 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2754 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2755 "to NMI has invalid vector: %#x", intr_info));
2756 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2757 __asm __volatile("int $2");
2761 static __inline void
2762 vmx_dr_enter_guest(struct vmxctx *vmxctx)
2766 /* Save host control debug registers. */
2767 vmxctx->host_dr7 = rdr7();
2768 vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);
2771 * Disable debugging in DR7 and DEBUGCTL to avoid triggering
2772 * exceptions in the host based on the guest DRx values. The
2773 * guest DR7 and DEBUGCTL are saved/restored in the VMCS.
2776 wrmsr(MSR_DEBUGCTLMSR, 0);
2779 * Disable single stepping the kernel to avoid corrupting the
2780 * guest DR6. A debugger might still be able to corrupt the
2781 * guest DR6 by setting a breakpoint after this point and then single stepping.
2784 rflags = read_rflags();
2785 vmxctx->host_tf = rflags & PSL_T;
2786 write_rflags(rflags & ~PSL_T);
2788 /* Save host debug registers. */
2789 vmxctx->host_dr0 = rdr0();
2790 vmxctx->host_dr1 = rdr1();
2791 vmxctx->host_dr2 = rdr2();
2792 vmxctx->host_dr3 = rdr3();
2793 vmxctx->host_dr6 = rdr6();
2795 /* Restore guest debug registers. */
2796 load_dr0(vmxctx->guest_dr0);
2797 load_dr1(vmxctx->guest_dr1);
2798 load_dr2(vmxctx->guest_dr2);
2799 load_dr3(vmxctx->guest_dr3);
2800 load_dr6(vmxctx->guest_dr6);
2803 static __inline void
2804 vmx_dr_leave_guest(struct vmxctx *vmxctx)
2807 /* Save guest debug registers. */
2808 vmxctx->guest_dr0 = rdr0();
2809 vmxctx->guest_dr1 = rdr1();
2810 vmxctx->guest_dr2 = rdr2();
2811 vmxctx->guest_dr3 = rdr3();
2812 vmxctx->guest_dr6 = rdr6();
2815 * Restore host debug registers. Restore DR7, DEBUGCTL, and PSL_T last.
2818 load_dr0(vmxctx->host_dr0);
2819 load_dr1(vmxctx->host_dr1);
2820 load_dr2(vmxctx->host_dr2);
2821 load_dr3(vmxctx->host_dr3);
2822 load_dr6(vmxctx->host_dr6);
2823 wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl);
2824 load_dr7(vmxctx->host_dr7);
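/* Re-enable single stepping (PSL_T) if it was active on entry. */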
2825 write_rflags(read_rflags() | vmxctx->host_tf);
2829 vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
2830 struct vm_eventinfo *evinfo)
2832 int rc, handled, launched;
2835 struct vmxctx *vmxctx;
2837 struct vm_exit *vmexit;
2838 struct vlapic *vlapic;
2839 uint32_t exit_reason;
2840 struct region_descriptor gdtr, idtr;
2845 vmcs = &vmx->vmcs[vcpu];
2846 vmxctx = &vmx->ctx[vcpu];
2847 vlapic = vm_lapic(vm, vcpu);
2848 vmexit = vm_exitinfo(vm, vcpu);
2851 KASSERT(vmxctx->pmap == pmap,
2852 ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2854 vmx_msr_guest_enter(vmx, vcpu);
2860 * We do this every time because we may set up the virtual machine
2861 * from a different process than the one that actually runs it.
2863 * If the life of a virtual machine was spent entirely in the context
2864 * of a single process we could do this once in vmx_vminit().
2866 vmcs_write(VMCS_HOST_CR3, rcr3());
2868 vmcs_write(VMCS_GUEST_RIP, rip);
2869 vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2871 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
2872 "%#lx/%#lx", __func__, vmcs_guest_rip(), rip));
2874 handled = UNHANDLED;
2876 * Interrupts are disabled from this point on until the
2877 * guest starts executing. This is done for the following reasons:
2880 * If an AST is asserted on this thread after the check below,
2881 * then the IPI_AST notification will not be lost, because it
2882 * will cause a VM exit due to external interrupt as soon as
2883 * the guest state is loaded.
2885 * A posted interrupt after 'vmx_inject_interrupts()' will
2886 * not be "lost" because it will be held pending in the host
2887 * APIC because interrupts are disabled. The pending interrupt
2888 * will be recognized as soon as the guest state is loaded.
2890 * The same reasoning applies to the IPI generated by
2891 * pmap_invalidate_ept().
2894 vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
2897 * Check for vcpu suspension after injecting events because
2898 * vmx_inject_interrupts() can suspend the vcpu due to a triple fault.
2901 if (vcpu_suspended(evinfo)) {
2903 vm_exit_suspended(vmx->vm, vcpu, rip);
2907 if (vcpu_rendezvous_pending(evinfo)) {
2909 vm_exit_rendezvous(vmx->vm, vcpu, rip);
2913 if (vcpu_reqidle(evinfo)) {
2915 vm_exit_reqidle(vmx->vm, vcpu, rip);
2919 if (vcpu_should_yield(vm, vcpu)) {
2921 vm_exit_astpending(vmx->vm, vcpu, rip);
2922 vmx_astpending_trace(vmx, vcpu, rip);
2927 if (vcpu_debugged(vm, vcpu)) {
2929 vm_exit_debug(vmx->vm, vcpu, rip);
2934 * VM exits restore the base address but not the
2935 * limits of GDTR and IDTR. The VMCS only stores the
2936 * base address, so VM exits set the limits to 0xffff.
2937 * Save and restore the full GDTR and IDTR to restore the limit.
2940 * The VMCS does not save the LDTR at all, and VM
2941 * exits clear LDTR as if a NULL selector were loaded.
2942 * The userspace hypervisor probably doesn't use a
2943 * LDT, but save and restore it to be safe.
2949 vmx_run_trace(vmx, vcpu);
2950 vmx_dr_enter_guest(vmxctx);
2951 rc = vmx_enter_guest(vmxctx, vmx, launched);
2952 vmx_dr_leave_guest(vmxctx);
2958 /* Collect some information for VM exit processing */
2959 vmexit->rip = rip = vmcs_guest_rip();
2960 vmexit->inst_length = vmexit_instruction_length();
2961 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2962 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2964 /* Update 'nextrip' */
2965 vmx->state[vcpu].nextrip = rip;
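/*
 * 'nextrip' lets the next VM entry detect whether the guest has made
 * forward progress (e.g. vmx_inject_interrupts() uses it to decide when
 * a stale interrupt shadow can be cleared).
 */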
2967 if (rc == VMX_GUEST_VMEXIT) {
2968 vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2970 handled = vmx_exit_process(vmx, vcpu, vmexit);
2973 vmx_exit_inst_error(vmxctx, rc, vmexit);
2976 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2981 * If a VM exit has been handled then the exitcode must be BOGUS
2982 * If a VM exit is not handled then the exitcode must not be BOGUS
2984 if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2985 (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2986 panic("Mismatch between handled (%d) and exitcode (%d)",
2987 handled, vmexit->exitcode);
2991 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2993 VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2997 vmx_msr_guest_exit(vmx, vcpu);
3003 vmx_vmcleanup(void *arg)
3006 struct vmx *vmx = arg;
3009 if (apic_access_virtualization(vmx, 0))
3010 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3012 maxcpus = vm_get_maxcpus(vmx->vm);
3013 for (i = 0; i < maxcpus; i++)
3014 vpid_free(vmx->state[i].vpid);
3022 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
3026 case VM_REG_GUEST_RAX:
3027 return (&vmxctx->guest_rax);
3028 case VM_REG_GUEST_RBX:
3029 return (&vmxctx->guest_rbx);
3030 case VM_REG_GUEST_RCX:
3031 return (&vmxctx->guest_rcx);
3032 case VM_REG_GUEST_RDX:
3033 return (&vmxctx->guest_rdx);
3034 case VM_REG_GUEST_RSI:
3035 return (&vmxctx->guest_rsi);
3036 case VM_REG_GUEST_RDI:
3037 return (&vmxctx->guest_rdi);
3038 case VM_REG_GUEST_RBP:
3039 return (&vmxctx->guest_rbp);
3040 case VM_REG_GUEST_R8:
3041 return (&vmxctx->guest_r8);
3042 case VM_REG_GUEST_R9:
3043 return (&vmxctx->guest_r9);
3044 case VM_REG_GUEST_R10:
3045 return (&vmxctx->guest_r10);
3046 case VM_REG_GUEST_R11:
3047 return (&vmxctx->guest_r11);
3048 case VM_REG_GUEST_R12:
3049 return (&vmxctx->guest_r12);
3050 case VM_REG_GUEST_R13:
3051 return (&vmxctx->guest_r13);
3052 case VM_REG_GUEST_R14:
3053 return (&vmxctx->guest_r14);
3054 case VM_REG_GUEST_R15:
3055 return (&vmxctx->guest_r15);
3056 case VM_REG_GUEST_CR2:
3057 return (&vmxctx->guest_cr2);
3058 case VM_REG_GUEST_DR0:
3059 return (&vmxctx->guest_dr0);
3060 case VM_REG_GUEST_DR1:
3061 return (&vmxctx->guest_dr1);
3062 case VM_REG_GUEST_DR2:
3063 return (&vmxctx->guest_dr2);
3064 case VM_REG_GUEST_DR3:
3065 return (&vmxctx->guest_dr3);
3066 case VM_REG_GUEST_DR6:
3067 return (&vmxctx->guest_dr6);
3075 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
3079 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
3087 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
3091 if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
3099 vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
3104 error = vmcs_getreg(&vmx->vmcs[vcpu], running,
3105 VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
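/*
 * The guest is in an interrupt shadow when either the "blocking by STI"
 * or "blocking by MOV SS" bits are set in the guest
 * interruptibility-state field.
 */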
3106 *retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
3111 vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
3118 * Forcing the vcpu into an interrupt shadow is not supported.
3125 vmcs = &vmx->vmcs[vcpu];
3126 ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
3127 error = vmcs_getreg(vmcs, running, ident, &gi);
3129 gi &= ~HWINTR_BLOCKING;
3130 error = vmcs_setreg(vmcs, running, ident, gi);
3133 VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
3134 error ? "failed" : "succeeded");
3139 vmx_shadow_reg(int reg)
3146 case VM_REG_GUEST_CR0:
3147 shreg = VMCS_CR0_SHADOW;
3149 case VM_REG_GUEST_CR4:
3150 shreg = VMCS_CR4_SHADOW;
3160 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
3162 int running, hostcpu;
3163 struct vmx *vmx = arg;
3165 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3166 if (running && hostcpu != curcpu)
3167 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
3169 if (reg == VM_REG_GUEST_INTR_SHADOW)
3170 return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
3172 if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
3175 return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
3179 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
3181 int error, hostcpu, running, shadow;
3184 struct vmx *vmx = arg;
3186 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3187 if (running && hostcpu != curcpu)
3188 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
3190 if (reg == VM_REG_GUEST_INTR_SHADOW)
3191 return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
3193 if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
3196 /* Do not permit user write access to VMCS fields by offset. */
3200 error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
3204 * If the "load EFER" VM-entry control is 1 then the
3205 * value of EFER.LMA must be identical to the "IA-32e mode guest"
3206 * bit in the VM-entry controls.
3208 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
3209 (reg == VM_REG_GUEST_EFER)) {
3210 vmcs_getreg(&vmx->vmcs[vcpu], running,
3211 VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
3213 ctls |= VM_ENTRY_GUEST_LMA;
3215 ctls &= ~VM_ENTRY_GUEST_LMA;
3216 vmcs_setreg(&vmx->vmcs[vcpu], running,
3217 VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
3220 shadow = vmx_shadow_reg(reg);
3223 * Store the unmodified value in the shadow
3225 error = vmcs_setreg(&vmx->vmcs[vcpu], running,
3226 VMCS_IDENT(shadow), val);
3229 if (reg == VM_REG_GUEST_CR3) {
3231 * Invalidate the guest vcpu's TLB mappings to emulate
3232 * the behavior of updating %cr3.
3234 * XXX the processor retains global mappings when %cr3
3235 * is updated but vmx_invvpid() does not.
3237 pmap = vmx->ctx[vcpu].pmap;
3238 vmx_invvpid(vmx, vcpu, pmap, running);
3246 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
3248 int hostcpu, running;
3249 struct vmx *vmx = arg;
3251 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3252 if (running && hostcpu != curcpu)
3253 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
3255 return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
3259 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
3261 int hostcpu, running;
3262 struct vmx *vmx = arg;
3264 running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3265 if (running && hostcpu != curcpu)
3266 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
3268 return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
3272 vmx_getcap(void *arg, int vcpu, int type, int *retval)
3274 struct vmx *vmx = arg;
3280 vcap = vmx->cap[vcpu].set;
3283 case VM_CAP_HALT_EXIT:
3287 case VM_CAP_PAUSE_EXIT:
3291 case VM_CAP_MTRAP_EXIT:
3292 if (cap_monitor_trap)
3295 case VM_CAP_UNRESTRICTED_GUEST:
3296 if (cap_unrestricted_guest)
3299 case VM_CAP_ENABLE_INVPCID:
3308 *retval = (vcap & (1 << type)) ? 1 : 0;
3314 vmx_setcap(void *arg, int vcpu, int type, int val)
3316 struct vmx *vmx = arg;
3317 struct vmcs *vmcs = &vmx->vmcs[vcpu];
3329 case VM_CAP_HALT_EXIT:
3330 if (cap_halt_exit) {
3332 pptr = &vmx->cap[vcpu].proc_ctls;
3334 flag = PROCBASED_HLT_EXITING;
3335 reg = VMCS_PRI_PROC_BASED_CTLS;
3338 case VM_CAP_MTRAP_EXIT:
3339 if (cap_monitor_trap) {
3341 pptr = &vmx->cap[vcpu].proc_ctls;
3343 flag = PROCBASED_MTF;
3344 reg = VMCS_PRI_PROC_BASED_CTLS;
3347 case VM_CAP_PAUSE_EXIT:
3348 if (cap_pause_exit) {
3350 pptr = &vmx->cap[vcpu].proc_ctls;
3352 flag = PROCBASED_PAUSE_EXITING;
3353 reg = VMCS_PRI_PROC_BASED_CTLS;
3356 case VM_CAP_UNRESTRICTED_GUEST:
3357 if (cap_unrestricted_guest) {
3359 pptr = &vmx->cap[vcpu].proc_ctls2;
3361 flag = PROCBASED2_UNRESTRICTED_GUEST;
3362 reg = VMCS_SEC_PROC_BASED_CTLS;
3365 case VM_CAP_ENABLE_INVPCID:
3368 pptr = &vmx->cap[vcpu].proc_ctls2;
3370 flag = PROCBASED2_ENABLE_INVPCID;
3371 reg = VMCS_SEC_PROC_BASED_CTLS;
3385 error = vmwrite(reg, baseval);
3392 * Update optional stored flags, and record the setting.
3400 vmx->cap[vcpu].set |= (1 << type);
3402 vmx->cap[vcpu].set &= ~(1 << type);
3411 struct vlapic vlapic;
3412 struct pir_desc *pir_desc;
3417 #define VPR_PRIO_BIT(vpr) (1 << ((vpr) >> 4))
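/*
 * VPR_PRIO_BIT maps an interrupt's priority class (bits 7:4 of the vector
 * or PPR) to a single bit so competing priorities can be tracked in the
 * 'pending_prio' bitfield.
 */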
3419 #define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg) \
3421 VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d", \
3422 level ? "level" : "edge", vector); \
3423 VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]); \
3424 VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]); \
3425 VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]); \
3426 VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]); \
3427 VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3431 * vlapic->ops handlers that utilize the APICv hardware assist described in
3432 * Chapter 29 of the Intel SDM.
3435 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3437 struct vlapic_vtx *vlapic_vtx;
3438 struct pir_desc *pir_desc;
3440 int idx, notify = 0;
3442 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3443 pir_desc = vlapic_vtx->pir_desc;
3446 * Keep track of interrupt requests in the PIR descriptor. This is
3447 * because the virtual APIC page pointed to by the VMCS cannot be
3448 * modified if the vcpu is running.
3451 mask = 1UL << (vector % 64);
3452 atomic_set_long(&pir_desc->pir[idx], mask);
3455 * A notification is required whenever the 'pending' bit makes a
3456 * transition from 0->1.
3458 * Even if the 'pending' bit is already asserted, notification about
3459 * the incoming interrupt may still be necessary. For example, if a
3460 * vCPU is HLTed with a high PPR, a low priority interrupt would cause
3461 * the 0->1 'pending' transition with a notification, but the vCPU
3462 * would ignore the interrupt for the time being. The same vCPU would
3463 * need to then be notified if a high-priority interrupt arrived which
3464 * satisfied the PPR.
3466 * The priorities of interrupts injected while 'pending' is asserted
3467 * are tracked in a custom bitfield 'pending_prio'. Should the
3468 * to-be-injected interrupt exceed the priorities already present, the
3469 * notification is sent. The priorities recorded in 'pending_prio' are
3470 * cleared whenever the 'pending' bit makes another 0->1 transition.
3472 if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) {
3474 vlapic_vtx->pending_prio = 0;
3476 const u_int old_prio = vlapic_vtx->pending_prio;
3477 const u_int prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT);
3479 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) {
3480 atomic_set_int(&vlapic_vtx->pending_prio, prio_bit);
3485 VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3486 level, "vmx_set_intr_ready");
3491 vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3493 struct vlapic_vtx *vlapic_vtx;
3494 struct pir_desc *pir_desc;
3495 struct LAPIC *lapic;
3496 uint64_t pending, pirval;
3501 * This function is only expected to be called from the 'HLT' exit
3502 * handler which does not care about the vector that is pending.
3504 KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3506 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3507 pir_desc = vlapic_vtx->pir_desc;
3509 pending = atomic_load_acq_long(&pir_desc->pending);
3512 * While a virtual interrupt may have already been
3513 * processed, the actual delivery may still be pending due to the
3514 * interruptibility of the guest. Recognize a pending
3515 * interrupt by reevaluating virtual interrupts
3516 * following Section 29.2.1 in the Intel SDM Volume 3.
3518 struct vm_exit *vmexit;
3521 vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
3522 KASSERT(vmexit->exitcode == VM_EXITCODE_HLT,
3523 ("vmx_pending_intr: exitcode not 'HLT'"));
3524 rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT;
3525 lapic = vlapic->apic_page;
3526 ppr = lapic->ppr & APIC_TPR_INT;
3535 * If there is an interrupt pending then it will be recognized only
3536 * if its priority is greater than the processor priority.
3538 * Special case: if the processor priority is zero then any pending
3539 * interrupt will be recognized.
3541 lapic = vlapic->apic_page;
3542 ppr = lapic->ppr & APIC_TPR_INT;
3546 VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3550 for (i = 3; i >= 0; i--) {
3551 pirval = pir_desc->pir[i];
3553 vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT;
3559 * If the highest-priority pending interrupt falls short of the
3560 * processor priority of this vCPU, ensure that 'pending_prio' does not
3561 * have any stale bits which would preclude a higher-priority interrupt
3562 * from incurring a notification later.
3565 const u_int prio_bit = VPR_PRIO_BIT(vpr);
3566 const u_int old = vlapic_vtx->pending_prio;
3568 if (old > prio_bit && (old & prio_bit) == 0) {
3569 vlapic_vtx->pending_prio = prio_bit;
3577 vmx_intr_accepted(struct vlapic *vlapic, int vector)
3580 panic("vmx_intr_accepted: not expected to be called");
3584 vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3586 struct vlapic_vtx *vlapic_vtx;
3591 KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3592 KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3593 ("vmx_set_tmr: vcpu cannot be running"));
3595 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3596 vmx = vlapic_vtx->vmx;
3597 vmcs = &vmx->vmcs[vlapic->vcpuid];
3598 mask = 1UL << (vector % 64);
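/*
 * Level-triggered vectors must cause an EOI-induced VM-exit so the EOI
 * can be propagated to the I/O APIC; set or clear this vector's bit in
 * the EOI-exit bitmap accordingly.
 */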
3601 val = vmcs_read(VMCS_EOI_EXIT(vector));
3606 vmcs_write(VMCS_EOI_EXIT(vector), val);
3611 vmx_enable_x2apic_mode(struct vlapic *vlapic)
3615 uint32_t proc_ctls2;
3618 vcpuid = vlapic->vcpuid;
3619 vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3620 vmcs = &vmx->vmcs[vcpuid];
3622 proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3623 KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3624 ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3626 proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3627 proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3628 vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3631 vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3634 if (vlapic->vcpuid == 0) {
3636 * The nested page table mappings are shared by all vcpus
3637 * so unmap the APIC access page just once.
3639 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3640 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3644 * The MSR bitmap is shared by all vcpus so modify it only
3645 * once in the context of vcpu 0.
3647 error = vmx_allow_x2apic_msrs(vmx);
3648 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3654 vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3657 ipi_cpu(hostcpu, pirvec);
3661 * Transfer the pending interrupts in the PIR descriptor to the IRR
3662 * in the virtual APIC page.
3665 vmx_inject_pir(struct vlapic *vlapic)
3667 struct vlapic_vtx *vlapic_vtx;
3668 struct pir_desc *pir_desc;
3669 struct LAPIC *lapic;
3670 uint64_t val, pirval;
3671 int rvi, pirbase = -1;
3672 uint16_t intr_status_old, intr_status_new;
3674 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3675 pir_desc = vlapic_vtx->pir_desc;
3676 if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3677 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3678 "no posted interrupt pending");
3684 lapic = vlapic->apic_page;
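/*
 * Atomically read and clear each 64-bit PIR word and OR its bits into
 * the corresponding pair of 32-bit IRR registers in the virtual APIC
 * page, remembering the highest non-zero word for the RVI update below.
 */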
3686 val = atomic_readandclear_long(&pir_desc->pir[0]);
3689 lapic->irr1 |= val >> 32;
3694 val = atomic_readandclear_long(&pir_desc->pir[1]);
3697 lapic->irr3 |= val >> 32;
3702 val = atomic_readandclear_long(&pir_desc->pir[2]);
3705 lapic->irr5 |= val >> 32;
3710 val = atomic_readandclear_long(&pir_desc->pir[3]);
3713 lapic->irr7 |= val >> 32;
3718 VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3721 * Update RVI so the processor can evaluate pending virtual
3722 * interrupts on VM-entry.
3724 * It is possible for pirval to be 0 here, even though the
3725 * pending bit has been set. The scenario is:
3726 * CPU-Y is sending a posted interrupt to CPU-X, which
3727 * is running a guest and processing posted interrupts in h/w.
3728 * CPU-X will eventually exit and the state seen in s/w is
3729 * the pending bit set, but no PIR bits set.
3732 * (vm running) (host running)
3733 * rx posted interrupt
3736 * READ/CLEAR PIR bits
3739 * pending bit set, PIR 0
3742 rvi = pirbase + flsl(pirval) - 1;
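/*
 * The low byte of the guest interrupt-status field holds RVI (the
 * requesting virtual interrupt) and the high byte holds SVI; only RVI
 * is raised here, and only if it increases.
 */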
3743 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3744 intr_status_new = (intr_status_old & 0xFF00) | rvi;
3745 if (intr_status_new > intr_status_old) {
3746 vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3747 VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3748 "guest_intr_status changed from 0x%04x to 0x%04x",
3749 intr_status_old, intr_status_new);
3754 static struct vlapic *
3755 vmx_vlapic_init(void *arg, int vcpuid)
3758 struct vlapic *vlapic;
3759 struct vlapic_vtx *vlapic_vtx;
3763 vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3764 vlapic->vm = vmx->vm;
3765 vlapic->vcpuid = vcpuid;
3766 vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3768 vlapic_vtx = (struct vlapic_vtx *)vlapic;
3769 vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3770 vlapic_vtx->vmx = vmx;
3772 if (virtual_interrupt_delivery) {
3773 vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3774 vlapic->ops.pending_intr = vmx_pending_intr;
3775 vlapic->ops.intr_accepted = vmx_intr_accepted;
3776 vlapic->ops.set_tmr = vmx_set_tmr;
3777 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3780 if (posted_interrupts)
3781 vlapic->ops.post_intr = vmx_post_intr;
3783 vlapic_init(vlapic);
3789 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3792 vlapic_cleanup(vlapic);
3793 free(vlapic, M_VLAPIC);
3796 struct vmm_ops vmm_ops_intel = {
3798 .cleanup = vmx_cleanup,
3799 .resume = vmx_restore,
3800 .vminit = vmx_vminit,
3802 .vmcleanup = vmx_vmcleanup,
3803 .vmgetreg = vmx_getreg,
3804 .vmsetreg = vmx_setreg,
3805 .vmgetdesc = vmx_getdesc,
3806 .vmsetdesc = vmx_setdesc,
3807 .vmgetcap = vmx_getcap,
3808 .vmsetcap = vmx_setcap,
3809 .vmspace_alloc = ept_vmspace_alloc,
3810 .vmspace_free = ept_vmspace_free,
3811 .vlapic_init = vmx_vlapic_init,
3812 .vlapic_cleanup = vmx_vlapic_cleanup,