sys/amd64/vmm/intel/vmx.c (FreeBSD stable/10)
1 /*-
2  * Copyright (c) 2011 NetApp, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/smp.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/pcpu.h>
38 #include <sys/proc.h>
39 #include <sys/sysctl.h>
40
41 #include <vm/vm.h>
42 #include <vm/pmap.h>
43
44 #include <machine/psl.h>
45 #include <machine/cpufunc.h>
46 #include <machine/md_var.h>
47 #include <machine/segments.h>
48 #include <machine/smp.h>
49 #include <machine/specialreg.h>
50 #include <machine/vmparam.h>
51
52 #include <machine/vmm.h>
53 #include <machine/vmm_dev.h>
54 #include "vmm_host.h"
55 #include "vmm_ioport.h"
56 #include "vmm_ipi.h"
57 #include "vmm_msr.h"
58 #include "vmm_ktr.h"
59 #include "vmm_stat.h"
60 #include "vatpic.h"
61 #include "vlapic.h"
62 #include "vlapic_priv.h"
63
64 #include "vmx_msr.h"
65 #include "ept.h"
66 #include "vmx_cpufunc.h"
67 #include "vmx.h"
68 #include "x86.h"
69 #include "vmx_controls.h"
70
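/*
 * Each *_ONE_SETTING macro lists VMX control bits that must be set to 1 and
 * each *_ZERO_SETTING macro lists bits that must be 0.  vmx_init() validates
 * both against the VMX capability MSRs via vmx_set_ctlreg() before use.
 */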
71 #define PINBASED_CTLS_ONE_SETTING                                       \
72         (PINBASED_EXTINT_EXITING        |                               \
73          PINBASED_NMI_EXITING           |                               \
74          PINBASED_VIRTUAL_NMI)
75 #define PINBASED_CTLS_ZERO_SETTING      0
76
77 #define PROCBASED_CTLS_WINDOW_SETTING                                   \
78         (PROCBASED_INT_WINDOW_EXITING   |                               \
79          PROCBASED_NMI_WINDOW_EXITING)
80
81 #define PROCBASED_CTLS_ONE_SETTING                                      \
82         (PROCBASED_SECONDARY_CONTROLS   |                               \
83          PROCBASED_IO_EXITING           |                               \
84          PROCBASED_MSR_BITMAPS          |                               \
85          PROCBASED_CTLS_WINDOW_SETTING)
86 #define PROCBASED_CTLS_ZERO_SETTING     \
87         (PROCBASED_CR3_LOAD_EXITING |   \
88         PROCBASED_CR3_STORE_EXITING |   \
89         PROCBASED_IO_BITMAPS)
90
91 #define PROCBASED_CTLS2_ONE_SETTING     PROCBASED2_ENABLE_EPT
92 #define PROCBASED_CTLS2_ZERO_SETTING    0
93
94 #define VM_EXIT_CTLS_ONE_SETTING_NO_PAT                                 \
95         (VM_EXIT_HOST_LMA                       |                       \
96         VM_EXIT_SAVE_EFER                       |                       \
97         VM_EXIT_LOAD_EFER)
98
99 #define VM_EXIT_CTLS_ONE_SETTING                                        \
100         (VM_EXIT_CTLS_ONE_SETTING_NO_PAT        |                       \
101         VM_EXIT_ACKNOWLEDGE_INTERRUPT           |                       \
102         VM_EXIT_SAVE_PAT                        |                       \
103         VM_EXIT_LOAD_PAT)
104 #define VM_EXIT_CTLS_ZERO_SETTING       VM_EXIT_SAVE_DEBUG_CONTROLS
105
106 #define VM_ENTRY_CTLS_ONE_SETTING_NO_PAT        VM_ENTRY_LOAD_EFER
107
108 #define VM_ENTRY_CTLS_ONE_SETTING                                       \
109         (VM_ENTRY_CTLS_ONE_SETTING_NO_PAT       |                       \
110         VM_ENTRY_LOAD_PAT)
111 #define VM_ENTRY_CTLS_ZERO_SETTING                                      \
112         (VM_ENTRY_LOAD_DEBUG_CONTROLS           |                       \
113         VM_ENTRY_INTO_SMM                       |                       \
114         VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
115
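/*
 * Both helpers expand to msr_bitmap_change_access(), which returns 0 on
 * success; callers accumulate the results with '+=' or chain them with '||'
 * to detect any failure.
 */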
116 #define guest_msr_rw(vmx, msr) \
117         msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
118
119 #define guest_msr_ro(vmx, msr) \
120     msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_READ)
121
122 #define HANDLED         1
123 #define UNHANDLED       0
124
125 static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
126 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
127
128 SYSCTL_DECL(_hw_vmm);
129 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
130
131 int vmxon_enabled[MAXCPU];
132 static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
133
134 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
135 static uint32_t exit_ctls, entry_ctls;
136
137 static uint64_t cr0_ones_mask, cr0_zeros_mask;
138 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
139              &cr0_ones_mask, 0, NULL);
140 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
141              &cr0_zeros_mask, 0, NULL);
142
143 static uint64_t cr4_ones_mask, cr4_zeros_mask;
144 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
145              &cr4_ones_mask, 0, NULL);
146 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
147              &cr4_zeros_mask, 0, NULL);
148
149 static int vmx_no_patmsr;
150
151 static int vmx_initialized;
152 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
153            &vmx_initialized, 0, "Intel VMX initialized");
154
155 /*
156  * Optional capabilities
157  */
158 static int cap_halt_exit;
159 static int cap_pause_exit;
160 static int cap_unrestricted_guest;
161 static int cap_monitor_trap;
162 static int cap_invpcid;
163
164 static int virtual_interrupt_delivery;
165 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
166     &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
167
168 static int posted_interrupts;
169 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupts, CTLFLAG_RD,
170     &posted_interrupts, 0, "APICv posted interrupt support");
171
172 static int pirvec;
173 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
174     &pirvec, 0, "APICv posted interrupt vector");
175
176 static struct unrhdr *vpid_unr;
177 static u_int vpid_alloc_failed;
178 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
179             &vpid_alloc_failed, 0, NULL);
180
181 /*
182  * Use the last page below 4GB as the APIC access address. This address is
183  * occupied by the boot firmware so it is guaranteed that it will not conflict
184  * with a page in system memory.
185  */
186 #define APIC_ACCESS_ADDRESS     0xFFFFF000
187
188 static void vmx_inject_pir(struct vlapic *vlapic);
189
190 #ifdef KTR
191 static const char *
192 exit_reason_to_str(int reason)
193 {
194         static char reasonbuf[32];
195
196         switch (reason) {
197         case EXIT_REASON_EXCEPTION:
198                 return "exception";
199         case EXIT_REASON_EXT_INTR:
200                 return "extint";
201         case EXIT_REASON_TRIPLE_FAULT:
202                 return "triplefault";
203         case EXIT_REASON_INIT:
204                 return "init";
205         case EXIT_REASON_SIPI:
206                 return "sipi";
207         case EXIT_REASON_IO_SMI:
208                 return "iosmi";
209         case EXIT_REASON_SMI:
210                 return "smi";
211         case EXIT_REASON_INTR_WINDOW:
212                 return "intrwindow";
213         case EXIT_REASON_NMI_WINDOW:
214                 return "nmiwindow";
215         case EXIT_REASON_TASK_SWITCH:
216                 return "taskswitch";
217         case EXIT_REASON_CPUID:
218                 return "cpuid";
219         case EXIT_REASON_GETSEC:
220                 return "getsec";
221         case EXIT_REASON_HLT:
222                 return "hlt";
223         case EXIT_REASON_INVD:
224                 return "invd";
225         case EXIT_REASON_INVLPG:
226                 return "invlpg";
227         case EXIT_REASON_RDPMC:
228                 return "rdpmc";
229         case EXIT_REASON_RDTSC:
230                 return "rdtsc";
231         case EXIT_REASON_RSM:
232                 return "rsm";
233         case EXIT_REASON_VMCALL:
234                 return "vmcall";
235         case EXIT_REASON_VMCLEAR:
236                 return "vmclear";
237         case EXIT_REASON_VMLAUNCH:
238                 return "vmlaunch";
239         case EXIT_REASON_VMPTRLD:
240                 return "vmptrld";
241         case EXIT_REASON_VMPTRST:
242                 return "vmptrst";
243         case EXIT_REASON_VMREAD:
244                 return "vmread";
245         case EXIT_REASON_VMRESUME:
246                 return "vmresume";
247         case EXIT_REASON_VMWRITE:
248                 return "vmwrite";
249         case EXIT_REASON_VMXOFF:
250                 return "vmxoff";
251         case EXIT_REASON_VMXON:
252                 return "vmxon";
253         case EXIT_REASON_CR_ACCESS:
254                 return "craccess";
255         case EXIT_REASON_DR_ACCESS:
256                 return "draccess";
257         case EXIT_REASON_INOUT:
258                 return "inout";
259         case EXIT_REASON_RDMSR:
260                 return "rdmsr";
261         case EXIT_REASON_WRMSR:
262                 return "wrmsr";
263         case EXIT_REASON_INVAL_VMCS:
264                 return "invalvmcs";
265         case EXIT_REASON_INVAL_MSR:
266                 return "invalmsr";
267         case EXIT_REASON_MWAIT:
268                 return "mwait";
269         case EXIT_REASON_MTF:
270                 return "mtf";
271         case EXIT_REASON_MONITOR:
272                 return "monitor";
273         case EXIT_REASON_PAUSE:
274                 return "pause";
275         case EXIT_REASON_MCE:
276                 return "mce";
277         case EXIT_REASON_TPR:
278                 return "tpr";
279         case EXIT_REASON_APIC_ACCESS:
280                 return "apic-access";
281         case EXIT_REASON_GDTR_IDTR:
282                 return "gdtridtr";
283         case EXIT_REASON_LDTR_TR:
284                 return "ldtrtr";
285         case EXIT_REASON_EPT_FAULT:
286                 return "eptfault";
287         case EXIT_REASON_EPT_MISCONFIG:
288                 return "eptmisconfig";
289         case EXIT_REASON_INVEPT:
290                 return "invept";
291         case EXIT_REASON_RDTSCP:
292                 return "rdtscp";
293         case EXIT_REASON_VMX_PREEMPT:
294                 return "vmxpreempt";
295         case EXIT_REASON_INVVPID:
296                 return "invvpid";
297         case EXIT_REASON_WBINVD:
298                 return "wbinvd";
299         case EXIT_REASON_XSETBV:
300                 return "xsetbv";
301         case EXIT_REASON_APIC_WRITE:
302                 return "apic-write";
303         default:
304                 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
305                 return (reasonbuf);
306         }
307 }
308 #endif  /* KTR */
309
310 static int
311 vmx_allow_x2apic_msrs(struct vmx *vmx)
312 {
313         int i, error;
314
315         error = 0;
316
317         /*
318          * Allow readonly access to the following x2APIC MSRs from the guest.
319          */
320         error += guest_msr_ro(vmx, MSR_APIC_ID);
321         error += guest_msr_ro(vmx, MSR_APIC_VERSION);
322         error += guest_msr_ro(vmx, MSR_APIC_LDR);
323         error += guest_msr_ro(vmx, MSR_APIC_SVR);
324
325         for (i = 0; i < 8; i++)
326                 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
327
328         for (i = 0; i < 8; i++)
329                 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
330         
331         for (i = 0; i < 8; i++)
332                 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
333
334         error += guest_msr_ro(vmx, MSR_APIC_ESR);
335         error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
336         error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
337         error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
338         error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
339         error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
340         error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
341         error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
342         error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
343         error += guest_msr_ro(vmx, MSR_APIC_ICR);
344
345         /*
346          * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
347          *
348          * These registers get special treatment described in the section
349          * "Virtualizing MSR-Based APIC Accesses".
350          */
351         error += guest_msr_rw(vmx, MSR_APIC_TPR);
352         error += guest_msr_rw(vmx, MSR_APIC_EOI);
353         error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
354
355         return (error);
356 }
357
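/*
 * Force the CR0/CR4 bits that VMX operation requires to be 1 and clear the
 * bits it requires to be 0, as derived from the MSR_VMX_CR{0,4}_FIXED{0,1}
 * MSRs in vmx_init().
 */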
358 u_long
359 vmx_fix_cr0(u_long cr0)
360 {
361
362         return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
363 }
364
365 u_long
366 vmx_fix_cr4(u_long cr4)
367 {
368
369         return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
370 }
371
372 static void
373 vpid_free(int vpid)
374 {
375         if (vpid < 0 || vpid > 0xffff)
376                 panic("vpid_free: invalid vpid %d", vpid);
377
378         /*
379          * VPIDs [0,VM_MAXCPU] are special and are not allocated from
380          * the unit number allocator.
381          */
382
383         if (vpid > VM_MAXCPU)
384                 free_unr(vpid_unr, vpid);
385 }
386
387 static void
388 vpid_alloc(uint16_t *vpid, int num)
389 {
390         int i, x;
391
392         if (num <= 0 || num > VM_MAXCPU)
393                 panic("invalid number of vpids requested: %d", num);
394
395         /*
396          * If the "enable vpid" execution control is not enabled then the
397          * VPID is required to be 0 for all vcpus.
398          */
399         if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
400                 for (i = 0; i < num; i++)
401                         vpid[i] = 0;
402                 return;
403         }
404
405         /*
406          * Allocate a unique VPID for each vcpu from the unit number allocator.
407          */
408         for (i = 0; i < num; i++) {
409                 x = alloc_unr(vpid_unr);
410                 if (x == -1)
411                         break;
412                 else
413                         vpid[i] = x;
414         }
415
416         if (i < num) {
417                 atomic_add_int(&vpid_alloc_failed, 1);
418
419                 /*
420                  * If the unit number allocator does not have enough unique
421                  * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
422                  *
423                  * These VPIDs are not guaranteed to be unique across VMs but this does not
424                  * affect correctness because the combined mappings are also
425                  * tagged with the EP4TA which is unique for each VM.
426                  *
427                  * It is still sub-optimal because the invvpid will invalidate
428                  * combined mappings for a particular VPID across all EP4TAs.
429                  */
430                 while (i-- > 0)
431                         vpid_free(vpid[i]);
432
433                 for (i = 0; i < num; i++)
434                         vpid[i] = i + 1;
435         }
436 }
437
438 static void
439 vpid_init(void)
440 {
441         /*
442          * VPID 0 is required when the "enable VPID" execution control is
443          * disabled.
444          *
445          * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
446          * unit number allocator does not have sufficient unique VPIDs to
447          * satisfy the allocation.
448          *
449          * The remaining VPIDs are managed by the unit number allocator.
450          */
451         vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
452 }
453
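/*
 * Build the guest MSR save area for a vcpu.  MSRs listed here are stored on
 * VM exit and loaded on VM entry through the address programmed by
 * vmcs_set_msr_save() in vmx_vminit().
 */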
454 static void
455 msr_save_area_init(struct msr_entry *g_area, int *g_count)
456 {
457         int cnt;
458
459         static struct msr_entry guest_msrs[] = {
460                 { MSR_KGSBASE, 0, 0 },
461         };
462
463         cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
464         if (cnt > GUEST_MSR_MAX_ENTRIES)
465                 panic("guest msr save area overrun");
466         bcopy(guest_msrs, g_area, sizeof(guest_msrs));
467         *g_count = cnt;
468 }
469
470 static void
471 vmx_disable(void *arg __unused)
472 {
473         struct invvpid_desc invvpid_desc = { 0 };
474         struct invept_desc invept_desc = { 0 };
475
476         if (vmxon_enabled[curcpu]) {
477                 /*
478                  * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
479                  *
480                  * VMXON and VMXOFF are not required to invalidate any TLB
481                  * caching structures, so invalidate them explicitly here to
482                  * avoid retaining stale translations across distinct VMX episodes.
483                  */
484                 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
485                 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
486                 vmxoff();
487         }
488         load_cr4(rcr4() & ~CR4_VMXE);
489 }
490
491 static int
492 vmx_cleanup(void)
493 {
494         
495         if (pirvec != 0)
496                 vmm_ipi_free(pirvec);
497
498         if (vpid_unr != NULL) {
499                 delete_unrhdr(vpid_unr);
500                 vpid_unr = NULL;
501         }
502
503         smp_rendezvous(NULL, vmx_disable, NULL, NULL);
504
505         return (0);
506 }
507
508 static void
509 vmx_enable(void *arg __unused)
510 {
511         int error;
512
513         load_cr4(rcr4() | CR4_VMXE);
514
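        /*
         * The first 32 bits of the VMXON region must contain the VMX revision
         * identifier reported by MSR_VMX_BASIC.
         */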
515         *(uint32_t *)vmxon_region[curcpu] = vmx_revision();
516         error = vmxon(vmxon_region[curcpu]);
517         if (error == 0)
518                 vmxon_enabled[curcpu] = 1;
519 }
520
521 static void
522 vmx_restore(void)
523 {
524
525         if (vmxon_enabled[curcpu])
526                 vmxon(vmxon_region[curcpu]);
527 }
528
529 static int
530 vmx_init(int ipinum)
531 {
532         int error, use_tpr_shadow;
533         uint64_t fixed0, fixed1, feature_control;
534         uint32_t tmp, procbased2_vid_bits;
535
536         /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
537         if (!(cpu_feature2 & CPUID2_VMX)) {
538                 printf("vmx_init: processor does not support VMX operation\n");
539                 return (ENXIO);
540         }
541
542         /*
543          * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
544          * are set (bits 0 and 2 respectively).
545          */
546         feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
547         if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
548             (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
549                 printf("vmx_init: VMX operation disabled by BIOS\n");
550                 return (ENXIO);
551         }
552
553         /* Check support for primary processor-based VM-execution controls */
554         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
555                                MSR_VMX_TRUE_PROCBASED_CTLS,
556                                PROCBASED_CTLS_ONE_SETTING,
557                                PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
558         if (error) {
559                 printf("vmx_init: processor does not support desired primary "
560                        "processor-based controls\n");
561                 return (error);
562         }
563
564         /* Clear the processor-based ctl bits that are set on demand */
565         procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
566
567         /* Check support for secondary processor-based VM-execution controls */
568         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
569                                MSR_VMX_PROCBASED_CTLS2,
570                                PROCBASED_CTLS2_ONE_SETTING,
571                                PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
572         if (error) {
573                 printf("vmx_init: processor does not support desired secondary "
574                        "processor-based controls\n");
575                 return (error);
576         }
577
578         /* Check support for VPID */
579         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
580                                PROCBASED2_ENABLE_VPID, 0, &tmp);
581         if (error == 0)
582                 procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
583
584         /* Check support for pin-based VM-execution controls */
585         error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
586                                MSR_VMX_TRUE_PINBASED_CTLS,
587                                PINBASED_CTLS_ONE_SETTING,
588                                PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
589         if (error) {
590                 printf("vmx_init: processor does not support desired "
591                        "pin-based controls\n");
592                 return (error);
593         }
594
595         /* Check support for VM-exit controls */
596         error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
597                                VM_EXIT_CTLS_ONE_SETTING,
598                                VM_EXIT_CTLS_ZERO_SETTING,
599                                &exit_ctls);
600         if (error) {
601                 /* Try again without the PAT MSR bits */
602                 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
603                                        MSR_VMX_TRUE_EXIT_CTLS,
604                                        VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
605                                        VM_EXIT_CTLS_ZERO_SETTING,
606                                        &exit_ctls);
607                 if (error) {
608                         printf("vmx_init: processor does not support desired "
609                                "exit controls\n");
610                         return (error);
611                 } else {
612                         if (bootverbose)
613                                 printf("vmm: PAT MSR access not supported\n");
614                         guest_msr_valid(MSR_PAT);
615                         vmx_no_patmsr = 1;
616                 }
617         }
618
619         /* Check support for VM-entry controls */
620         if (!vmx_no_patmsr) {
621                 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
622                                        MSR_VMX_TRUE_ENTRY_CTLS,
623                                        VM_ENTRY_CTLS_ONE_SETTING,
624                                        VM_ENTRY_CTLS_ZERO_SETTING,
625                                        &entry_ctls);
626         } else {
627                 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
628                                        MSR_VMX_TRUE_ENTRY_CTLS,
629                                        VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
630                                        VM_ENTRY_CTLS_ZERO_SETTING,
631                                        &entry_ctls);
632         }
633
634         if (error) {
635                 printf("vmx_init: processor does not support desired "
636                        "entry controls\n");
637                 return (error);
638         }
639
640         /*
641          * Check support for optional features by testing them
642          * as individual bits
643          */
644         cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
645                                         MSR_VMX_TRUE_PROCBASED_CTLS,
646                                         PROCBASED_HLT_EXITING, 0,
647                                         &tmp) == 0);
648
649         cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
650                                         MSR_VMX_PROCBASED_CTLS,
651                                         PROCBASED_MTF, 0,
652                                         &tmp) == 0);
653
654         cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
655                                          MSR_VMX_TRUE_PROCBASED_CTLS,
656                                          PROCBASED_PAUSE_EXITING, 0,
657                                          &tmp) == 0);
658
659         cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
660                                         MSR_VMX_PROCBASED_CTLS2,
661                                         PROCBASED2_UNRESTRICTED_GUEST, 0,
662                                         &tmp) == 0);
663
664         cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
665             MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
666             &tmp) == 0);
667
668         /*
669          * Check support for virtual interrupt delivery.
670          */
671         procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
672             PROCBASED2_VIRTUALIZE_X2APIC_MODE |
673             PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
674             PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
675
676         use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
677             MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
678             &tmp) == 0);
679
680         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
681             procbased2_vid_bits, 0, &tmp);
682         if (error == 0 && use_tpr_shadow) {
683                 virtual_interrupt_delivery = 1;
684                 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
685                     &virtual_interrupt_delivery);
686         }
687
688         if (virtual_interrupt_delivery) {
689                 procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
690                 procbased_ctls2 |= procbased2_vid_bits;
691                 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
692
693                 /*
694                  * Check for Posted Interrupts only if Virtual Interrupt
695                  * Delivery is enabled.
696                  */
697                 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
698                     MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
699                     &tmp);
700                 if (error == 0) {
701                         pirvec = vmm_ipi_alloc();
702                         if (pirvec == 0) {
703                                 if (bootverbose) {
704                                         printf("vmx_init: unable to allocate "
705                                             "posted interrupt vector\n");
706                                 }
707                         } else {
708                                 posted_interrupts = 1;
709                                 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
710                                     &posted_interrupts);
711                         }
712                 }
713         }
714
715         if (posted_interrupts)
716                 pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
717
718         /* Initialize EPT */
719         error = ept_init(ipinum);
720         if (error) {
721                 printf("vmx_init: ept initialization failed (%d)\n", error);
722                 return (error);
723         }
724
725         /*
726          * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
727          */
728         fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
729         fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
730         cr0_ones_mask = fixed0 & fixed1;
731         cr0_zeros_mask = ~fixed0 & ~fixed1;
732
733         /*
734          * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
735          * if unrestricted guest execution is allowed.
736          */
737         if (cap_unrestricted_guest)
738                 cr0_ones_mask &= ~(CR0_PG | CR0_PE);
739
740         /*
741          * Do not allow the guest to set CR0_NW or CR0_CD.
742          */
743         cr0_zeros_mask |= (CR0_NW | CR0_CD);
744
745         fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
746         fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
747         cr4_ones_mask = fixed0 & fixed1;
748         cr4_zeros_mask = ~fixed0 & ~fixed1;
749
750         vpid_init();
751
752         /* enable VMX operation */
753         smp_rendezvous(NULL, vmx_enable, NULL, NULL);
754
755         vmx_initialized = 1;
756
757         return (0);
758 }
759
760 static void
761 vmx_trigger_hostintr(int vector)
762 {
763         uintptr_t func;
764         struct gate_descriptor *gd;
765
766         gd = &idt[vector];
767
768         KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
769             "invalid vector %d", vector));
770         KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
771             vector));
772         KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
773             "has invalid type %d", vector, gd->gd_type));
774         KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
775             "has invalid dpl %d", vector, gd->gd_dpl));
776         KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
777             "for vector %d has invalid selector %d", vector, gd->gd_selector));
778         KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
779             "IST %d", vector, gd->gd_ist));
780
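        /*
         * Reassemble the 64-bit handler address from the split offset fields
         * of the IDT gate descriptor and call it, so the pending host
         * interrupt is serviced just as if it had been taken directly.
         */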
781         func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
782         vmx_call_isr(func);
783 }
784
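/*
 * Program the CR0/CR4 guest/host mask and read shadow.  Guest reads of bits
 * set in the mask return the shadow value, and a guest MOV to the register
 * that would change a masked bit from its shadow value causes a VM exit.
 */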
785 static int
786 vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
787 {
788         int error, mask_ident, shadow_ident;
789         uint64_t mask_value;
790
791         if (which != 0 && which != 4)
792                 panic("vmx_setup_cr_shadow: unknown cr%d", which);
793
794         if (which == 0) {
795                 mask_ident = VMCS_CR0_MASK;
796                 mask_value = cr0_ones_mask | cr0_zeros_mask;
797                 shadow_ident = VMCS_CR0_SHADOW;
798         } else {
799                 mask_ident = VMCS_CR4_MASK;
800                 mask_value = cr4_ones_mask | cr4_zeros_mask;
801                 shadow_ident = VMCS_CR4_SHADOW;
802         }
803
804         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
805         if (error)
806                 return (error);
807
808         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
809         if (error)
810                 return (error);
811
812         return (0);
813 }
814 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init))
815 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
816
817 static void *
818 vmx_vminit(struct vm *vm, pmap_t pmap)
819 {
820         uint16_t vpid[VM_MAXCPU];
821         int i, error, guest_msr_count;
822         struct vmx *vmx;
823         struct vmcs *vmcs;
824
825         vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
826         if ((uintptr_t)vmx & PAGE_MASK) {
827                 panic("malloc of struct vmx not aligned on %d byte boundary",
828                       PAGE_SIZE);
829         }
830         vmx->vm = vm;
831
832         vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
833
834         /*
835          * Clean up EPTP-tagged guest physical and combined mappings
836          *
837          * VMX transitions are not required to invalidate any guest physical
838          * mappings. So, it may be possible for stale guest physical mappings
839          * to be present in the processor TLBs.
840          *
841          * Combined mappings for this EP4TA are also invalidated for all VPIDs.
842          */
843         ept_invalidate_mappings(vmx->eptp);
844
845         msr_bitmap_initialize(vmx->msr_bitmap);
846
847         /*
848          * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
849          * The guest FSBASE and GSBASE are saved and restored during
850          * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
851          * always restored from the vmcs host state area on vm-exit.
852          *
853          * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
854          * how they are saved/restored, so they can be directly accessed by the
855          * guest.
856          *
857          * Guest KGSBASE is saved and restored in the guest MSR save area.
858          * Host KGSBASE is restored before returning to userland from the pcb.
859          * There will be a window of time when we are executing in the host
860          * kernel context with a value of KGSBASE from the guest. This is ok
861          * because the value of KGSBASE is inconsequential in kernel context.
862          *
863          * MSR_EFER is saved and restored in the guest VMCS area on a
864          * VM exit and entry respectively. It is also restored from the
865          * host VMCS area on a VM exit.
866          */
867         if (guest_msr_rw(vmx, MSR_GSBASE) ||
868             guest_msr_rw(vmx, MSR_FSBASE) ||
869             guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
870             guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
871             guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
872             guest_msr_rw(vmx, MSR_KGSBASE) ||
873             guest_msr_rw(vmx, MSR_EFER))
874                 panic("vmx_vminit: error setting guest msr access");
875
876         /*
877          * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
878          * and entry respectively. It is also restored from the host VMCS
879          * area on a VM exit. However, if running on a system with no
880          * MSR_PAT save/restore support, leave access disabled so accesses
881          * will be trapped.
882          */
883         if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
884                 panic("vmx_vminit: error setting guest pat msr access");
885
886         vpid_alloc(vpid, VM_MAXCPU);
887
888         if (virtual_interrupt_delivery) {
889                 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
890                     APIC_ACCESS_ADDRESS);
891                 /* XXX this should really return an error to the caller */
892                 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
893         }
894
895         for (i = 0; i < VM_MAXCPU; i++) {
896                 vmcs = &vmx->vmcs[i];
897                 vmcs->identifier = vmx_revision();
898                 error = vmclear(vmcs);
899                 if (error != 0) {
900                         panic("vmx_vminit: vmclear error %d on vcpu %d\n",
901                               error, i);
902                 }
903
904                 error = vmcs_init(vmcs);
905                 KASSERT(error == 0, ("vmcs_init error %d", error));
906
907                 VMPTRLD(vmcs);
908                 error = 0;
909                 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
910                 error += vmwrite(VMCS_EPTP, vmx->eptp);
911                 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
912                 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
913                 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
914                 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
915                 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
916                 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
917                 error += vmwrite(VMCS_VPID, vpid[i]);
918                 if (virtual_interrupt_delivery) {
919                         error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
920                         error += vmwrite(VMCS_VIRTUAL_APIC,
921                             vtophys(&vmx->apic_page[i]));
922                         error += vmwrite(VMCS_EOI_EXIT0, 0);
923                         error += vmwrite(VMCS_EOI_EXIT1, 0);
924                         error += vmwrite(VMCS_EOI_EXIT2, 0);
925                         error += vmwrite(VMCS_EOI_EXIT3, 0);
926                 }
927                 if (posted_interrupts) {
928                         error += vmwrite(VMCS_PIR_VECTOR, pirvec);
929                         error += vmwrite(VMCS_PIR_DESC,
930                             vtophys(&vmx->pir_desc[i]));
931                 }
932                 VMCLEAR(vmcs);
933                 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
934
935                 vmx->cap[i].set = 0;
936                 vmx->cap[i].proc_ctls = procbased_ctls;
937                 vmx->cap[i].proc_ctls2 = procbased_ctls2;
938
939                 vmx->state[i].lastcpu = -1;
940                 vmx->state[i].vpid = vpid[i];
941
942                 msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);
943
944                 error = vmcs_set_msr_save(vmcs, vtophys(vmx->guest_msrs[i]),
945                     guest_msr_count);
946                 if (error != 0)
947                         panic("vmcs_set_msr_save error %d", error);
948
949                 /*
950                  * Set up the CR0/4 shadows, and init the read shadow
951                  * to the power-on register value from the Intel Sys Arch.
952                  *  CR0 - 0x60000010
953                  *  CR4 - 0
954                  */
955                 error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
956                 if (error != 0)
957                         panic("vmx_setup_cr0_shadow %d", error);
958
959                 error = vmx_setup_cr4_shadow(vmcs, 0);
960                 if (error != 0)
961                         panic("vmx_setup_cr4_shadow %d", error);
962
963                 vmx->ctx[i].pmap = pmap;
964         }
965
966         return (vmx);
967 }
968
969 static int
970 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
971 {
972         int handled, func;
973         
974         func = vmxctx->guest_rax;
975
976         handled = x86_emulate_cpuid(vm, vcpu,
977                                     (uint32_t*)(&vmxctx->guest_rax),
978                                     (uint32_t*)(&vmxctx->guest_rbx),
979                                     (uint32_t*)(&vmxctx->guest_rcx),
980                                     (uint32_t*)(&vmxctx->guest_rdx));
981         return (handled);
982 }
983
984 static __inline void
985 vmx_run_trace(struct vmx *vmx, int vcpu)
986 {
987 #ifdef KTR
988         VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
989 #endif
990 }
991
992 static __inline void
993 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
994                int handled)
995 {
996 #ifdef KTR
997         VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
998                  handled ? "handled" : "unhandled",
999                  exit_reason_to_str(exit_reason), rip);
1000 #endif
1001 }
1002
1003 static __inline void
1004 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
1005 {
1006 #ifdef KTR
1007         VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
1008 #endif
1009 }
1010
1011 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
1012
1013 static void
1014 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
1015 {
1016         struct vmxstate *vmxstate;
1017         struct invvpid_desc invvpid_desc;
1018
1019         vmxstate = &vmx->state[vcpu];
1020         if (vmxstate->lastcpu == curcpu)
1021                 return;
1022
1023         vmxstate->lastcpu = curcpu;
1024
1025         vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1026
1027         vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1028         vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1029         vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1030
1031         /*
1032          * If we are using VPIDs then invalidate all mappings tagged with 'vpid'
1033          *
1034          * We do this because this vcpu was executing on a different host
1035          * cpu when it last ran. We do not track whether it invalidated
1036          * mappings associated with its 'vpid' during that run. So we must
1037          * assume that the mappings associated with 'vpid' on 'curcpu' are
1038          * stale and invalidate them.
1039          *
1040          * Note that we incur this penalty only when the scheduler chooses to
1041          * move the thread associated with this vcpu between host cpus.
1042          *
1043          * Note also that this will invalidate mappings tagged with 'vpid'
1044          * for "all" EP4TAs.
1045          */
1046         if (vmxstate->vpid != 0) {
1047                 if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
1048                         invvpid_desc._res1 = 0;
1049                         invvpid_desc._res2 = 0;
1050                         invvpid_desc.vpid = vmxstate->vpid;
1051                         invvpid_desc.linear_addr = 0;
1052                         invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
1053                 } else {
1054                         /*
1055                          * The invvpid can be skipped if an invept is going to
1056                          * be performed before entering the guest. The invept
1057                          * will invalidate combined mappings tagged with
1058                          * 'vmx->eptp' for all vpids.
1059                          */
1060                         vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
1061                 }
1062         }
1063 }
1064
1065 /*
1066  * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1067  */
1068 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
1069
1070 static void __inline
1071 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1072 {
1073
1074         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1075                 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1076                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1077                 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1078         }
1079 }
1080
1081 static void __inline
1082 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1083 {
1084
1085         KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1086             ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
1087         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1088         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1089         VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1090 }
1091
1092 static void __inline
1093 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1094 {
1095
1096         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
1097                 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1098                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1099                 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1100         }
1101 }
1102
1103 static void __inline
1104 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1105 {
1106
1107         KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
1108             ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
1109         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1110         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1111         VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1112 }
1113
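/*
 * Guest interruptibility-state bits under which an NMI or a maskable
 * hardware interrupt, respectively, must not be injected.
 */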
1114 #define NMI_BLOCKING    (VMCS_INTERRUPTIBILITY_NMI_BLOCKING |           \
1115                          VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1116 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING |           \
1117                          VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1118
1119 static void
1120 vmx_inject_nmi(struct vmx *vmx, int vcpu)
1121 {
1122         uint32_t gi, info;
1123
1124         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1125         KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
1126             "interruptibility-state %#x", gi));
1127
1128         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1129         KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
1130             "VM-entry interruption information %#x", info));
1131
1132         /*
1133          * Inject the virtual NMI. The vector must be the NMI IDT entry
1134          * or the VMCS entry check will fail.
1135          */
1136         info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
1137         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1138
1139         VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1140
1141         /* Clear the request */
1142         vm_nmi_clear(vmx->vm, vcpu);
1143 }
1144
1145 static void
1146 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
1147 {
1148         struct vm_exception exc;
1149         int vector, need_nmi_exiting, extint_pending;
1150         uint64_t rflags;
1151         uint32_t gi, info;
1152
1153         if (vm_exception_pending(vmx->vm, vcpu, &exc)) {
1154                 KASSERT(exc.vector >= 0 && exc.vector < 32,
1155                     ("%s: invalid exception vector %d", __func__, exc.vector));
1156
1157                 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1158                 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
1159                      "pending exception %d: %#x", __func__, exc.vector, info));
1160
1161                 info = exc.vector | VMCS_INTR_T_HWEXCEPTION | VMCS_INTR_VALID;
1162                 if (exc.error_code_valid) {
1163                         info |= VMCS_INTR_DEL_ERRCODE;
1164                         vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, exc.error_code);
1165                 }
1166                 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1167         }
1168
1169         if (vm_nmi_pending(vmx->vm, vcpu)) {
1170                 /*
1171                  * If there are no conditions blocking NMI injection then
1172                  * inject it directly here otherwise enable "NMI window
1173                  * exiting" to inject it as soon as we can.
1174                  *
1175                  * We also check for STI_BLOCKING because some implementations
1176                  * don't allow NMI injection in this case. If we are running
1177                  * on a processor that doesn't have this restriction it will
1178                  * immediately exit and the NMI will be injected in the
1179                  * "NMI window exiting" handler.
1180                  */
1181                 need_nmi_exiting = 1;
1182                 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1183                 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1184                         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1185                         if ((info & VMCS_INTR_VALID) == 0) {
1186                                 vmx_inject_nmi(vmx, vcpu);
1187                                 need_nmi_exiting = 0;
1188                         } else {
1189                                 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
1190                                     "due to VM-entry intr info %#x", info);
1191                         }
1192                 } else {
1193                         VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
1194                             "Guest Interruptibility-state %#x", gi);
1195                 }
1196
1197                 if (need_nmi_exiting)
1198                         vmx_set_nmi_window_exiting(vmx, vcpu);
1199         }
1200
1201         extint_pending = vm_extint_pending(vmx->vm, vcpu);
1202
1203         if (!extint_pending && virtual_interrupt_delivery) {
1204                 vmx_inject_pir(vlapic);
1205                 return;
1206         }
1207
1208         /*
1209          * If interrupt-window exiting is already in effect then don't bother
1210          * checking for pending interrupts. This is just an optimization and
1211          * not needed for correctness.
1212          */
1213         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
1214                 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
1215                     "pending int_window_exiting");
1216                 return;
1217         }
1218
1219         if (!extint_pending) {
1220                 /* Ask the local apic for a vector to inject */
1221                 if (!vlapic_pending_intr(vlapic, &vector))
1222                         return;
1223
1224                 /*
1225                  * From the Intel SDM, Volume 3, Section "Maskable
1226                  * Hardware Interrupts":
1227                  * - maskable interrupt vectors [16,255] can be delivered
1228                  *   through the local APIC.
1229                  */
1230                 KASSERT(vector >= 16 && vector <= 255,
1231                     ("invalid vector %d from local APIC", vector));
1232         } else {
1233                 /* Ask the legacy pic for a vector to inject */
1234                 vatpic_pending_intr(vmx->vm, &vector);
1235
1236                 /*
1237                  * From the Intel SDM, Volume 3, Section "Maskable
1238                  * Hardware Interrupts":
1239                  * - maskable interrupt vectors [0,255] can be delivered
1240                  *   through the INTR pin.
1241                  */
1242                 KASSERT(vector >= 0 && vector <= 255,
1243                     ("invalid vector %d from INTR", vector));
1244         }
1245
1246         /* Check RFLAGS.IF and the interruptibility state of the guest */
1247         rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1248         if ((rflags & PSL_I) == 0) {
1249                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1250                     "rflags %#lx", vector, rflags);
1251                 goto cantinject;
1252         }
1253
1254         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1255         if (gi & HWINTR_BLOCKING) {
1256                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1257                     "Guest Interruptibility-state %#x", vector, gi);
1258                 goto cantinject;
1259         }
1260
1261         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1262         if (info & VMCS_INTR_VALID) {
1263                 /*
1264                  * This is expected and could happen for multiple reasons:
1265                  * - A vectoring VM-entry was aborted due to astpending
1266                  * - A VM-exit happened during event injection.
1267                  * - An exception was injected above.
1268                  * - An NMI was injected above or after "NMI window exiting"
1269                  */
1270                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1271                     "VM-entry intr info %#x", vector, info);
1272                 goto cantinject;
1273         }
1274
1275         /* Inject the interrupt */
1276         info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
1277         info |= vector;
1278         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1279
1280         if (!extint_pending) {
1281                 /* Update the Local APIC ISR */
1282                 vlapic_intr_accepted(vlapic, vector);
1283         } else {
1284                 vm_extint_clear(vmx->vm, vcpu);
1285                 vatpic_intr_accepted(vmx->vm, vector);
1286
1287                 /*
1288                  * After we accepted the current ExtINT the PIC may
1289                  * have posted another one.  If that is the case, set
1290                  * the Interrupt Window Exiting execution control so
1291                  * we can inject that one too.
1292                  */
1293                 if (vm_extint_pending(vmx->vm, vcpu))
1294                         vmx_set_int_window_exiting(vmx, vcpu);
1295         }
1296
1297         VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1298
1299         return;
1300
1301 cantinject:
1302         /*
1303          * Set the Interrupt Window Exiting execution control so we can inject
1304          * the interrupt as soon as blocking condition goes away.
1305          */
1306         vmx_set_int_window_exiting(vmx, vcpu);
1307 }
1308
1309 /*
1310  * If the Virtual NMIs execution control is '1' then the logical processor
1311  * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1312  * the VMCS. An IRET instruction in VMX non-root operation will remove any
1313  * virtual-NMI blocking.
1314  *
1315  * This unblocking occurs even if the IRET causes a fault. In this case the
1316  * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1317  */
1318 static void
1319 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1320 {
1321         uint32_t gi;
1322
1323         VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
1324         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1325         gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1326         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1327 }
1328
1329 static void
1330 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1331 {
1332         uint32_t gi;
1333
1334         VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
1335         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1336         gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1337         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1338 }
1339
1340 static int
1341 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1342 {
1343         struct vmxctx *vmxctx;
1344         uint64_t xcrval;
1345         const struct xsave_limits *limits;
1346
1347         vmxctx = &vmx->ctx[vcpu];
1348         limits = vmm_get_xsave_limits();
1349
1350         /*
1351          * Note that the processor raises a GP# fault on its own if
1352          * xsetbv is executed for CPL != 0, so we do not have to
1353          * emulate that fault here.
1354          */
1355
1356         /* Only xcr0 is supported. */
1357         if (vmxctx->guest_rcx != 0) {
1358                 vm_inject_gp(vmx->vm, vcpu);
1359                 return (HANDLED);
1360         }
1361
1362         /* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1363         if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1364                 vm_inject_ud(vmx->vm, vcpu);
1365                 return (HANDLED);
1366         }
1367
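        /* XSETBV supplies the new extended control register value in EDX:EAX. */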
1368         xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1369         if ((xcrval & ~limits->xcr0_allowed) != 0) {
1370                 vm_inject_gp(vmx->vm, vcpu);
1371                 return (HANDLED);
1372         }
1373
1374         if (!(xcrval & XFEATURE_ENABLED_X87)) {
1375                 vm_inject_gp(vmx->vm, vcpu);
1376                 return (HANDLED);
1377         }
1378
1379         /* AVX (YMM_Hi128) requires SSE. */
1380         if (xcrval & XFEATURE_ENABLED_AVX &&
1381             (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1382                 vm_inject_gp(vmx->vm, vcpu);
1383                 return (HANDLED);
1384         }
1385
1386         /*
1387          * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1388          * ZMM_Hi256, and Hi16_ZMM.
1389          */
1390         if (xcrval & XFEATURE_AVX512 &&
1391             (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1392             (XFEATURE_AVX512 | XFEATURE_AVX)) {
1393                 vm_inject_gp(vmx->vm, vcpu);
1394                 return (HANDLED);
1395         }
1396
1397         /*
1398          * Intel MPX requires both bound register state flags to be
1399          * set.
1400          */
1401         if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1402             ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1403                 vm_inject_gp(vmx->vm, vcpu);
1404                 return (HANDLED);
1405         }
1406
1407         /*
1408          * This runs "inside" vmrun() with the guest's FPU state, so
1409          * modifying xcr0 directly modifies the guest's xcr0, not the
1410          * host's.
1411          */
1412         load_xcr(0, xcrval);
1413         return (HANDLED);
1414 }
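
/*
 * Worked example (illustrative only): a guest with CR4.OSXSAVE set that
 * executes XSETBV with %ecx = 0 and %edx:%eax = 0x7 is asking for
 * x87 | SSE | AVX. Assuming the host has XSAVE enabled and its
 * xcr0_allowed mask covers those bits, every check above passes and
 * load_xcr(0, 0x7) takes effect in the guest's FPU context. Requests
 * such as 0x5 (x87 | AVX without SSE) or 0x6 (SSE | AVX without x87)
 * are instead rejected by injecting #GP into the guest.
 */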
1415
1416 static int
1417 vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1418 {
1419         int cr, vmcs_guest_cr, vmcs_shadow_cr;
1420         uint64_t crval, regval, ones_mask, zeros_mask;
1421         const struct vmxctx *vmxctx;
1422
1423         /* We only handle mov to %cr0 or %cr4 at this time */
1424         if ((exitqual & 0xf0) != 0x00)
1425                 return (UNHANDLED);
1426
1427         cr = exitqual & 0xf;
1428         if (cr != 0 && cr != 4)
1429                 return (UNHANDLED);
1430
1431         regval = 0; /* silence gcc */
1432         vmxctx = &vmx->ctx[vcpu];
1433
1434         /*
1435          * We must use vmcs_write() directly here because vmcs_setreg() will
1436          * call vmclear(vmcs) as a side-effect which we certainly don't want.
1437          */
1438         switch ((exitqual >> 8) & 0xf) {
1439         case 0:
1440                 regval = vmxctx->guest_rax;
1441                 break;
1442         case 1:
1443                 regval = vmxctx->guest_rcx;
1444                 break;
1445         case 2:
1446                 regval = vmxctx->guest_rdx;
1447                 break;
1448         case 3:
1449                 regval = vmxctx->guest_rbx;
1450                 break;
1451         case 4:
1452                 regval = vmcs_read(VMCS_GUEST_RSP);
1453                 break;
1454         case 5:
1455                 regval = vmxctx->guest_rbp;
1456                 break;
1457         case 6:
1458                 regval = vmxctx->guest_rsi;
1459                 break;
1460         case 7:
1461                 regval = vmxctx->guest_rdi;
1462                 break;
1463         case 8:
1464                 regval = vmxctx->guest_r8;
1465                 break;
1466         case 9:
1467                 regval = vmxctx->guest_r9;
1468                 break;
1469         case 10:
1470                 regval = vmxctx->guest_r10;
1471                 break;
1472         case 11:
1473                 regval = vmxctx->guest_r11;
1474                 break;
1475         case 12:
1476                 regval = vmxctx->guest_r12;
1477                 break;
1478         case 13:
1479                 regval = vmxctx->guest_r13;
1480                 break;
1481         case 14:
1482                 regval = vmxctx->guest_r14;
1483                 break;
1484         case 15:
1485                 regval = vmxctx->guest_r15;
1486                 break;
1487         }
1488
1489         if (cr == 0) {
1490                 ones_mask = cr0_ones_mask;
1491                 zeros_mask = cr0_zeros_mask;
1492                 vmcs_guest_cr = VMCS_GUEST_CR0;
1493                 vmcs_shadow_cr = VMCS_CR0_SHADOW;
1494         } else {
1495                 ones_mask = cr4_ones_mask;
1496                 zeros_mask = cr4_zeros_mask;
1497                 vmcs_guest_cr = VMCS_GUEST_CR4;
1498                 vmcs_shadow_cr = VMCS_CR4_SHADOW;
1499         }
1500         vmcs_write(vmcs_shadow_cr, regval);
1501
1502         crval = regval | ones_mask;
1503         crval &= ~zeros_mask;
1504         vmcs_write(vmcs_guest_cr, crval);
1505
1506         if (cr == 0 && regval & CR0_PG) {
1507                 uint64_t efer, entry_ctls;
1508
1509                 /*
1510                  * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1511                  * the "IA-32e mode guest" bit in VM-entry control must be
1512                  * equal.
1513                  */
1514                 efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1515                 if (efer & EFER_LME) {
1516                         efer |= EFER_LMA;
1517                         vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1518                         entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1519                         entry_ctls |= VM_ENTRY_GUEST_LMA;
1520                         vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1521                 }
1522         }
1523
1524         return (HANDLED);
1525 }
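
/*
 * Worked example (illustrative only, with hypothetical mask values): if
 * the guest executes "mov %rax, %cr0" with %rax = 0x21 (CR0_PE | CR0_NE),
 * that exact value lands in the CR0 read shadow, so the guest reads back
 * what it wrote. The value actually loaded into the guest CR0, however,
 * is (0x21 | cr0_ones_mask) & ~cr0_zeros_mask: bits the hypervisor
 * requires are forced on and bits it forbids are forced off. Because
 * CR0_PG is clear in this example, the EFER/IA-32e fixup above does not
 * apply. The mask contents used here are hypothetical; the real masks
 * are set up elsewhere in this driver.
 */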
1526
1527 static enum vie_cpu_mode
1528 vmx_cpu_mode(void)
1529 {
1530
1531         if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA)
1532                 return (CPU_MODE_64BIT);
1533         else
1534                 return (CPU_MODE_COMPATIBILITY);
1535 }
1536
1537 static enum vie_paging_mode
1538 vmx_paging_mode(void)
1539 {
1540
1541         if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1542                 return (PAGING_MODE_FLAT);
1543         if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1544                 return (PAGING_MODE_32);
1545         if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1546                 return (PAGING_MODE_64);
1547         else
1548                 return (PAGING_MODE_PAE);
1549 }
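
/*
 * The decision above, summarized as a table (added for clarity; '-'
 * means don't care):
 *
 *	CR0.PG	CR4.PAE	EFER.LME	result
 *	  0	   -	   -		PAGING_MODE_FLAT
 *	  1	   0	   -		PAGING_MODE_32
 *	  1	   1	   0		PAGING_MODE_PAE
 *	  1	   1	   1		PAGING_MODE_64
 */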
1550
1551 static int
1552 ept_fault_type(uint64_t ept_qual)
1553 {
1554         int fault_type;
1555
1556         if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1557                 fault_type = VM_PROT_WRITE;
1558         else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1559                 fault_type = VM_PROT_EXECUTE;
1560         else
1561                 fault_type = VM_PROT_READ;
1562
1563         return (fault_type);
1564 }
1565
1566 static boolean_t
1567 ept_emulation_fault(uint64_t ept_qual)
1568 {
1569         int read, write;
1570
1571         /* EPT fault on an instruction fetch doesn't make sense here */
1572         if (ept_qual & EPT_VIOLATION_INST_FETCH)
1573                 return (FALSE);
1574
1575         /* EPT fault must be a read fault or a write fault */
1576         read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1577         write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1578         if ((read | write) == 0)
1579                 return (FALSE);
1580
1581         /*
1582          * The EPT violation must have been caused by accessing a
1583          * guest-physical address that is a translation of a guest-linear
1584          * address.
1585          */
1586         if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1587             (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1588                 return (FALSE);
1589         }
1590
1591         return (TRUE);
1592 }
1593
1594 static __inline int
1595 apic_access_virtualization(struct vmx *vmx, int vcpuid)
1596 {
1597         uint32_t proc_ctls2;
1598
1599         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1600         return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1601 }
1602
1603 static __inline int
1604 x2apic_virtualization(struct vmx *vmx, int vcpuid)
1605 {
1606         uint32_t proc_ctls2;
1607
1608         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1609         return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1610 }
1611
1612 static int
1613 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1614     uint64_t qual)
1615 {
1616         int error, handled, offset;
1617         uint32_t *apic_regs, vector;
1618         bool retu;
1619
1620         handled = HANDLED;
1621         offset = APIC_WRITE_OFFSET(qual);
1622
1623         if (!apic_access_virtualization(vmx, vcpuid)) {
1624                 /*
1625                  * In general there should not be any APIC write VM-exits
1626                  * unless APIC-access virtualization is enabled.
1627                  *
1628                  * However self-IPI virtualization can legitimately trigger
1629                  * an APIC-write VM-exit so treat it specially.
1630                  */
1631                 if (x2apic_virtualization(vmx, vcpuid) &&
1632                     offset == APIC_OFFSET_SELF_IPI) {
1633                         apic_regs = (uint32_t *)(vlapic->apic_page);
1634                         vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1635                         vlapic_self_ipi_handler(vlapic, vector);
1636                         return (HANDLED);
1637                 } else
1638                         return (UNHANDLED);
1639         }
1640
1641         switch (offset) {
1642         case APIC_OFFSET_ID:
1643                 vlapic_id_write_handler(vlapic);
1644                 break;
1645         case APIC_OFFSET_LDR:
1646                 vlapic_ldr_write_handler(vlapic);
1647                 break;
1648         case APIC_OFFSET_DFR:
1649                 vlapic_dfr_write_handler(vlapic);
1650                 break;
1651         case APIC_OFFSET_SVR:
1652                 vlapic_svr_write_handler(vlapic);
1653                 break;
1654         case APIC_OFFSET_ESR:
1655                 vlapic_esr_write_handler(vlapic);
1656                 break;
1657         case APIC_OFFSET_ICR_LOW:
1658                 retu = false;
1659                 error = vlapic_icrlo_write_handler(vlapic, &retu);
1660                 if (error != 0 || retu)
1661                         handled = UNHANDLED;
1662                 break;
1663         case APIC_OFFSET_CMCI_LVT:
1664         case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1665                 vlapic_lvt_write_handler(vlapic, offset);
1666                 break;
1667         case APIC_OFFSET_TIMER_ICR:
1668                 vlapic_icrtmr_write_handler(vlapic);
1669                 break;
1670         case APIC_OFFSET_TIMER_DCR:
1671                 vlapic_dcr_write_handler(vlapic);
1672                 break;
1673         default:
1674                 handled = UNHANDLED;
1675                 break;
1676         }
1677         return (handled);
1678 }
1679
1680 static bool
1681 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1682 {
1683
1684         if (apic_access_virtualization(vmx, vcpuid) &&
1685             (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
1686                 return (true);
1687         else
1688                 return (false);
1689 }
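
/*
 * Illustrative example: with APIC-access virtualization enabled, a guest
 * load from physical address DEFAULT_APIC_BASE + 0x300 (the xAPIC ICR low
 * register) falls inside the 4KB window checked above and is treated as
 * an APIC access fault, while an access one page higher is not.
 */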
1690
1691 static int
1692 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
1693 {
1694         uint64_t qual;
1695         int access_type, offset, allowed;
1696
1697         if (!apic_access_virtualization(vmx, vcpuid))
1698                 return (UNHANDLED);
1699
1700         qual = vmexit->u.vmx.exit_qualification;
1701         access_type = APIC_ACCESS_TYPE(qual);
1702         offset = APIC_ACCESS_OFFSET(qual);
1703
1704         allowed = 0;
1705         if (access_type == 0) {
1706                 /*
1707                  * Read data access to the following registers is expected.
1708                  */
1709                 switch (offset) {
1710                 case APIC_OFFSET_APR:
1711                 case APIC_OFFSET_PPR:
1712                 case APIC_OFFSET_RRR:
1713                 case APIC_OFFSET_CMCI_LVT:
1714                 case APIC_OFFSET_TIMER_CCR:
1715                         allowed = 1;
1716                         break;
1717                 default:
1718                         break;
1719                 }
1720         } else if (access_type == 1) {
1721                 /*
1722                  * Write data access to the following registers is expected.
1723                  */
1724                 switch (offset) {
1725                 case APIC_OFFSET_VER:
1726                 case APIC_OFFSET_APR:
1727                 case APIC_OFFSET_PPR:
1728                 case APIC_OFFSET_RRR:
1729                 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
1730                 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
1731                 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
1732                 case APIC_OFFSET_CMCI_LVT:
1733                 case APIC_OFFSET_TIMER_CCR:
1734                         allowed = 1;
1735                         break;
1736                 default:
1737                         break;
1738                 }
1739         }
1740
1741         if (allowed) {
1742                 vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1743                 vmexit->u.inst_emul.gpa = DEFAULT_APIC_BASE + offset;
1744                 vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
1745                 vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
1746                 vmexit->u.inst_emul.cpu_mode = vmx_cpu_mode();
1747                 vmexit->u.inst_emul.paging_mode = vmx_paging_mode();
1748         }
1749
1750         /*
1751          * Regardless of whether the APIC-access is allowed this handler
1752          * always returns UNHANDLED:
1753          * - if the access is allowed then it is handled by emulating the
1754          *   instruction that caused the VM-exit (outside the critical section)
1755          * - if the access is not allowed then it will be converted to an
1756          *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
1757          */
1758         return (UNHANDLED);
1759 }
1760
1761 static int
1762 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1763 {
1764         int error, handled;
1765         struct vmxctx *vmxctx;
1766         struct vlapic *vlapic;
1767         uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, reason;
1768         uint64_t qual, gpa;
1769         bool retu;
1770
1771         CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
1772         CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
1773
1774         handled = UNHANDLED;
1775         vmxctx = &vmx->ctx[vcpu];
1776
1777         qual = vmexit->u.vmx.exit_qualification;
1778         reason = vmexit->u.vmx.exit_reason;
1779         vmexit->exitcode = VM_EXITCODE_BOGUS;
1780
1781         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
1782
1783         /*
1784          * VM exits that could be triggered during event injection on the
1785          * previous VM entry need to be handled specially by re-injecting
1786          * the event.
1787          *
1788          * See "Information for VM Exits During Event Delivery" in Intel SDM
1789          * for details.
1790          */
1791         switch (reason) {
1792         case EXIT_REASON_EPT_FAULT:
1793         case EXIT_REASON_EPT_MISCONFIG:
1794         case EXIT_REASON_APIC_ACCESS:
1795         case EXIT_REASON_TASK_SWITCH:
1796         case EXIT_REASON_EXCEPTION:
1797                 idtvec_info = vmcs_idt_vectoring_info();
1798                 if (idtvec_info & VMCS_IDT_VEC_VALID) {
1799                         idtvec_info &= ~(1 << 12); /* clear undefined bit */
1800                         vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info);
1801                         if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
1802                                 idtvec_err = vmcs_idt_vectoring_err();
1803                                 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
1804                                     idtvec_err);
1805                         }
1806                         /*
1807                          * If 'virtual NMIs' are being used and the VM-exit
1808                          * happened while injecting an NMI during the previous
1809                          * VM-entry, then clear "blocking by NMI" in the Guest
1810                          * Interruptibility-state.
1811                          */
1812                         if ((idtvec_info & VMCS_INTR_T_MASK) ==
1813                             VMCS_INTR_T_NMI) {
1814                                  vmx_clear_nmi_blocking(vmx, vcpu);
1815                         }
1816                         vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
1817                 }
                     break;
1818         default:
1819                 idtvec_info = 0;
1820                 break;
1821         }
1822
1823         switch (reason) {
1824         case EXIT_REASON_CR_ACCESS:
1825                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
1826                 handled = vmx_emulate_cr_access(vmx, vcpu, qual);
1827                 break;
1828         case EXIT_REASON_RDMSR:
1829                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
1830                 retu = false;
1831                 ecx = vmxctx->guest_rcx;
1832                 error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
1833                 if (error) {
1834                         vmexit->exitcode = VM_EXITCODE_RDMSR;
1835                         vmexit->u.msr.code = ecx;
1836                 } else if (!retu) {
1837                         handled = HANDLED;
1838                 } else {
1839                         /* Return to userspace with a valid exitcode */
1840                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1841                             ("emulate_rdmsr retu with bogus exitcode"));
1842                 }
1843                 break;
1844         case EXIT_REASON_WRMSR:
1845                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
1846                 retu = false;
1847                 eax = vmxctx->guest_rax;
1848                 ecx = vmxctx->guest_rcx;
1849                 edx = vmxctx->guest_rdx;
1850                 error = emulate_wrmsr(vmx->vm, vcpu, ecx,
1851                     (uint64_t)edx << 32 | eax, &retu);
1852                 if (error) {
1853                         vmexit->exitcode = VM_EXITCODE_WRMSR;
1854                         vmexit->u.msr.code = ecx;
1855                         vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
1856                 } else if (!retu) {
1857                         handled = HANDLED;
1858                 } else {
1859                         /* Return to userspace with a valid exitcode */
1860                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1861                             ("emulate_wrmsr retu with bogus exitcode"));
1862                 }
1863                 break;
1864         case EXIT_REASON_HLT:
1865                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
1866                 vmexit->exitcode = VM_EXITCODE_HLT;
1867                 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1868                 break;
1869         case EXIT_REASON_MTF:
1870                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
1871                 vmexit->exitcode = VM_EXITCODE_MTRAP;
1872                 break;
1873         case EXIT_REASON_PAUSE:
1874                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
1875                 vmexit->exitcode = VM_EXITCODE_PAUSE;
1876                 break;
1877         case EXIT_REASON_INTR_WINDOW:
1878                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
1879                 vmx_clear_int_window_exiting(vmx, vcpu);
1880                 return (1);
1881         case EXIT_REASON_EXT_INTR:
1882                 /*
1883                  * External interrupts serve only to cause VM exits and allow
1884                  * the host interrupt handler to run.
1885                  *
1886                  * If this external interrupt triggers a virtual interrupt
1887                  * to a VM, then that state will be recorded by the
1888                  * host interrupt handler in the VM's softc. We will inject
1889                  * this virtual interrupt during the subsequent VM enter.
1890                  */
1891                 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
1892
1893                 /*
1894                  * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
1895                  * This appears to be a bug in VMware Fusion?
1896                  */
1897                 if (!(intr_info & VMCS_INTR_VALID))
1898                         return (1);
1899                 KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
1900                     (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
1901                     ("VM exit interruption info invalid: %#x", intr_info));
1902                 vmx_trigger_hostintr(intr_info & 0xff);
1903
1904                 /*
1905                  * This is special. We want to treat this as a 'handled'
1906                  * VM-exit but not increment the instruction pointer.
1907                  */
1908                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
1909                 return (1);
1910         case EXIT_REASON_NMI_WINDOW:
1911                 /* Exit to allow the pending virtual NMI to be injected */
1912                 if (vm_nmi_pending(vmx->vm, vcpu))
1913                         vmx_inject_nmi(vmx, vcpu);
1914                 vmx_clear_nmi_window_exiting(vmx, vcpu);
1915                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
1916                 return (1);
1917         case EXIT_REASON_INOUT:
1918                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
1919                 vmexit->exitcode = VM_EXITCODE_INOUT;
1920                 vmexit->u.inout.bytes = (qual & 0x7) + 1;
1921                 vmexit->u.inout.in = (qual & 0x8) ? 1 : 0;
1922                 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
1923                 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
1924                 vmexit->u.inout.port = (uint16_t)(qual >> 16);
1925                 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
1926                 error = emulate_ioport(vmx->vm, vcpu, vmexit);
1927                 if (error == 0) {
1928                         handled = 1;
1929                         vmxctx->guest_rax = vmexit->u.inout.eax;
1930                 }
1931                 break;
1932         case EXIT_REASON_CPUID:
1933                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
1934                 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
1935                 break;
1936         case EXIT_REASON_EXCEPTION:
1937                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
1938                 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
1939                 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
1940                     ("VM exit interruption info invalid: %#x", intr_info));
1941
1942                 /*
1943                  * If Virtual NMIs control is 1 and the VM-exit is due to a
1944                  * fault encountered during the execution of IRET then we must
1945                  * restore the state of "virtual-NMI blocking" before resuming
1946                  * the guest.
1947                  *
1948                  * See "Resuming Guest Software after Handling an Exception".
1949                  */
1950                 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
1951                     (intr_info & 0xff) != IDT_DF &&
1952                     (intr_info & EXIT_QUAL_NMIUDTI) != 0)
1953                         vmx_restore_nmi_blocking(vmx, vcpu);
1954
1955                 /*
1956                  * The NMI has already been handled in vmx_exit_handle_nmi().
1957                  */
1958                 if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI)
1959                         return (1);
1960                 break;
1961         case EXIT_REASON_EPT_FAULT:
1962                 /*
1963                  * If 'gpa' lies within the address space allocated to
1964                  * memory then this must be a nested page fault otherwise
1965                  * this must be an instruction that accesses MMIO space.
1966                  */
1967                 gpa = vmcs_gpa();
1968                 if (vm_mem_allocated(vmx->vm, gpa) ||
1969                     apic_access_fault(vmx, vcpu, gpa)) {
1970                         vmexit->exitcode = VM_EXITCODE_PAGING;
1971                         vmexit->u.paging.gpa = gpa;
1972                         vmexit->u.paging.fault_type = ept_fault_type(qual);
1973                         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
1974                 } else if (ept_emulation_fault(qual)) {
1975                         vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1976                         vmexit->u.inst_emul.gpa = gpa;
1977                         vmexit->u.inst_emul.gla = vmcs_gla();
1978                         vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
1979                         vmexit->u.inst_emul.cpu_mode = vmx_cpu_mode();
1980                         vmexit->u.inst_emul.paging_mode = vmx_paging_mode();
1981                         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
1982                 }
1983                 /*
1984                  * If Virtual NMIs control is 1 and the VM-exit is due to an
1985                  * EPT fault during the execution of IRET then we must restore
1986                  * the state of "virtual-NMI blocking" before resuming.
1987                  *
1988                  * See description of "NMI unblocking due to IRET" in
1989                  * "Exit Qualification for EPT Violations".
1990                  */
1991                 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
1992                     (qual & EXIT_QUAL_NMIUDTI) != 0)
1993                         vmx_restore_nmi_blocking(vmx, vcpu);
1994                 break;
1995         case EXIT_REASON_VIRTUALIZED_EOI:
1996                 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
1997                 vmexit->u.ioapic_eoi.vector = qual & 0xFF;
1998                 vmexit->inst_length = 0;        /* trap-like */
1999                 break;
2000         case EXIT_REASON_APIC_ACCESS:
2001                 handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2002                 break;
2003         case EXIT_REASON_APIC_WRITE:
2004                 /*
2005                  * APIC-write VM exit is trap-like so the %rip is already
2006                  * pointing to the next instruction.
2007                  */
2008                 vmexit->inst_length = 0;
2009                 vlapic = vm_lapic(vmx->vm, vcpu);
2010                 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2011                 break;
2012         case EXIT_REASON_XSETBV:
2013                 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2014                 break;
2015         default:
2016                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2017                 break;
2018         }
2019
2020         if (handled) {
2021                 /*
2022                  * It is possible that control is returned to userland
2023                  * even though we were able to handle the VM exit in the
2024                  * kernel.
2025                  *
2026                  * In such a case we want to make sure that the userland
2027                  * restarts guest execution at the instruction *after*
2028                  * the one we just processed. Therefore we update the
2029                  * guest rip in the VMCS and in 'vmexit'.
2030                  */
2031                 vmexit->rip += vmexit->inst_length;
2032                 vmexit->inst_length = 0;
2033                 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2034         } else {
2035                 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2036                         /*
2037                          * If this VM exit was not claimed by anybody then
2038                          * treat it as a generic VMX exit.
2039                          */
2040                         vmexit->exitcode = VM_EXITCODE_VMX;
2041                         vmexit->u.vmx.status = VM_SUCCESS;
2042                         vmexit->u.vmx.inst_type = 0;
2043                         vmexit->u.vmx.inst_error = 0;
2044                 } else {
2045                         /*
2046                          * The exitcode and collateral have been populated.
2047                          * The VM exit will be processed further in userland.
2048                          */
2049                 }
2050         }
2051         return (handled);
2052 }
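
/*
 * Illustrative example of the HANDLED path above: a CPUID exit at guest
 * %rip R reports an instruction length of 2, so after the exit is handled
 * in the kernel both the VMCS guest %rip and 'vmexit->rip' become R + 2
 * and 'inst_length' is zeroed. The exitcode stays VM_EXITCODE_BOGUS,
 * which is exactly what the consistency check in vmx_run() expects for a
 * handled exit.
 */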
2053
2054 static __inline int
2055 vmx_exit_astpending(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2056 {
2057
2058         vmexit->rip = vmcs_guest_rip();
2059         vmexit->inst_length = 0;
2060         vmexit->exitcode = VM_EXITCODE_BOGUS;
2061         vmx_astpending_trace(vmx, vcpu, vmexit->rip);
2062         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);
2063
2064         return (HANDLED);
2065 }
2066
2067 static __inline int
2068 vmx_exit_rendezvous(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2069 {
2070
2071         vmexit->rip = vmcs_guest_rip();
2072         vmexit->inst_length = 0;
2073         vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
2074         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RENDEZVOUS, 1);
2075
2076         return (UNHANDLED);
2077 }
2078
2079 static __inline int
2080 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2081 {
2082
2083         KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2084             ("vmx_exit_inst_error: invalid inst_fail_status %d",
2085             vmxctx->inst_fail_status));
2086
2087         vmexit->inst_length = 0;
2088         vmexit->exitcode = VM_EXITCODE_VMX;
2089         vmexit->u.vmx.status = vmxctx->inst_fail_status;
2090         vmexit->u.vmx.inst_error = vmcs_instruction_error();
2091         vmexit->u.vmx.exit_reason = ~0;
2092         vmexit->u.vmx.exit_qualification = ~0;
2093
2094         switch (rc) {
2095         case VMX_VMRESUME_ERROR:
2096         case VMX_VMLAUNCH_ERROR:
2097         case VMX_INVEPT_ERROR:
2098                 vmexit->u.vmx.inst_type = rc;
2099                 break;
2100         default:
2101                 panic("vmx_exit_inst_error: vmx_enter_guest returned %d", rc);
2102         }
2103
2104         return (UNHANDLED);
2105 }
2106
2107 /*
2108  * If the NMI-exiting VM execution control is set to '1' then an NMI in
2109  * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2110  * sufficient to simply vector to the NMI handler via a software interrupt.
2111  * However, this must be done before maskable interrupts are enabled;
2112  * otherwise the "iret" issued by an interrupt handler will incorrectly
2113  * clear NMI blocking.
2114  */
2115 static __inline void
2116 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2117 {
2118         uint32_t intr_info;
2119
2120         KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2121
2122         if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2123                 return;
2124
2125         intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2126         KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2127             ("VM exit interruption info invalid: %#x", intr_info));
2128
2129         if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2130                 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2131                     "to NMI has invalid vector: %#x", intr_info));
2132                 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2133                 __asm __volatile("int $2");
2134         }
2135 }
2136
2137 static int
2138 vmx_run(void *arg, int vcpu, register_t startrip, pmap_t pmap,
2139     void *rendezvous_cookie, void *suspend_cookie)
2140 {
2141         int rc, handled, launched;
2142         struct vmx *vmx;
2143         struct vm *vm;
2144         struct vmxctx *vmxctx;
2145         struct vmcs *vmcs;
2146         struct vm_exit *vmexit;
2147         struct vlapic *vlapic;
2148         uint64_t rip;
2149         uint32_t exit_reason;
2150
2151         vmx = arg;
2152         vm = vmx->vm;
2153         vmcs = &vmx->vmcs[vcpu];
2154         vmxctx = &vmx->ctx[vcpu];
2155         vlapic = vm_lapic(vm, vcpu);
2156         vmexit = vm_exitinfo(vm, vcpu);
2157         launched = 0;
2158
2159         KASSERT(vmxctx->pmap == pmap,
2160             ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2161
2162         VMPTRLD(vmcs);
2163
2164         /*
2165          * XXX
2166          * We do this every time because we may setup the virtual machine
2167          * from a different process than the one that actually runs it.
2168          *
2169          * If the life of a virtual machine was spent entirely in the context
2170          * of a single process we could do this once in vmx_vminit().
2171          */
2172         vmcs_write(VMCS_HOST_CR3, rcr3());
2173
2174         vmcs_write(VMCS_GUEST_RIP, startrip);
2175         vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2176         do {
2177                 /*
2178                  * Interrupts are disabled from this point on until the
2179                  * guest starts executing. This is done for the following
2180                  * reasons:
2181                  *
2182                  * If an AST is asserted on this thread after the check below,
2183                  * then the IPI_AST notification will not be lost, because it
2184                  * will cause a VM exit due to external interrupt as soon as
2185                  * the guest state is loaded.
2186                  *
2187                  * A posted interrupt after 'vmx_inject_interrupts()' will
2188                  * not be "lost" because it will be held pending in the host
2189                  * APIC because interrupts are disabled. The pending interrupt
2190                  * will be recognized as soon as the guest state is loaded.
2191                  *
2192                  * The same reasoning applies to the IPI generated by
2193                  * pmap_invalidate_ept().
2194                  */
2195                 disable_intr();
2196                 if (vcpu_suspended(suspend_cookie)) {
2197                         enable_intr();
2198                         vm_exit_suspended(vmx->vm, vcpu, vmcs_guest_rip());
2199                         handled = UNHANDLED;
2200                         break;
2201                 }
2202
2203                 if (vcpu_rendezvous_pending(rendezvous_cookie)) {
2204                         enable_intr();
2205                         handled = vmx_exit_rendezvous(vmx, vcpu, vmexit);
2206                         break;
2207                 }
2208
2209                 if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
2210                         enable_intr();
2211                         handled = vmx_exit_astpending(vmx, vcpu, vmexit);
2212                         break;
2213                 }
2214
2215                 vmx_inject_interrupts(vmx, vcpu, vlapic);
2216                 vmx_run_trace(vmx, vcpu);
2217                 rc = vmx_enter_guest(vmxctx, vmx, launched);
2218
2219                 /* Collect some information for VM exit processing */
2220                 vmexit->rip = rip = vmcs_guest_rip();
2221                 vmexit->inst_length = vmexit_instruction_length();
2222                 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2223                 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2224
2225                 if (rc == VMX_GUEST_VMEXIT) {
2226                         vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2227                         enable_intr();
2228                         handled = vmx_exit_process(vmx, vcpu, vmexit);
2229                 } else {
2230                         enable_intr();
2231                         handled = vmx_exit_inst_error(vmxctx, rc, vmexit);
2232                 }
2233                 launched = 1;
2234                 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2235         } while (handled);
2236
2237         /*
2238          * If a VM exit has been handled then the exitcode must be BOGUS.
2239          * If a VM exit has not been handled then the exitcode must not be BOGUS.
2240          */
2241         if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2242             (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2243                 panic("Mismatch between handled (%d) and exitcode (%d)",
2244                       handled, vmexit->exitcode);
2245         }
2246
2247         if (!handled)
2248                 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2249
2250         VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2251             vmexit->exitcode);
2252
2253         VMCLEAR(vmcs);
2254         return (0);
2255 }
2256
2257 static void
2258 vmx_vmcleanup(void *arg)
2259 {
2260         int i, error;
2261         struct vmx *vmx = arg;
2262
2263         if (apic_access_virtualization(vmx, 0))
2264                 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2265
2266         for (i = 0; i < VM_MAXCPU; i++)
2267                 vpid_free(vmx->state[i].vpid);
2268
2269         /*
2270          * XXXSMP we also need to clear the VMCS active on the other vcpus.
2271          */
2272         error = vmclear(&vmx->vmcs[0]);
2273         if (error != 0)
2274                 panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error);
2275
2276         free(vmx, M_VMX);
2277
2278         return;
2279 }
2280
2281 static register_t *
2282 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2283 {
2284
2285         switch (reg) {
2286         case VM_REG_GUEST_RAX:
2287                 return (&vmxctx->guest_rax);
2288         case VM_REG_GUEST_RBX:
2289                 return (&vmxctx->guest_rbx);
2290         case VM_REG_GUEST_RCX:
2291                 return (&vmxctx->guest_rcx);
2292         case VM_REG_GUEST_RDX:
2293                 return (&vmxctx->guest_rdx);
2294         case VM_REG_GUEST_RSI:
2295                 return (&vmxctx->guest_rsi);
2296         case VM_REG_GUEST_RDI:
2297                 return (&vmxctx->guest_rdi);
2298         case VM_REG_GUEST_RBP:
2299                 return (&vmxctx->guest_rbp);
2300         case VM_REG_GUEST_R8:
2301                 return (&vmxctx->guest_r8);
2302         case VM_REG_GUEST_R9:
2303                 return (&vmxctx->guest_r9);
2304         case VM_REG_GUEST_R10:
2305                 return (&vmxctx->guest_r10);
2306         case VM_REG_GUEST_R11:
2307                 return (&vmxctx->guest_r11);
2308         case VM_REG_GUEST_R12:
2309                 return (&vmxctx->guest_r12);
2310         case VM_REG_GUEST_R13:
2311                 return (&vmxctx->guest_r13);
2312         case VM_REG_GUEST_R14:
2313                 return (&vmxctx->guest_r14);
2314         case VM_REG_GUEST_R15:
2315                 return (&vmxctx->guest_r15);
2316         default:
2317                 break;
2318         }
2319         return (NULL);
2320 }
2321
2322 static int
2323 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2324 {
2325         register_t *regp;
2326
2327         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2328                 *retval = *regp;
2329                 return (0);
2330         } else
2331                 return (EINVAL);
2332 }
2333
2334 static int
2335 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2336 {
2337         register_t *regp;
2338
2339         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2340                 *regp = val;
2341                 return (0);
2342         } else
2343                 return (EINVAL);
2344 }
2345
2346 static int
2347 vmx_shadow_reg(int reg)
2348 {
2349         int shreg;
2350
2351         shreg = -1;
2352
2353         switch (reg) {
2354         case VM_REG_GUEST_CR0:
2355                 shreg = VMCS_CR0_SHADOW;
2356                 break;
2357         case VM_REG_GUEST_CR4:
2358                 shreg = VMCS_CR4_SHADOW;
2359                 break;
2360         default:
2361                 break;
2362         }
2363
2364         return (shreg);
2365 }
2366
2367 static int
2368 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
2369 {
2370         int running, hostcpu;
2371         struct vmx *vmx = arg;
2372
2373         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2374         if (running && hostcpu != curcpu)
2375                 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
2376
2377         if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
2378                 return (0);
2379
2380         return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
2381 }
2382
2383 static int
2384 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
2385 {
2386         int error, hostcpu, running, shadow;
2387         uint64_t ctls;
2388         struct vmx *vmx = arg;
2389
2390         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2391         if (running && hostcpu != curcpu)
2392                 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
2393
2394         if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
2395                 return (0);
2396
2397         error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
2398
2399         if (error == 0) {
2400                 /*
2401                  * If the "load EFER" VM-entry control is 1 then the
2402                  * value of EFER.LMA must be identical to "IA-32e mode guest"
2403                  * bit in the VM-entry control.
2404                  */
2405                 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
2406                     (reg == VM_REG_GUEST_EFER)) {
2407                         vmcs_getreg(&vmx->vmcs[vcpu], running,
2408                                     VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
2409                         if (val & EFER_LMA)
2410                                 ctls |= VM_ENTRY_GUEST_LMA;
2411                         else
2412                                 ctls &= ~VM_ENTRY_GUEST_LMA;
2413                         vmcs_setreg(&vmx->vmcs[vcpu], running,
2414                                     VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
2415                 }
2416
2417                 shadow = vmx_shadow_reg(reg);
2418                 if (shadow > 0) {
2419                         /*
2420                          * Store the unmodified value in the shadow
2421                          */                     
2422                         error = vmcs_setreg(&vmx->vmcs[vcpu], running,
2423                                     VMCS_IDENT(shadow), val);
2424                 }
2425         }
2426
2427         return (error);
2428 }
2429
2430 static int
2431 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2432 {
2433         struct vmx *vmx = arg;
2434
2435         return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc));
2436 }
2437
2438 static int
2439 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
2440 {
2441         struct vmx *vmx = arg;
2442
2443         return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc));
2444 }
2445
2446 static int
2447 vmx_getcap(void *arg, int vcpu, int type, int *retval)
2448 {
2449         struct vmx *vmx = arg;
2450         int vcap;
2451         int ret;
2452
2453         ret = ENOENT;
2454
2455         vcap = vmx->cap[vcpu].set;
2456
2457         switch (type) {
2458         case VM_CAP_HALT_EXIT:
2459                 if (cap_halt_exit)
2460                         ret = 0;
2461                 break;
2462         case VM_CAP_PAUSE_EXIT:
2463                 if (cap_pause_exit)
2464                         ret = 0;
2465                 break;
2466         case VM_CAP_MTRAP_EXIT:
2467                 if (cap_monitor_trap)
2468                         ret = 0;
2469                 break;
2470         case VM_CAP_UNRESTRICTED_GUEST:
2471                 if (cap_unrestricted_guest)
2472                         ret = 0;
2473                 break;
2474         case VM_CAP_ENABLE_INVPCID:
2475                 if (cap_invpcid)
2476                         ret = 0;
2477                 break;
2478         default:
2479                 break;
2480         }
2481
2482         if (ret == 0)
2483                 *retval = (vcap & (1 << type)) ? 1 : 0;
2484
2485         return (ret);
2486 }
2487
2488 static int
2489 vmx_setcap(void *arg, int vcpu, int type, int val)
2490 {
2491         struct vmx *vmx = arg;
2492         struct vmcs *vmcs = &vmx->vmcs[vcpu];
2493         uint32_t baseval;
2494         uint32_t *pptr;
2495         int error;
2496         int flag;
2497         int reg;
2498         int retval;
2499
2500         retval = ENOENT;
2501         pptr = NULL;
2502
2503         switch (type) {
2504         case VM_CAP_HALT_EXIT:
2505                 if (cap_halt_exit) {
2506                         retval = 0;
2507                         pptr = &vmx->cap[vcpu].proc_ctls;
2508                         baseval = *pptr;
2509                         flag = PROCBASED_HLT_EXITING;
2510                         reg = VMCS_PRI_PROC_BASED_CTLS;
2511                 }
2512                 break;
2513         case VM_CAP_MTRAP_EXIT:
2514                 if (cap_monitor_trap) {
2515                         retval = 0;
2516                         pptr = &vmx->cap[vcpu].proc_ctls;
2517                         baseval = *pptr;
2518                         flag = PROCBASED_MTF;
2519                         reg = VMCS_PRI_PROC_BASED_CTLS;
2520                 }
2521                 break;
2522         case VM_CAP_PAUSE_EXIT:
2523                 if (cap_pause_exit) {
2524                         retval = 0;
2525                         pptr = &vmx->cap[vcpu].proc_ctls;
2526                         baseval = *pptr;
2527                         flag = PROCBASED_PAUSE_EXITING;
2528                         reg = VMCS_PRI_PROC_BASED_CTLS;
2529                 }
2530                 break;
2531         case VM_CAP_UNRESTRICTED_GUEST:
2532                 if (cap_unrestricted_guest) {
2533                         retval = 0;
2534                         pptr = &vmx->cap[vcpu].proc_ctls2;
2535                         baseval = *pptr;
2536                         flag = PROCBASED2_UNRESTRICTED_GUEST;
2537                         reg = VMCS_SEC_PROC_BASED_CTLS;
2538                 }
2539                 break;
2540         case VM_CAP_ENABLE_INVPCID:
2541                 if (cap_invpcid) {
2542                         retval = 0;
2543                         pptr = &vmx->cap[vcpu].proc_ctls2;
2544                         baseval = *pptr;
2545                         flag = PROCBASED2_ENABLE_INVPCID;
2546                         reg = VMCS_SEC_PROC_BASED_CTLS;
2547                 }
2548                 break;
2549         default:
2550                 break;
2551         }
2552
2553         if (retval == 0) {
2554                 if (val) {
2555                         baseval |= flag;
2556                 } else {
2557                         baseval &= ~flag;
2558                 }
2559                 VMPTRLD(vmcs);
2560                 error = vmwrite(reg, baseval);
2561                 VMCLEAR(vmcs);
2562
2563                 if (error) {
2564                         retval = error;
2565                 } else {
2566                         /*
2567                          * Update optional stored flags, and record
2568                          * setting
2569                          */
2570                         if (pptr != NULL) {
2571                                 *pptr = baseval;
2572                         }
2573
2574                         if (val) {
2575                                 vmx->cap[vcpu].set |= (1 << type);
2576                         } else {
2577                                 vmx->cap[vcpu].set &= ~(1 << type);
2578                         }
2579                 }
2580         }
2581
2582         return (retval);
2583 }
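
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller holding the vmx softc could enable HLT exiting for a vcpu
 * through the capability interface above, provided the hardware
 * advertises the capability (cap_halt_exit); this sets
 * PROCBASED_HLT_EXITING in the primary processor-based VM-execution
 * controls. In practice such requests arrive via the vmm_ops table at
 * the end of this file.
 */
#if 0
	int error;

	error = vmx_setcap(vmx, vcpu, VM_CAP_HALT_EXIT, 1);
	if (error != 0)
		printf("vmx: HLT exiting not supported (%d)\n", error);
#endif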
2584
2585 struct vlapic_vtx {
2586         struct vlapic   vlapic;
2587         struct pir_desc *pir_desc;
2588         struct vmx      *vmx;
2589 };
2590
2591 #define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)   \
2592 do {                                                                    \
2593         VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",     \
2594             level ? "level" : "edge", vector);                          \
2595         VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);  \
2596         VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);  \
2597         VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);  \
2598         VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);  \
2599         VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
2600 } while (0)
2601
2602 /*
2603  * vlapic->ops handlers that utilize the APICv hardware assist described in
2604  * Chapter 29 of the Intel SDM.
2605  */
2606 static int
2607 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
2608 {
2609         struct vlapic_vtx *vlapic_vtx;
2610         struct pir_desc *pir_desc;
2611         uint64_t mask;
2612         int idx, notify;
2613
2614         vlapic_vtx = (struct vlapic_vtx *)vlapic;
2615         pir_desc = vlapic_vtx->pir_desc;
2616
2617         /*
2618          * Keep track of interrupt requests in the PIR descriptor. This is
2619          * because the virtual APIC page pointed to by the VMCS cannot be
2620          * modified if the vcpu is running.
2621          */
2622         idx = vector / 64;
2623         mask = 1UL << (vector % 64);
2624         atomic_set_long(&pir_desc->pir[idx], mask);
2625         notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
2626
2627         VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
2628             level, "vmx_set_intr_ready");
2629         return (notify);
2630 }
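
/*
 * Worked example (illustrative only): for vector 0x41 the index is
 * 0x41 / 64 = 1 and the mask is 1UL << (0x41 % 64), i.e. bit 1, so bit 1
 * of pir_desc->pir[1] is set atomically. The function returns non-zero
 * (telling the caller to notify the target vcpu) only when this call is
 * the one that flips 'pending' from 0 to 1.
 */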
2631
2632 static int
2633 vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
2634 {
2635         struct vlapic_vtx *vlapic_vtx;
2636         struct pir_desc *pir_desc;
2637         struct LAPIC *lapic;
2638         uint64_t pending, pirval;
2639         uint32_t ppr, vpr;
2640         int i;
2641
2642         /*
2643          * This function is only expected to be called from the 'HLT' exit
2644          * handler which does not care about the vector that is pending.
2645          */
2646         KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
2647
2648         vlapic_vtx = (struct vlapic_vtx *)vlapic;
2649         pir_desc = vlapic_vtx->pir_desc;
2650
2651         pending = atomic_load_acq_long(&pir_desc->pending);
2652         if (!pending)
2653                 return (0);     /* common case */
2654
2655         /*
2656          * If there is an interrupt pending then it will be recognized only
2657          * if its priority is greater than the processor priority.
2658          *
2659          * Special case: if the processor priority is zero then any pending
2660          * interrupt will be recognized.
2661          */
2662         lapic = vlapic->apic_page;
2663         ppr = lapic->ppr & 0xf0;
2664         if (ppr == 0)
2665                 return (1);
2666
2667         VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
2668             lapic->ppr);
2669
2670         for (i = 3; i >= 0; i--) {
2671                 pirval = pir_desc->pir[i];
2672                 if (pirval != 0) {
2673                         vpr = (i * 64 + flsl(pirval) - 1) & 0xf0;
2674                         return (vpr > ppr);
2675                 }
2676         }
2677         return (0);
2678 }
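
/*
 * Worked example (illustrative only): with a processor priority of 0x30
 * and vector 0x45 as the highest bit set across the PIR, the loop above
 * recovers the vector and masks it to its priority class, vpr = 0x40,
 * which exceeds 0x30, so a pending interrupt is reported. A highest
 * pending vector of 0x32 yields vpr = 0x30, which does not exceed the
 * PPR, and nothing is reported.
 */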
2679
2680 static void
2681 vmx_intr_accepted(struct vlapic *vlapic, int vector)
2682 {
2683
2684         panic("vmx_intr_accepted: not expected to be called");
2685 }
2686
2687 static void
2688 vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
2689 {
2690         struct vlapic_vtx *vlapic_vtx;
2691         struct vmx *vmx;
2692         struct vmcs *vmcs;
2693         uint64_t mask, val;
2694
2695         KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
2696         KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
2697             ("vmx_set_tmr: vcpu cannot be running"));
2698
2699         vlapic_vtx = (struct vlapic_vtx *)vlapic;
2700         vmx = vlapic_vtx->vmx;
2701         vmcs = &vmx->vmcs[vlapic->vcpuid];
2702         mask = 1UL << (vector % 64);
2703
2704         VMPTRLD(vmcs);
2705         val = vmcs_read(VMCS_EOI_EXIT(vector));
2706         if (level)
2707                 val |= mask;
2708         else
2709                 val &= ~mask;
2710         vmcs_write(VMCS_EOI_EXIT(vector), val);
2711         VMCLEAR(vmcs);
2712 }
2713
2714 static void
2715 vmx_enable_x2apic_mode(struct vlapic *vlapic)
2716 {
2717         struct vmx *vmx;
2718         struct vmcs *vmcs;
2719         uint32_t proc_ctls2;
2720         int vcpuid, error;
2721
2722         vcpuid = vlapic->vcpuid;
2723         vmx = ((struct vlapic_vtx *)vlapic)->vmx;
2724         vmcs = &vmx->vmcs[vcpuid];
2725
2726         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
2727         KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
2728             ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
2729
2730         proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
2731         proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
2732         vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
2733
2734         VMPTRLD(vmcs);
2735         vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
2736         VMCLEAR(vmcs);
2737
2738         if (vlapic->vcpuid == 0) {
2739                 /*
2740                  * The nested page table mappings are shared by all vcpus
2741                  * so unmap the APIC access page just once.
2742                  */
2743                 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2744                 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
2745                     __func__, error));
2746
2747                 /*
2748                  * The MSR bitmap is shared by all vcpus so modify it only
2749                  * once in the context of vcpu 0.
2750                  */
2751                 error = vmx_allow_x2apic_msrs(vmx);
2752                 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
2753                     __func__, error));
2754         }
2755 }
2756
2757 static void
2758 vmx_post_intr(struct vlapic *vlapic, int hostcpu)
2759 {
2760
2761         ipi_cpu(hostcpu, pirvec);
2762 }
2763
2764 /*
2765  * Transfer the pending interrupts in the PIR descriptor to the IRR
2766  * in the virtual APIC page.
2767  */
2768 static void
2769 vmx_inject_pir(struct vlapic *vlapic)
2770 {
2771         struct vlapic_vtx *vlapic_vtx;
2772         struct pir_desc *pir_desc;
2773         struct LAPIC *lapic;
2774         uint64_t val, pirval;
2775         int rvi, pirbase = -1;
2776         uint16_t intr_status_old, intr_status_new;
2777
2778         vlapic_vtx = (struct vlapic_vtx *)vlapic;
2779         pir_desc = vlapic_vtx->pir_desc;
2780         if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
2781                 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
2782                     "no posted interrupt pending");
2783                 return;
2784         }
2785
2786         pirval = 0;
2787         pirbase = -1;
2788         lapic = vlapic->apic_page;
2789
2790         val = atomic_readandclear_long(&pir_desc->pir[0]);
2791         if (val != 0) {
2792                 lapic->irr0 |= val;
2793                 lapic->irr1 |= val >> 32;
2794                 pirbase = 0;
2795                 pirval = val;
2796         }
2797
2798         val = atomic_readandclear_long(&pir_desc->pir[1]);
2799         if (val != 0) {
2800                 lapic->irr2 |= val;
2801                 lapic->irr3 |= val >> 32;
2802                 pirbase = 64;
2803                 pirval = val;
2804         }
2805
2806         val = atomic_readandclear_long(&pir_desc->pir[2]);
2807         if (val != 0) {
2808                 lapic->irr4 |= val;
2809                 lapic->irr5 |= val >> 32;
2810                 pirbase = 128;
2811                 pirval = val;
2812         }
2813
2814         val = atomic_readandclear_long(&pir_desc->pir[3]);
2815         if (val != 0) {
2816                 lapic->irr6 |= val;
2817                 lapic->irr7 |= val >> 32;
2818                 pirbase = 192;
2819                 pirval = val;
2820         }
2821
2822         VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
2823
2824         /*
2825          * Update RVI so the processor can evaluate pending virtual
2826          * interrupts on VM-entry.
2827          *
2828          * It is possible for pirval to be 0 here, even though the
2829          * pending bit has been set. The scenario is:
2830          * CPU-Y is sending a posted interrupt to CPU-X, which
2831          * is running a guest and processing posted interrupts in h/w.
2832          * CPU-X will eventually exit and the state seen in s/w is
2833          * the pending bit set, but no PIR bits set.
2834          *
2835          *      CPU-X                      CPU-Y
2836          *   (vm running)                (host running)
2837          *   rx posted interrupt
2838          *   CLEAR pending bit
2839          *                               SET PIR bit
2840          *   READ/CLEAR PIR bits
2841          *                               SET pending bit
2842          *   (vm exit)
2843          *   pending bit set, PIR 0
2844          */
2845         if (pirval != 0) {
2846                 rvi = pirbase + flsl(pirval) - 1;
2847                 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
2848                 intr_status_new = (intr_status_old & 0xFF00) | rvi;
2849                 if (intr_status_new > intr_status_old) {
2850                         vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
2851                         VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
2852                             "guest_intr_status changed from 0x%04x to 0x%04x",
2853                             intr_status_old, intr_status_new);
2854                 }
2855         }
2856 }
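
/*
 * Worked example (illustrative only): if pir[1] was the highest non-zero
 * chunk and its most significant set bit is bit 5, the requested vector
 * is rvi = 64 + 6 - 1 = 0x45. The low byte of the guest interrupt status
 * (RVI) is raised to 0x45 only if that is larger than what is already
 * there, so a previously requested higher-priority vector is never
 * overwritten with a lower one.
 */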
2857
2858 static struct vlapic *
2859 vmx_vlapic_init(void *arg, int vcpuid)
2860 {
2861         struct vmx *vmx;
2862         struct vlapic *vlapic;
2863         struct vlapic_vtx *vlapic_vtx;
2864         
2865         vmx = arg;
2866
2867         vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
2868         vlapic->vm = vmx->vm;
2869         vlapic->vcpuid = vcpuid;
2870         vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
2871
2872         vlapic_vtx = (struct vlapic_vtx *)vlapic;
2873         vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
2874         vlapic_vtx->vmx = vmx;
2875
2876         if (virtual_interrupt_delivery) {
2877                 vlapic->ops.set_intr_ready = vmx_set_intr_ready;
2878                 vlapic->ops.pending_intr = vmx_pending_intr;
2879                 vlapic->ops.intr_accepted = vmx_intr_accepted;
2880                 vlapic->ops.set_tmr = vmx_set_tmr;
2881                 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
2882         }
2883
2884         if (posted_interrupts)
2885                 vlapic->ops.post_intr = vmx_post_intr;
2886
2887         vlapic_init(vlapic);
2888
2889         return (vlapic);
2890 }
2891
2892 static void
2893 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
2894 {
2895
2896         vlapic_cleanup(vlapic);
2897         free(vlapic, M_VLAPIC);
2898 }
2899
2900 struct vmm_ops vmm_ops_intel = {
2901         vmx_init,
2902         vmx_cleanup,
2903         vmx_restore,
2904         vmx_vminit,
2905         vmx_run,
2906         vmx_vmcleanup,
2907         vmx_getreg,
2908         vmx_setreg,
2909         vmx_getdesc,
2910         vmx_setdesc,
2911         vmx_getcap,
2912         vmx_setcap,
2913         ept_vmspace_alloc,
2914         ept_vmspace_free,
2915         vmx_vlapic_init,
2916         vmx_vlapic_cleanup,
2917 };