1 /*-
2  * Copyright (c) 2011 NetApp, Inc.
3  * All rights reserved.
4  * Copyright (c) 2018 Joyent, Inc.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD$
28  */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/smp.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/pcpu.h>
39 #include <sys/proc.h>
40 #include <sys/sysctl.h>
41
42 #include <vm/vm.h>
43 #include <vm/pmap.h>
44
45 #include <machine/psl.h>
46 #include <machine/cpufunc.h>
47 #include <machine/md_var.h>
48 #include <machine/reg.h>
49 #include <machine/segments.h>
50 #include <machine/smp.h>
51 #include <machine/specialreg.h>
52 #include <machine/vmparam.h>
53
54 #include <machine/vmm.h>
55 #include <machine/vmm_dev.h>
56 #include <machine/vmm_instruction_emul.h>
57 #include "vmm_lapic.h"
58 #include "vmm_host.h"
59 #include "vmm_ioport.h"
60 #include "vmm_ktr.h"
61 #include "vmm_stat.h"
62 #include "vatpic.h"
63 #include "vlapic.h"
64 #include "vlapic_priv.h"
65
66 #include "ept.h"
67 #include "vmx_cpufunc.h"
68 #include "vmx.h"
69 #include "vmx_msr.h"
70 #include "x86.h"
71 #include "vmx_controls.h"
72
73 #define PINBASED_CTLS_ONE_SETTING                                       \
74         (PINBASED_EXTINT_EXITING        |                               \
75          PINBASED_NMI_EXITING           |                               \
76          PINBASED_VIRTUAL_NMI)
77 #define PINBASED_CTLS_ZERO_SETTING      0
78
79 #define PROCBASED_CTLS_WINDOW_SETTING                                   \
80         (PROCBASED_INT_WINDOW_EXITING   |                               \
81          PROCBASED_NMI_WINDOW_EXITING)
82
83 #define PROCBASED_CTLS_ONE_SETTING                                      \
84         (PROCBASED_SECONDARY_CONTROLS   |                               \
85          PROCBASED_MWAIT_EXITING        |                               \
86          PROCBASED_MONITOR_EXITING      |                               \
87          PROCBASED_IO_EXITING           |                               \
88          PROCBASED_MSR_BITMAPS          |                               \
89          PROCBASED_CTLS_WINDOW_SETTING  |                               \
90          PROCBASED_CR8_LOAD_EXITING     |                               \
91          PROCBASED_CR8_STORE_EXITING)
92 #define PROCBASED_CTLS_ZERO_SETTING     \
93         (PROCBASED_CR3_LOAD_EXITING |   \
94         PROCBASED_CR3_STORE_EXITING |   \
95         PROCBASED_IO_BITMAPS)
96
97 #define PROCBASED_CTLS2_ONE_SETTING     PROCBASED2_ENABLE_EPT
98 #define PROCBASED_CTLS2_ZERO_SETTING    0
99
100 #define VM_EXIT_CTLS_ONE_SETTING                                        \
101         (VM_EXIT_SAVE_DEBUG_CONTROLS            |                       \
102         VM_EXIT_HOST_LMA                        |                       \
103         VM_EXIT_SAVE_EFER                       |                       \
104         VM_EXIT_LOAD_EFER                       |                       \
105         VM_EXIT_ACKNOWLEDGE_INTERRUPT)
106
107 #define VM_EXIT_CTLS_ZERO_SETTING       0
108
109 #define VM_ENTRY_CTLS_ONE_SETTING                                       \
110         (VM_ENTRY_LOAD_DEBUG_CONTROLS           |                       \
111         VM_ENTRY_LOAD_EFER)
112
113 #define VM_ENTRY_CTLS_ZERO_SETTING                                      \
114         (VM_ENTRY_INTO_SMM                      |                       \
115         VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
116
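/*
 * Convention for the *_ONE_SETTING/*_ZERO_SETTING pairs above: the "one"
 * bits must end up set and the "zero" bits must end up clear in the final
 * value of the corresponding VMX control.  vmx_init() checks each pair
 * against the VMX capability MSRs via vmx_set_ctlreg() before using it.
 */
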
117 #define HANDLED         1
118 #define UNHANDLED       0
119
120 static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
121 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
122
123 SYSCTL_DECL(_hw_vmm);
124 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
125
126 int vmxon_enabled[MAXCPU];
127 static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
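/*
 * Each CPU's VMXON region above must be page-aligned; vmx_enable() stores
 * the revision identifier returned by vmx_revision() in its first 32 bits
 * before executing VMXON on that CPU.
 */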
128
129 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
130 static uint32_t exit_ctls, entry_ctls;
131
132 static uint64_t cr0_ones_mask, cr0_zeros_mask;
133 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
134              &cr0_ones_mask, 0, NULL);
135 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
136              &cr0_zeros_mask, 0, NULL);
137
138 static uint64_t cr4_ones_mask, cr4_zeros_mask;
139 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
140              &cr4_ones_mask, 0, NULL);
141 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
142              &cr4_zeros_mask, 0, NULL);
143
144 static int vmx_initialized;
145 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
146            &vmx_initialized, 0, "Intel VMX initialized");
147
148 /*
149  * Optional capabilities
150  */
151 static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);
152
153 static int cap_halt_exit;
154 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
155     "HLT triggers a VM-exit");
156
157 static int cap_pause_exit;
158 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
159     0, "PAUSE triggers a VM-exit");
160
161 static int cap_unrestricted_guest;
162 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
163     &cap_unrestricted_guest, 0, "Unrestricted guests");
164
165 static int cap_monitor_trap;
166 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
167     &cap_monitor_trap, 0, "Monitor trap flag");
168
169 static int cap_invpcid;
170 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
171     0, "Guests are allowed to use INVPCID");
172
173 static int virtual_interrupt_delivery;
174 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
175     &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
176
177 static int posted_interrupts;
178 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
179     &posted_interrupts, 0, "APICv posted interrupt support");
180
181 static int pirvec = -1;
182 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
183     &pirvec, 0, "APICv posted interrupt vector");
184
185 static struct unrhdr *vpid_unr;
186 static u_int vpid_alloc_failed;
187 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
188             &vpid_alloc_failed, 0, NULL);
189
190 static int guest_l1d_flush;
191 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD,
192     &guest_l1d_flush, 0, NULL);
193 static int guest_l1d_flush_sw;
194 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RD,
195     &guest_l1d_flush_sw, 0, NULL);
196
197 static struct msr_entry msr_load_list[1] __aligned(16);
198
199 /*
200  * Use the last page below 4GB as the APIC access address. This address is
201  * occupied by the boot firmware so it is guaranteed that it will not conflict
202  * with a page in system memory.
203  */
204 #define APIC_ACCESS_ADDRESS     0xFFFFF000
205
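/*
 * When virtual interrupt delivery is enabled, vmx_vminit() maps the guest's
 * default APIC base to this address with vm_map_mmio() and programs it into
 * the VMCS_APIC_ACCESS field of each vcpu's VMCS.
 */
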
206 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
207 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
208 static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
209 static void vmx_inject_pir(struct vlapic *vlapic);
210
211 #ifdef KTR
212 static const char *
213 exit_reason_to_str(int reason)
214 {
215         static char reasonbuf[32];
216
217         switch (reason) {
218         case EXIT_REASON_EXCEPTION:
219                 return "exception";
220         case EXIT_REASON_EXT_INTR:
221                 return "extint";
222         case EXIT_REASON_TRIPLE_FAULT:
223                 return "triplefault";
224         case EXIT_REASON_INIT:
225                 return "init";
226         case EXIT_REASON_SIPI:
227                 return "sipi";
228         case EXIT_REASON_IO_SMI:
229                 return "iosmi";
230         case EXIT_REASON_SMI:
231                 return "smi";
232         case EXIT_REASON_INTR_WINDOW:
233                 return "intrwindow";
234         case EXIT_REASON_NMI_WINDOW:
235                 return "nmiwindow";
236         case EXIT_REASON_TASK_SWITCH:
237                 return "taskswitch";
238         case EXIT_REASON_CPUID:
239                 return "cpuid";
240         case EXIT_REASON_GETSEC:
241                 return "getsec";
242         case EXIT_REASON_HLT:
243                 return "hlt";
244         case EXIT_REASON_INVD:
245                 return "invd";
246         case EXIT_REASON_INVLPG:
247                 return "invlpg";
248         case EXIT_REASON_RDPMC:
249                 return "rdpmc";
250         case EXIT_REASON_RDTSC:
251                 return "rdtsc";
252         case EXIT_REASON_RSM:
253                 return "rsm";
254         case EXIT_REASON_VMCALL:
255                 return "vmcall";
256         case EXIT_REASON_VMCLEAR:
257                 return "vmclear";
258         case EXIT_REASON_VMLAUNCH:
259                 return "vmlaunch";
260         case EXIT_REASON_VMPTRLD:
261                 return "vmptrld";
262         case EXIT_REASON_VMPTRST:
263                 return "vmptrst";
264         case EXIT_REASON_VMREAD:
265                 return "vmread";
266         case EXIT_REASON_VMRESUME:
267                 return "vmresume";
268         case EXIT_REASON_VMWRITE:
269                 return "vmwrite";
270         case EXIT_REASON_VMXOFF:
271                 return "vmxoff";
272         case EXIT_REASON_VMXON:
273                 return "vmxon";
274         case EXIT_REASON_CR_ACCESS:
275                 return "craccess";
276         case EXIT_REASON_DR_ACCESS:
277                 return "draccess";
278         case EXIT_REASON_INOUT:
279                 return "inout";
280         case EXIT_REASON_RDMSR:
281                 return "rdmsr";
282         case EXIT_REASON_WRMSR:
283                 return "wrmsr";
284         case EXIT_REASON_INVAL_VMCS:
285                 return "invalvmcs";
286         case EXIT_REASON_INVAL_MSR:
287                 return "invalmsr";
288         case EXIT_REASON_MWAIT:
289                 return "mwait";
290         case EXIT_REASON_MTF:
291                 return "mtf";
292         case EXIT_REASON_MONITOR:
293                 return "monitor";
294         case EXIT_REASON_PAUSE:
295                 return "pause";
296         case EXIT_REASON_MCE_DURING_ENTRY:
297                 return "mce-during-entry";
298         case EXIT_REASON_TPR:
299                 return "tpr";
300         case EXIT_REASON_APIC_ACCESS:
301                 return "apic-access";
302         case EXIT_REASON_GDTR_IDTR:
303                 return "gdtridtr";
304         case EXIT_REASON_LDTR_TR:
305                 return "ldtrtr";
306         case EXIT_REASON_EPT_FAULT:
307                 return "eptfault";
308         case EXIT_REASON_EPT_MISCONFIG:
309                 return "eptmisconfig";
310         case EXIT_REASON_INVEPT:
311                 return "invept";
312         case EXIT_REASON_RDTSCP:
313                 return "rdtscp";
314         case EXIT_REASON_VMX_PREEMPT:
315                 return "vmxpreempt";
316         case EXIT_REASON_INVVPID:
317                 return "invvpid";
318         case EXIT_REASON_WBINVD:
319                 return "wbinvd";
320         case EXIT_REASON_XSETBV:
321                 return "xsetbv";
322         case EXIT_REASON_APIC_WRITE:
323                 return "apic-write";
324         default:
325                 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
326                 return (reasonbuf);
327         }
328 }
329 #endif  /* KTR */
330
331 static int
332 vmx_allow_x2apic_msrs(struct vmx *vmx)
333 {
334         int i, error;
335
336         error = 0;
337
338         /*
339          * Allow readonly access to the following x2APIC MSRs from the guest.
340          */
341         error += guest_msr_ro(vmx, MSR_APIC_ID);
342         error += guest_msr_ro(vmx, MSR_APIC_VERSION);
343         error += guest_msr_ro(vmx, MSR_APIC_LDR);
344         error += guest_msr_ro(vmx, MSR_APIC_SVR);
345
346         for (i = 0; i < 8; i++)
347                 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
348
349         for (i = 0; i < 8; i++)
350                 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
351
352         for (i = 0; i < 8; i++)
353                 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
354
355         error += guest_msr_ro(vmx, MSR_APIC_ESR);
356         error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
357         error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
358         error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
359         error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
360         error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
361         error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
362         error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
363         error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
364         error += guest_msr_ro(vmx, MSR_APIC_ICR);
365
366         /*
367          * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
368          *
369          * These registers get special treatment described in the section
370          * "Virtualizing MSR-Based APIC Accesses".
371          */
372         error += guest_msr_rw(vmx, MSR_APIC_TPR);
373         error += guest_msr_rw(vmx, MSR_APIC_EOI);
374         error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
375
376         return (error);
377 }
378
379 u_long
380 vmx_fix_cr0(u_long cr0)
381 {
382
383         return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
384 }
385
386 u_long
387 vmx_fix_cr4(u_long cr4)
388 {
389
390         return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
391 }
392
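/*
 * Illustrative example of the fixed-bit masks consumed above, as they are
 * initially derived in vmx_init() from the CR0 fixed MSRs (before the
 * unrestricted-guest and CR0_NW/CR0_CD adjustments made there).  With the
 * typical, but not guaranteed, values MSR_VMX_CR0_FIXED0 = 0x80000021 and
 * MSR_VMX_CR0_FIXED1 = 0xffffffff:
 *
 *   cr0_ones_mask  =  fixed0 &  fixed1 = 0x80000021   (PG, NE, PE forced on)
 *   cr0_zeros_mask = ~fixed0 & ~fixed1 = 0            (nothing forced off)
 *
 * so vmx_fix_cr0(0x11) = (0x11 | 0x80000021) & ~0 = 0x80000031.  The same
 * construction applies to vmx_fix_cr4() and the CR4 fixed MSRs.
 */
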
393 static void
394 vpid_free(int vpid)
395 {
396         if (vpid < 0 || vpid > 0xffff)
397                 panic("vpid_free: invalid vpid %d", vpid);
398
399         /*
400          * VPIDs [0,VM_MAXCPU] are special and are not allocated from
401          * the unit number allocator.
402          */
403
404         if (vpid > VM_MAXCPU)
405                 free_unr(vpid_unr, vpid);
406 }
407
408 static void
409 vpid_alloc(uint16_t *vpid, int num)
410 {
411         int i, x;
412
413         if (num <= 0 || num > VM_MAXCPU)
414                 panic("invalid number of vpids requested: %d", num);
415
416         /*
417          * If the "enable vpid" execution control is not enabled then the
418          * VPID is required to be 0 for all vcpus.
419          */
420         if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
421                 for (i = 0; i < num; i++)
422                         vpid[i] = 0;
423                 return;
424         }
425
426         /*
427          * Allocate a unique VPID for each vcpu from the unit number allocator.
428          */
429         for (i = 0; i < num; i++) {
430                 x = alloc_unr(vpid_unr);
431                 if (x == -1)
432                         break;
433                 else
434                         vpid[i] = x;
435         }
436
437         if (i < num) {
438                 atomic_add_int(&vpid_alloc_failed, 1);
439
440                 /*
441                  * If the unit number allocator does not have enough unique
442                  * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
443                  *
444          * These VPIDs are not unique across VMs, but this does not
445                  * affect correctness because the combined mappings are also
446                  * tagged with the EP4TA which is unique for each VM.
447                  *
448                  * It is still sub-optimal because the invvpid will invalidate
449                  * combined mappings for a particular VPID across all EP4TAs.
450                  */
451                 while (i-- > 0)
452                         vpid_free(vpid[i]);
453
454                 for (i = 0; i < num; i++)
455                         vpid[i] = i + 1;
456         }
457 }
458
459 static void
460 vpid_init(void)
461 {
462         /*
463          * VPID 0 is required when the "enable VPID" execution control is
464          * disabled.
465          *
466          * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
467          * unit number allocator does not have sufficient unique VPIDs to
468          * satisfy the allocation.
469          *
470          * The remaining VPIDs are managed by the unit number allocator.
471          */
472         vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
473 }
474
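/*
 * Resulting VPID namespace as carved up by vpid_init() and vpid_alloc()
 * above (layout shown for illustration; VM_MAXCPU is whatever the build
 * defines):
 *
 *   0                         reserved; used when "enable VPID" is disabled
 *   [1, VM_MAXCPU]            overflow namespace, possibly shared across VMs
 *   [VM_MAXCPU + 1, 0xffff]   unique VPIDs handed out by vpid_unr
 */
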
475 static void
476 vmx_disable(void *arg __unused)
477 {
478         struct invvpid_desc invvpid_desc = { 0 };
479         struct invept_desc invept_desc = { 0 };
480
481         if (vmxon_enabled[curcpu]) {
482                 /*
483                  * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
484                  *
485                  * VMXON and VMXOFF are not required to invalidate any TLB
486                  * caching structures, so flush them explicitly here.  This
487                  * prevents stale entries from surviving across VMX episodes.
488                  */
489                 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
490                 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
491                 vmxoff();
492         }
493         load_cr4(rcr4() & ~CR4_VMXE);
494 }
495
496 static int
497 vmx_cleanup(void)
498 {
499
500         if (pirvec >= 0)
501                 lapic_ipi_free(pirvec);
502
503         if (vpid_unr != NULL) {
504                 delete_unrhdr(vpid_unr);
505                 vpid_unr = NULL;
506         }
507
508         if (nmi_flush_l1d_sw == 1)
509                 nmi_flush_l1d_sw = 0;
510
511         smp_rendezvous(NULL, vmx_disable, NULL, NULL);
512
513         return (0);
514 }
515
516 static void
517 vmx_enable(void *arg __unused)
518 {
519         int error;
520         uint64_t feature_control;
521
522         feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
523         if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
524             (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
525                 wrmsr(MSR_IA32_FEATURE_CONTROL,
526                     feature_control | IA32_FEATURE_CONTROL_VMX_EN |
527                     IA32_FEATURE_CONTROL_LOCK);
528         }
529
530         load_cr4(rcr4() | CR4_VMXE);
531
532         *(uint32_t *)vmxon_region[curcpu] = vmx_revision();
533         error = vmxon(vmxon_region[curcpu]);
534         if (error == 0)
535                 vmxon_enabled[curcpu] = 1;
536 }
537
538 static void
539 vmx_restore(void)
540 {
541
542         if (vmxon_enabled[curcpu])
543                 vmxon(vmxon_region[curcpu]);
544 }
545
546 static int
547 vmx_init(int ipinum)
548 {
549         int error, use_tpr_shadow;
550         uint64_t basic, fixed0, fixed1, feature_control;
551         uint32_t tmp, procbased2_vid_bits;
552
553         /* CPUID.1:ECX[bit 5] must be 1 for the processor to support VMX */
554         if (!(cpu_feature2 & CPUID2_VMX)) {
555                 printf("vmx_init: processor does not support VMX operation\n");
556                 return (ENXIO);
557         }
558
559         /*
560          * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
561          * are set (bits 0 and 2 respectively).
562          */
563         feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
564         if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
565             (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
566                 printf("vmx_init: VMX operation disabled by BIOS\n");
567                 return (ENXIO);
568         }
569
570         /*
571          * Verify capabilities MSR_VMX_BASIC:
572          * - bit 54 indicates support for INS/OUTS decoding
573          */
574         basic = rdmsr(MSR_VMX_BASIC);
575         if ((basic & (1UL << 54)) == 0) {
576                 printf("vmx_init: processor does not support desired basic "
577                     "capabilities\n");
578                 return (EINVAL);
579         }
580
581         /* Check support for primary processor-based VM-execution controls */
582         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
583                                MSR_VMX_TRUE_PROCBASED_CTLS,
584                                PROCBASED_CTLS_ONE_SETTING,
585                                PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
586         if (error) {
587                 printf("vmx_init: processor does not support desired primary "
588                        "processor-based controls\n");
589                 return (error);
590         }
591
592         /* Clear the processor-based ctl bits that are set on demand */
593         procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
594
595         /* Check support for secondary processor-based VM-execution controls */
596         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
597                                MSR_VMX_PROCBASED_CTLS2,
598                                PROCBASED_CTLS2_ONE_SETTING,
599                                PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
600         if (error) {
601                 printf("vmx_init: processor does not support desired secondary "
602                        "processor-based controls\n");
603                 return (error);
604         }
605
606         /* Check support for VPID */
607         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
608                                PROCBASED2_ENABLE_VPID, 0, &tmp);
609         if (error == 0)
610                 procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
611
612         /* Check support for pin-based VM-execution controls */
613         error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
614                                MSR_VMX_TRUE_PINBASED_CTLS,
615                                PINBASED_CTLS_ONE_SETTING,
616                                PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
617         if (error) {
618                 printf("vmx_init: processor does not support desired "
619                        "pin-based controls\n");
620                 return (error);
621         }
622
623         /* Check support for VM-exit controls */
624         error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
625                                VM_EXIT_CTLS_ONE_SETTING,
626                                VM_EXIT_CTLS_ZERO_SETTING,
627                                &exit_ctls);
628         if (error) {
629                 printf("vmx_init: processor does not support desired "
630                     "exit controls\n");
631                 return (error);
632         }
633
634         /* Check support for VM-entry controls */
635         error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
636             VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
637             &entry_ctls);
638         if (error) {
639                 printf("vmx_init: processor does not support desired "
640                     "entry controls\n");
641                 return (error);
642         }
643
644         /*
645          * Check support for optional features by testing them
646          * as individual bits
647          */
648         cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
649                                         MSR_VMX_TRUE_PROCBASED_CTLS,
650                                         PROCBASED_HLT_EXITING, 0,
651                                         &tmp) == 0);
652
653         cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
654                                         MSR_VMX_PROCBASED_CTLS,
655                                         PROCBASED_MTF, 0,
656                                         &tmp) == 0);
657
658         cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
659                                          MSR_VMX_TRUE_PROCBASED_CTLS,
660                                          PROCBASED_PAUSE_EXITING, 0,
661                                          &tmp) == 0);
662
663         cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
664                                         MSR_VMX_PROCBASED_CTLS2,
665                                         PROCBASED2_UNRESTRICTED_GUEST, 0,
666                                         &tmp) == 0);
667
668         cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
669             MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
670             &tmp) == 0);
671
672         /*
673          * Check support for virtual interrupt delivery.
674          */
675         procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
676             PROCBASED2_VIRTUALIZE_X2APIC_MODE |
677             PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
678             PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
679
680         use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
681             MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
682             &tmp) == 0);
683
684         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
685             procbased2_vid_bits, 0, &tmp);
686         if (error == 0 && use_tpr_shadow) {
687                 virtual_interrupt_delivery = 1;
688                 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
689                     &virtual_interrupt_delivery);
690         }
691
692         if (virtual_interrupt_delivery) {
693                 procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
694                 procbased_ctls2 |= procbased2_vid_bits;
695                 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
696
697                 /*
698                  * No need to emulate accesses to %CR8 if virtual
699                  * interrupt delivery is enabled.
700                  */
701                 procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
702                 procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
703
704                 /*
705                  * Check for Posted Interrupts only if Virtual Interrupt
706                  * Delivery is enabled.
707                  */
708                 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
709                     MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
710                     &tmp);
711                 if (error == 0) {
712                         pirvec = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
713                             &IDTVEC(justreturn));
714                         if (pirvec < 0) {
715                                 if (bootverbose) {
716                                         printf("vmx_init: unable to allocate "
717                                             "posted interrupt vector\n");
718                                 }
719                         } else {
720                                 posted_interrupts = 1;
721                                 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
722                                     &posted_interrupts);
723                         }
724                 }
725         }
726
727         if (posted_interrupts)
728                 pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
729
730         /* Initialize EPT */
731         error = ept_init(ipinum);
732         if (error) {
733                 printf("vmx_init: ept initialization failed (%d)\n", error);
734                 return (error);
735         }
736
737         guest_l1d_flush = (cpu_ia32_arch_caps &
738             IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
739         TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);
740
741         /*
742          * If the L1D cache flush mitigation is enabled, use the
743          * IA32_FLUSH_CMD MSR when available.  Otherwise fall back to
744          * the software flush method, which loads enough data from the
745          * kernel text to flush the existing L1D content, both on VMX
746          * entry and on NMI return.
747          */
748         if (guest_l1d_flush) {
749                 if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
750                         guest_l1d_flush_sw = 1;
751                         TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
752                             &guest_l1d_flush_sw);
753                 }
754                 if (guest_l1d_flush_sw) {
755                         if (nmi_flush_l1d_sw <= 1)
756                                 nmi_flush_l1d_sw = 1;
757                 } else {
758                         msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
759                         msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
760                 }
761         }
762
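        /*
         * When the MSR-based flush is chosen, the msr_load_list[] entry set
         * up above is wired into the VM-entry MSR-load area by vmx_vminit()
         * (VMCS_ENTRY_MSR_LOAD and VMCS_ENTRY_MSR_LOAD_COUNT), so the
         * processor writes IA32_FLUSH_CMD_L1D to MSR_IA32_FLUSH_CMD on
         * every VM entry.
         */
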
763         /*
764          * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
765          */
766         fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
767         fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
768         cr0_ones_mask = fixed0 & fixed1;
769         cr0_zeros_mask = ~fixed0 & ~fixed1;
770
771         /*
772          * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
773          * if unrestricted guest execution is allowed.
774          */
775         if (cap_unrestricted_guest)
776                 cr0_ones_mask &= ~(CR0_PG | CR0_PE);
777
778         /*
779          * Do not allow the guest to set CR0_NW or CR0_CD.
780          */
781         cr0_zeros_mask |= (CR0_NW | CR0_CD);
782
783         fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
784         fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
785         cr4_ones_mask = fixed0 & fixed1;
786         cr4_zeros_mask = ~fixed0 & ~fixed1;
787
788         vpid_init();
789
790         vmx_msr_init();
791
792         /* enable VMX operation */
793         smp_rendezvous(NULL, vmx_enable, NULL, NULL);
794
795         vmx_initialized = 1;
796
797         return (0);
798 }
799
800 static void
801 vmx_trigger_hostintr(int vector)
802 {
803         uintptr_t func;
804         struct gate_descriptor *gd;
805
806         gd = &idt[vector];
807
808         KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
809             "invalid vector %d", vector));
810         KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
811             vector));
812         KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
813             "has invalid type %d", vector, gd->gd_type));
814         KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
815             "has invalid dpl %d", vector, gd->gd_dpl));
816         KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
817             "for vector %d has invalid selector %d", vector, gd->gd_selector));
818         KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
819             "IST %d", vector, gd->gd_ist));
820
821         func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
822         vmx_call_isr(func);
823 }
824
825 static int
826 vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
827 {
828         int error, mask_ident, shadow_ident;
829         uint64_t mask_value;
830
831         if (which != 0 && which != 4)
832                 panic("vmx_setup_cr_shadow: unknown cr%d", which);
833
834         if (which == 0) {
835                 mask_ident = VMCS_CR0_MASK;
836                 mask_value = cr0_ones_mask | cr0_zeros_mask;
837                 shadow_ident = VMCS_CR0_SHADOW;
838         } else {
839                 mask_ident = VMCS_CR4_MASK;
840                 mask_value = cr4_ones_mask | cr4_zeros_mask;
841                 shadow_ident = VMCS_CR4_SHADOW;
842         }
843
844         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
845         if (error)
846                 return (error);
847
848         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
849         if (error)
850                 return (error);
851
852         return (0);
853 }
854 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init))
855 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
856
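/*
 * Background on the guest/host masks and read shadows programmed by
 * vmx_setup_cr_shadow(): CR0/CR4 bits that are set in VMCS_CR0_MASK or
 * VMCS_CR4_MASK are owned by the hypervisor.  Guest reads of those bits
 * return the value in the corresponding read shadow, and a guest write that
 * would make them differ from the shadow causes an EXIT_REASON_CR_ACCESS
 * VM-exit instead of taking effect.
 */
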
857 static void *
858 vmx_vminit(struct vm *vm, pmap_t pmap)
859 {
860         uint16_t vpid[VM_MAXCPU];
861         int i, error;
862         struct vmx *vmx;
863         struct vmcs *vmcs;
864         uint32_t exc_bitmap;
865
866         vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
867         if ((uintptr_t)vmx & PAGE_MASK) {
868                 panic("malloc of struct vmx not aligned on %d byte boundary",
869                       PAGE_SIZE);
870         }
871         vmx->vm = vm;
872
873         vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
874
875         /*
876          * Clean up EPTP-tagged guest physical and combined mappings
877          *
878          * VMX transitions are not required to invalidate any guest physical
879          * mappings. So, it may be possible for stale guest physical mappings
880          * to be present in the processor TLBs.
881          *
882          * Combined mappings for this EP4TA are also invalidated for all VPIDs.
883          */
884         ept_invalidate_mappings(vmx->eptp);
885
886         msr_bitmap_initialize(vmx->msr_bitmap);
887
888         /*
889          * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
890          * The guest FSBASE and GSBASE are saved and restored during
891          * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
892          * always restored from the vmcs host state area on vm-exit.
893          *
894          * The SYSENTER_CS/ESP/EIP MSRs are saved and restored in the
895          * same way as FS/GSBASE, so they can also be accessed directly
896          * by the guest.
897          *
898          * MSR_EFER is saved and restored in the guest VMCS area on a
899          * VM exit and entry respectively. It is also restored from the
900          * host VMCS area on a VM exit.
901          *
902          * The TSC MSR is exposed read-only.  Writes are disallowed
903          * because they would impact the host TSC.  If the guest does
904          * write to it, the "use TSC offsetting" execution control is
905          * enabled and the difference between the host TSC and the
906          * guest TSC is written into the TSC offset in the VMCS.
907          */
908         if (guest_msr_rw(vmx, MSR_GSBASE) ||
909             guest_msr_rw(vmx, MSR_FSBASE) ||
910             guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
911             guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
912             guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
913             guest_msr_rw(vmx, MSR_EFER) ||
914             guest_msr_ro(vmx, MSR_TSC))
915                 panic("vmx_vminit: error setting guest msr access");
916
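        /*
         * Because MSR_TSC is read-only above, a guest WRMSR to it traps to
         * the hypervisor; see vmx_set_tsc_offset() below for how the
         * resulting offset is programmed into the VMCS.
         */
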
917         vpid_alloc(vpid, VM_MAXCPU);
918
919         if (virtual_interrupt_delivery) {
920                 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
921                     APIC_ACCESS_ADDRESS);
922                 /* XXX this should really return an error to the caller */
923                 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
924         }
925
926         for (i = 0; i < VM_MAXCPU; i++) {
927                 vmcs = &vmx->vmcs[i];
928                 vmcs->identifier = vmx_revision();
929                 error = vmclear(vmcs);
930                 if (error != 0) {
931                         panic("vmx_vminit: vmclear error %d on vcpu %d\n",
932                               error, i);
933                 }
934
935                 vmx_msr_guest_init(vmx, i);
936
937                 error = vmcs_init(vmcs);
938                 KASSERT(error == 0, ("vmcs_init error %d", error));
939
940                 VMPTRLD(vmcs);
941                 error = 0;
942                 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
943                 error += vmwrite(VMCS_EPTP, vmx->eptp);
944                 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
945                 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
946                 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
947                 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
948                 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
949                 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
950                 error += vmwrite(VMCS_VPID, vpid[i]);
951
952                 if (guest_l1d_flush && !guest_l1d_flush_sw) {
953                         vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract(
954                             (vm_offset_t)&msr_load_list[0]));
955                         vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
956                             nitems(msr_load_list));
957                         vmcs_write(VMCS_EXIT_MSR_STORE, 0);
958                         vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
959                 }
960
961                 /* exception bitmap */
962                 if (vcpu_trace_exceptions(vm, i))
963                         exc_bitmap = 0xffffffff;
964                 else
965                         exc_bitmap = 1 << IDT_MC;
966                 error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);
967
968                 vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
969                 error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);
970
971                 if (virtual_interrupt_delivery) {
972                         error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
973                         error += vmwrite(VMCS_VIRTUAL_APIC,
974                             vtophys(&vmx->apic_page[i]));
975                         error += vmwrite(VMCS_EOI_EXIT0, 0);
976                         error += vmwrite(VMCS_EOI_EXIT1, 0);
977                         error += vmwrite(VMCS_EOI_EXIT2, 0);
978                         error += vmwrite(VMCS_EOI_EXIT3, 0);
979                 }
980                 if (posted_interrupts) {
981                         error += vmwrite(VMCS_PIR_VECTOR, pirvec);
982                         error += vmwrite(VMCS_PIR_DESC,
983                             vtophys(&vmx->pir_desc[i]));
984                 }
985                 VMCLEAR(vmcs);
986                 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
987
988                 vmx->cap[i].set = 0;
989                 vmx->cap[i].proc_ctls = procbased_ctls;
990                 vmx->cap[i].proc_ctls2 = procbased_ctls2;
991
992                 vmx->state[i].nextrip = ~0;
993                 vmx->state[i].lastcpu = NOCPU;
994                 vmx->state[i].vpid = vpid[i];
995
996                 /*
997                  * Set up the CR0/4 shadows, and init the read shadow
998                  * to the power-on register value from the Intel Sys Arch.
999                  *  CR0 - 0x60000010
1000                  *  CR4 - 0
1001                  */
1002                 error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
1003                 if (error != 0)
1004                         panic("vmx_setup_cr0_shadow %d", error);
1005
1006                 error = vmx_setup_cr4_shadow(vmcs, 0);
1007                 if (error != 0)
1008                         panic("vmx_setup_cr4_shadow %d", error);
1009
1010                 vmx->ctx[i].pmap = pmap;
1011         }
1012
1013         return (vmx);
1014 }
1015
1016 static int
1017 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
1018 {
1019         int handled, func;
1020
1021         func = vmxctx->guest_rax;
1022
1023         handled = x86_emulate_cpuid(vm, vcpu,
1024                                     (uint32_t*)(&vmxctx->guest_rax),
1025                                     (uint32_t*)(&vmxctx->guest_rbx),
1026                                     (uint32_t*)(&vmxctx->guest_rcx),
1027                                     (uint32_t*)(&vmxctx->guest_rdx));
1028         return (handled);
1029 }
1030
1031 static __inline void
1032 vmx_run_trace(struct vmx *vmx, int vcpu)
1033 {
1034 #ifdef KTR
1035         VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
1036 #endif
1037 }
1038
1039 static __inline void
1040 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
1041                int handled)
1042 {
1043 #ifdef KTR
1044         VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
1045                  handled ? "handled" : "unhandled",
1046                  exit_reason_to_str(exit_reason), rip);
1047 #endif
1048 }
1049
1050 static __inline void
1051 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
1052 {
1053 #ifdef KTR
1054         VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
1055 #endif
1056 }
1057
1058 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
1059 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
1060
1061 /*
1062  * Invalidate guest mappings identified by its vpid from the TLB.
1063  */
1064 static __inline void
1065 vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
1066 {
1067         struct vmxstate *vmxstate;
1068         struct invvpid_desc invvpid_desc;
1069
1070         vmxstate = &vmx->state[vcpu];
1071         if (vmxstate->vpid == 0)
1072                 return;
1073
1074         if (!running) {
1075                 /*
1076                  * Set the 'lastcpu' to an invalid host cpu.
1077                  *
1078                  * This will invalidate TLB entries tagged with the vcpu's
1079                  * vpid the next time it runs via vmx_set_pcpu_defaults().
1080                  */
1081                 vmxstate->lastcpu = NOCPU;
1082                 return;
1083         }
1084
1085         KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
1086             "critical section", __func__, vcpu));
1087
1088         /*
1089          * Invalidate all mappings tagged with 'vpid'
1090          *
1091          * We do this because this vcpu was executing on a different host
1092          * cpu when it last ran. We do not track whether it invalidated
1093          * mappings associated with its 'vpid' during that run. So we must
1094          * assume that the mappings associated with 'vpid' on 'curcpu' are
1095          * stale and invalidate them.
1096          *
1097          * Note that we incur this penalty only when the scheduler chooses to
1098          * move the thread associated with this vcpu between host cpus.
1099          *
1100          * Note also that this will invalidate mappings tagged with 'vpid'
1101          * for "all" EP4TAs.
1102          */
1103         if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
1104                 invvpid_desc._res1 = 0;
1105                 invvpid_desc._res2 = 0;
1106                 invvpid_desc.vpid = vmxstate->vpid;
1107                 invvpid_desc.linear_addr = 0;
1108                 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
1109                 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
1110         } else {
1111                 /*
1112                  * The invvpid can be skipped if an invept is going to
1113                  * be performed before entering the guest. The invept
1114                  * will invalidate combined mappings tagged with
1115                  * 'vmx->eptp' for all vpids.
1116                  */
1117                 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
1118         }
1119 }
1120
1121 static void
1122 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
1123 {
1124         struct vmxstate *vmxstate;
1125
1126         vmxstate = &vmx->state[vcpu];
1127         if (vmxstate->lastcpu == curcpu)
1128                 return;
1129
1130         vmxstate->lastcpu = curcpu;
1131
1132         vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1133
1134         vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1135         vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1136         vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1137         vmx_invvpid(vmx, vcpu, pmap, 1);
1138 }
1139
1140 /*
1141  * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1142  */
1143 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
1144
1145 static void __inline
1146 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1147 {
1148
1149         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1150                 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1151                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1152                 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1153         }
1154 }
1155
1156 static void __inline
1157 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1158 {
1159
1160         KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1161             ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
1162         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1163         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1164         VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1165 }
1166
1167 static void __inline
1168 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1169 {
1170
1171         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
1172                 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1173                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1174                 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1175         }
1176 }
1177
1178 static void __inline
1179 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1180 {
1181
1182         KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
1183             ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
1184         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1185         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1186         VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1187 }
1188
1189 int
1190 vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset)
1191 {
1192         int error;
1193
1194         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
1195                 vmx->cap[vcpu].proc_ctls |= PROCBASED_TSC_OFFSET;
1196                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1197                 VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting");
1198         }
1199
1200         error = vmwrite(VMCS_TSC_OFFSET, offset);
1201
1202         return (error);
1203 }
1204
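/*
 * Minimal usage sketch for vmx_set_tsc_offset() (hypothetical caller, not
 * code from this file): to make the guest observe a TSC value of 'guest_tsc'
 * at this instant, a caller would do roughly
 *
 *         offset = guest_tsc - rdtsc();
 *         error = vmx_set_tsc_offset(vmx, vcpu, offset);
 *
 * since the guest reads host TSC + VMCS_TSC_OFFSET while the "use TSC
 * offsetting" control is enabled.
 */
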
1205 #define NMI_BLOCKING    (VMCS_INTERRUPTIBILITY_NMI_BLOCKING |           \
1206                          VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1207 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING |           \
1208                          VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1209
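/*
 * These are bits in the VMCS guest interruptibility-state field: blocking
 * by STI and by MOV SS each inhibit interrupt injection for one guest
 * instruction, while virtual-NMI blocking stays in effect until the guest
 * executes IRET (see vmx_restore_nmi_blocking() and friends below).
 */
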
1210 static void
1211 vmx_inject_nmi(struct vmx *vmx, int vcpu)
1212 {
1213         uint32_t gi, info;
1214
1215         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1216         KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
1217             "interruptibility-state %#x", gi));
1218
1219         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1220         KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
1221             "VM-entry interruption information %#x", info));
1222
1223         /*
1224          * Inject the virtual NMI. The vector must be the NMI IDT entry
1225          * or the VMCS entry check will fail.
1226          */
1227         info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
1228         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1229
1230         VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1231
1232         /* Clear the request */
1233         vm_nmi_clear(vmx->vm, vcpu);
1234 }
1235
1236 static void
1237 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
1238     uint64_t guestrip)
1239 {
1240         int vector, need_nmi_exiting, extint_pending;
1241         uint64_t rflags, entryinfo;
1242         uint32_t gi, info;
1243
1244         if (vmx->state[vcpu].nextrip != guestrip) {
1245                 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1246                 if (gi & HWINTR_BLOCKING) {
1247                         VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
1248                             "cleared due to rip change: %#lx/%#lx",
1249                             vmx->state[vcpu].nextrip, guestrip);
1250                         gi &= ~HWINTR_BLOCKING;
1251                         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1252                 }
1253         }
1254
1255         if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
1256                 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
1257                     "intinfo is not valid: %#lx", __func__, entryinfo));
1258
1259                 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1260                 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
1261                      "pending exception: %#lx/%#x", __func__, entryinfo, info));
1262
1263                 info = entryinfo;
1264                 vector = info & 0xff;
1265                 if (vector == IDT_BP || vector == IDT_OF) {
1266                         /*
1267                          * VT-x requires #BP and #OF to be injected as software
1268                          * exceptions.
1269                          */
1270                         info &= ~VMCS_INTR_T_MASK;
1271                         info |= VMCS_INTR_T_SWEXCEPTION;
1272                 }
1273
1274                 if (info & VMCS_INTR_DEL_ERRCODE)
1275                         vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
1276
1277                 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1278         }
1279
1280         if (vm_nmi_pending(vmx->vm, vcpu)) {
1281                 /*
1282                  * If there are no conditions blocking NMI injection then
1283                  * inject it directly here otherwise enable "NMI window
1284                  * exiting" to inject it as soon as we can.
1285                  *
1286                  * We also check for STI_BLOCKING because some implementations
1287                  * don't allow NMI injection in this case. If we are running
1288                  * on a processor that doesn't have this restriction it will
1289                  * immediately exit and the NMI will be injected in the
1290                  * "NMI window exiting" handler.
1291                  */
1292                 need_nmi_exiting = 1;
1293                 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1294                 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1295                         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1296                         if ((info & VMCS_INTR_VALID) == 0) {
1297                                 vmx_inject_nmi(vmx, vcpu);
1298                                 need_nmi_exiting = 0;
1299                         } else {
1300                                 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
1301                                     "due to VM-entry intr info %#x", info);
1302                         }
1303                 } else {
1304                         VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
1305                             "Guest Interruptibility-state %#x", gi);
1306                 }
1307
1308                 if (need_nmi_exiting)
1309                         vmx_set_nmi_window_exiting(vmx, vcpu);
1310         }
1311
1312         extint_pending = vm_extint_pending(vmx->vm, vcpu);
1313
1314         if (!extint_pending && virtual_interrupt_delivery) {
1315                 vmx_inject_pir(vlapic);
1316                 return;
1317         }
1318
1319         /*
1320          * If interrupt-window exiting is already in effect then don't bother
1321          * checking for pending interrupts. This is just an optimization and
1322          * not needed for correctness.
1323          */
1324         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
1325                 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
1326                     "pending int_window_exiting");
1327                 return;
1328         }
1329
1330         if (!extint_pending) {
1331                 /* Ask the local apic for a vector to inject */
1332                 if (!vlapic_pending_intr(vlapic, &vector))
1333                         return;
1334
1335                 /*
1336                  * From the Intel SDM, Volume 3, Section "Maskable
1337                  * Hardware Interrupts":
1338                  * - maskable interrupt vectors [16,255] can be delivered
1339                  *   through the local APIC.
1340                 */
1341                 KASSERT(vector >= 16 && vector <= 255,
1342                     ("invalid vector %d from local APIC", vector));
1343         } else {
1344                 /* Ask the legacy pic for a vector to inject */
1345                 vatpic_pending_intr(vmx->vm, &vector);
1346
1347                 /*
1348                  * From the Intel SDM, Volume 3, Section "Maskable
1349                  * Hardware Interrupts":
1350                  * - maskable interrupt vectors [0,255] can be delivered
1351                  *   through the INTR pin.
1352                  */
1353                 KASSERT(vector >= 0 && vector <= 255,
1354                     ("invalid vector %d from INTR", vector));
1355         }
1356
1357         /* Check RFLAGS.IF and the interruptibility state of the guest */
1358         rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1359         if ((rflags & PSL_I) == 0) {
1360                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1361                     "rflags %#lx", vector, rflags);
1362                 goto cantinject;
1363         }
1364
1365         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1366         if (gi & HWINTR_BLOCKING) {
1367                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1368                     "Guest Interruptibility-state %#x", vector, gi);
1369                 goto cantinject;
1370         }
1371
1372         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1373         if (info & VMCS_INTR_VALID) {
1374                 /*
1375                  * This is expected and could happen for multiple reasons:
1376                  * - A vectoring VM-entry was aborted due to astpending
1377                  * - A VM-exit happened during event injection.
1378                  * - An exception was injected above.
1379                  * - An NMI was injected above or after an "NMI window exiting" VM-exit.
1380                  */
1381                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1382                     "VM-entry intr info %#x", vector, info);
1383                 goto cantinject;
1384         }
1385
1386         /* Inject the interrupt */
1387         info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
1388         info |= vector;
1389         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1390
1391         if (!extint_pending) {
1392                 /* Update the Local APIC ISR */
1393                 vlapic_intr_accepted(vlapic, vector);
1394         } else {
1395                 vm_extint_clear(vmx->vm, vcpu);
1396                 vatpic_intr_accepted(vmx->vm, vector);
1397
1398                 /*
1399                  * After we accepted the current ExtINT the PIC may
1400                  * have posted another one.  If that is the case, set
1401                  * the Interrupt Window Exiting execution control so
1402                  * we can inject that one too.
1403                  *
1404                  * Also, interrupt window exiting allows us to inject any
1405                  * pending APIC vector that was preempted by the ExtINT
1406                  * as soon as possible. This applies both to the software
1407                  * emulated vlapic and the hardware assisted virtual APIC.
1408                  */
1409                 vmx_set_int_window_exiting(vmx, vcpu);
1410         }
1411
1412         VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1413
1414         return;
1415
1416 cantinject:
1417         /*
1418          * Set the Interrupt Window Exiting execution control so we can inject
1419          * the interrupt as soon as the blocking condition goes away.
1420          */
1421         vmx_set_int_window_exiting(vmx, vcpu);
1422 }
1423
1424 /*
1425  * If the Virtual NMIs execution control is '1' then the logical processor
1426  * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1427  * the VMCS. An IRET instruction in VMX non-root operation will remove any
1428  * virtual-NMI blocking.
1429  *
1430  * This unblocking occurs even if the IRET causes a fault. In this case the
1431  * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1432  */
1433 static void
1434 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1435 {
1436         uint32_t gi;
1437
1438         VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
1439         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1440         gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1441         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1442 }
1443
1444 static void
1445 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1446 {
1447         uint32_t gi;
1448
1449         VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
1450         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1451         gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1452         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1453 }
1454
1455 static void
1456 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
1457 {
1458         uint32_t gi;
1459
1460         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1461         KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
1462             ("NMI blocking is not in effect %#x", gi));
1463 }
1464
1465 static int
1466 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1467 {
1468         struct vmxctx *vmxctx;
1469         uint64_t xcrval;
1470         const struct xsave_limits *limits;
1471
1472         vmxctx = &vmx->ctx[vcpu];
1473         limits = vmm_get_xsave_limits();
1474
1475         /*
1476          * Note that the processor raises a GP# fault on its own if
1477          * xsetbv is executed for CPL != 0, so we do not have to
1478          * emulate that fault here.
1479          */
1480
1481         /* Only xcr0 is supported. */
1482         if (vmxctx->guest_rcx != 0) {
1483                 vm_inject_gp(vmx->vm, vcpu);
1484                 return (HANDLED);
1485         }
1486
1487         /* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1488         if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1489                 vm_inject_ud(vmx->vm, vcpu);
1490                 return (HANDLED);
1491         }
1492
1493         xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1494         if ((xcrval & ~limits->xcr0_allowed) != 0) {
1495                 vm_inject_gp(vmx->vm, vcpu);
1496                 return (HANDLED);
1497         }
1498
1499         if (!(xcrval & XFEATURE_ENABLED_X87)) {
1500                 vm_inject_gp(vmx->vm, vcpu);
1501                 return (HANDLED);
1502         }
1503
1504         /* AVX (YMM_Hi128) requires SSE. */
1505         if (xcrval & XFEATURE_ENABLED_AVX &&
1506             (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1507                 vm_inject_gp(vmx->vm, vcpu);
1508                 return (HANDLED);
1509         }
1510
1511         /*
1512          * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1513          * ZMM_Hi256, and Hi16_ZMM.
1514          */
1515         if (xcrval & XFEATURE_AVX512 &&
1516             (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1517             (XFEATURE_AVX512 | XFEATURE_AVX)) {
1518                 vm_inject_gp(vmx->vm, vcpu);
1519                 return (HANDLED);
1520         }
1521
1522         /*
1523          * Intel MPX requires the bound register state components
1524          * (BNDREGS and BNDCSR) to be set or cleared together.
1525          */
1526         if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1527             ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1528                 vm_inject_gp(vmx->vm, vcpu);
1529                 return (HANDLED);
1530         }
1531
1532         /*
1533          * This runs "inside" vmrun() with the guest's FPU state, so
1534          * modifying xcr0 directly modifies the guest's xcr0, not the
1535          * host's.
1536          */
1537         load_xcr(0, xcrval);
1538         return (HANDLED);
1539 }
1540
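     /*
      * Map a register identifier from the VM-exit instruction information
      * or exit qualification (0 = %rax ... 15 = %r15) onto the saved guest
      * context. All general purpose registers except %rsp are kept in the
      * software-maintained vmxctx; %rsp (encoding 4) lives in the VMCS.
      */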
1541 static uint64_t
1542 vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
1543 {
1544         const struct vmxctx *vmxctx;
1545
1546         vmxctx = &vmx->ctx[vcpu];
1547
1548         switch (ident) {
1549         case 0:
1550                 return (vmxctx->guest_rax);
1551         case 1:
1552                 return (vmxctx->guest_rcx);
1553         case 2:
1554                 return (vmxctx->guest_rdx);
1555         case 3:
1556                 return (vmxctx->guest_rbx);
1557         case 4:
1558                 return (vmcs_read(VMCS_GUEST_RSP));
1559         case 5:
1560                 return (vmxctx->guest_rbp);
1561         case 6:
1562                 return (vmxctx->guest_rsi);
1563         case 7:
1564                 return (vmxctx->guest_rdi);
1565         case 8:
1566                 return (vmxctx->guest_r8);
1567         case 9:
1568                 return (vmxctx->guest_r9);
1569         case 10:
1570                 return (vmxctx->guest_r10);
1571         case 11:
1572                 return (vmxctx->guest_r11);
1573         case 12:
1574                 return (vmxctx->guest_r12);
1575         case 13:
1576                 return (vmxctx->guest_r13);
1577         case 14:
1578                 return (vmxctx->guest_r14);
1579         case 15:
1580                 return (vmxctx->guest_r15);
1581         default:
1582                 panic("invalid vmx register %d", ident);
1583         }
1584 }
1585
1586 static void
1587 vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
1588 {
1589         struct vmxctx *vmxctx;
1590
1591         vmxctx = &vmx->ctx[vcpu];
1592
1593         switch (ident) {
1594         case 0:
1595                 vmxctx->guest_rax = regval;
1596                 break;
1597         case 1:
1598                 vmxctx->guest_rcx = regval;
1599                 break;
1600         case 2:
1601                 vmxctx->guest_rdx = regval;
1602                 break;
1603         case 3:
1604                 vmxctx->guest_rbx = regval;
1605                 break;
1606         case 4:
1607                 vmcs_write(VMCS_GUEST_RSP, regval);
1608                 break;
1609         case 5:
1610                 vmxctx->guest_rbp = regval;
1611                 break;
1612         case 6:
1613                 vmxctx->guest_rsi = regval;
1614                 break;
1615         case 7:
1616                 vmxctx->guest_rdi = regval;
1617                 break;
1618         case 8:
1619                 vmxctx->guest_r8 = regval;
1620                 break;
1621         case 9:
1622                 vmxctx->guest_r9 = regval;
1623                 break;
1624         case 10:
1625                 vmxctx->guest_r10 = regval;
1626                 break;
1627         case 11:
1628                 vmxctx->guest_r11 = regval;
1629                 break;
1630         case 12:
1631                 vmxctx->guest_r12 = regval;
1632                 break;
1633         case 13:
1634                 vmxctx->guest_r13 = regval;
1635                 break;
1636         case 14:
1637                 vmxctx->guest_r14 = regval;
1638                 break;
1639         case 15:
1640                 vmxctx->guest_r15 = regval;
1641                 break;
1642         default:
1643                 panic("invalid vmx register %d", ident);
1644         }
1645 }
1646
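     /*
      * Exit qualification for control-register accesses: bits 3:0 identify
      * the control register, bits 5:4 the access type (0 = MOV to CR) and
      * bits 11:8 the general purpose register operand.
      */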
1647 static int
1648 vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1649 {
1650         uint64_t crval, regval;
1651
1652         /* We only handle mov to %cr0 at this time */
1653         if ((exitqual & 0xf0) != 0x00)
1654                 return (UNHANDLED);
1655
1656         regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1657
1658         vmcs_write(VMCS_CR0_SHADOW, regval);
1659
1660         crval = regval | cr0_ones_mask;
1661         crval &= ~cr0_zeros_mask;
1662         vmcs_write(VMCS_GUEST_CR0, crval);
1663
1664         if (regval & CR0_PG) {
1665                 uint64_t efer, entry_ctls;
1666
1667                 /*
1668                  * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1669                  * the "IA-32e mode guest" bit in VM-entry control must be
1670                  * equal.
1671                  */
1672                 efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1673                 if (efer & EFER_LME) {
1674                         efer |= EFER_LMA;
1675                         vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1676                         entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1677                         entry_ctls |= VM_ENTRY_GUEST_LMA;
1678                         vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1679                 }
1680         }
1681
1682         return (HANDLED);
1683 }
1684
1685 static int
1686 vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1687 {
1688         uint64_t crval, regval;
1689
1690         /* We only handle mov to %cr4 at this time */
1691         if ((exitqual & 0xf0) != 0x00)
1692                 return (UNHANDLED);
1693
1694         regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1695
1696         vmcs_write(VMCS_CR4_SHADOW, regval);
1697
1698         crval = regval | cr4_ones_mask;
1699         crval &= ~cr4_zeros_mask;
1700         vmcs_write(VMCS_GUEST_CR4, crval);
1701
1702         return (HANDLED);
1703 }
1704
1705 static int
1706 vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1707 {
1708         struct vlapic *vlapic;
1709         uint64_t cr8;
1710         int regnum;
1711
1712         /* We only handle mov %cr8 to/from a register at this time. */
1713         if ((exitqual & 0xe0) != 0x00) {
1714                 return (UNHANDLED);
1715         }
1716
1717         vlapic = vm_lapic(vmx->vm, vcpu);
1718         regnum = (exitqual >> 8) & 0xf;
1719         if (exitqual & 0x10) {
1720                 cr8 = vlapic_get_cr8(vlapic);
1721                 vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
1722         } else {
1723                 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
1724                 vlapic_set_cr8(vlapic, cr8);
1725         }
1726
1727         return (HANDLED);
1728 }
1729
1730 /*
1731  * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
1732  */
1733 static int
1734 vmx_cpl(void)
1735 {
1736         uint32_t ssar;
1737
1738         ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
1739         return ((ssar >> 5) & 0x3);
1740 }
1741
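     /*
      * Derive the guest CPU mode from EFER.LMA, CS.L (bit 13 of the CS
      * access rights) and CR0.PE: 64-bit if both LMA and CS.L are set,
      * compatibility if only LMA is set, protected if CR0.PE is set and
      * real mode otherwise.
      */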
1742 static enum vm_cpu_mode
1743 vmx_cpu_mode(void)
1744 {
1745         uint32_t csar;
1746
1747         if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
1748                 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1749                 if (csar & 0x2000)
1750                         return (CPU_MODE_64BIT);        /* CS.L = 1 */
1751                 else
1752                         return (CPU_MODE_COMPATIBILITY);
1753         } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
1754                 return (CPU_MODE_PROTECTED);
1755         } else {
1756                 return (CPU_MODE_REAL);
1757         }
1758 }
1759
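     /*
      * Derive the guest paging mode from CR0.PG, CR4.PAE and EFER.LME:
      * flat (no paging) if CR0.PG is clear, 32-bit if CR4.PAE is clear,
      * 64-bit (long mode) if EFER.LME is set and PAE otherwise.
      */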
1760 static enum vm_paging_mode
1761 vmx_paging_mode(void)
1762 {
1763
1764         if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1765                 return (PAGING_MODE_FLAT);
1766         if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1767                 return (PAGING_MODE_32);
1768         if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1769                 return (PAGING_MODE_64);
1770         else
1771                 return (PAGING_MODE_PAE);
1772 }
1773
1774 static uint64_t
1775 inout_str_index(struct vmx *vmx, int vcpuid, int in)
1776 {
1777         uint64_t val;
1778         int error;
1779         enum vm_reg_name reg;
1780
1781         reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
1782         error = vmx_getreg(vmx, vcpuid, reg, &val);
1783         KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
1784         return (val);
1785 }
1786
1787 static uint64_t
1788 inout_str_count(struct vmx *vmx, int vcpuid, int rep)
1789 {
1790         uint64_t val;
1791         int error;
1792
1793         if (rep) {
1794                 error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
1795                 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
1796         } else {
1797                 val = 1;
1798         }
1799         return (val);
1800 }
1801
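     /*
      * Decode the address-size field (bits 9:7) of the VM-exit instruction
      * information for INS/OUTS into the address size in bytes.
      */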
1802 static int
1803 inout_str_addrsize(uint32_t inst_info)
1804 {
1805         uint32_t size;
1806
1807         size = (inst_info >> 7) & 0x7;
1808         switch (size) {
1809         case 0:
1810                 return (2);     /* 16 bit */
1811         case 1:
1812                 return (4);     /* 32 bit */
1813         case 2:
1814                 return (8);     /* 64 bit */
1815         default:
1816                 panic("%s: invalid size encoding %d", __func__, size);
1817         }
1818 }
1819
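     /*
      * INS always stores to ES:rDI.  For OUTS the source segment is taken
      * from bits 17:15 of the VM-exit instruction information (DS unless
      * overridden by a segment prefix).
      */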
1820 static void
1821 inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
1822     struct vm_inout_str *vis)
1823 {
1824         int error, s;
1825
1826         if (in) {
1827                 vis->seg_name = VM_REG_GUEST_ES;
1828         } else {
1829                 s = (inst_info >> 15) & 0x7;
1830                 vis->seg_name = vm_segment_name(s);
1831         }
1832
1833         error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
1834         KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
1835 }
1836
1837 static void
1838 vmx_paging_info(struct vm_guest_paging *paging)
1839 {
1840         paging->cr3 = vmcs_guest_cr3();
1841         paging->cpl = vmx_cpl();
1842         paging->cpu_mode = vmx_cpu_mode();
1843         paging->paging_mode = vmx_paging_mode();
1844 }
1845
1846 static void
1847 vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
1848 {
1849         struct vm_guest_paging *paging;
1850         uint32_t csar;
1851
1852         paging = &vmexit->u.inst_emul.paging;
1853
1854         vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1855         vmexit->inst_length = 0;
1856         vmexit->u.inst_emul.gpa = gpa;
1857         vmexit->u.inst_emul.gla = gla;
1858         vmx_paging_info(paging);
1859         switch (paging->cpu_mode) {
1860         case CPU_MODE_REAL:
1861                 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1862                 vmexit->u.inst_emul.cs_d = 0;
1863                 break;
1864         case CPU_MODE_PROTECTED:
1865         case CPU_MODE_COMPATIBILITY:
1866                 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1867                 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1868                 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
1869                 break;
1870         default:
1871                 vmexit->u.inst_emul.cs_base = 0;
1872                 vmexit->u.inst_emul.cs_d = 0;
1873                 break;
1874         }
1875         vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
1876 }
1877
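     /*
      * Map the access bits in the EPT violation exit qualification onto a
      * VM_PROT_* fault type for the paging exit.
      */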
1878 static int
1879 ept_fault_type(uint64_t ept_qual)
1880 {
1881         int fault_type;
1882
1883         if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1884                 fault_type = VM_PROT_WRITE;
1885         else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1886                 fault_type = VM_PROT_EXECUTE;
1887         else
1888                 fault_type = VM_PROT_READ;
1889
1890         return (fault_type);
1891 }
1892
1893 static boolean_t
1894 ept_emulation_fault(uint64_t ept_qual)
1895 {
1896         int read, write;
1897
1898         /* EPT fault on an instruction fetch doesn't make sense here */
1899         if (ept_qual & EPT_VIOLATION_INST_FETCH)
1900                 return (FALSE);
1901
1902         /* EPT fault must be a read fault or a write fault */
1903         read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1904         write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1905         if ((read | write) == 0)
1906                 return (FALSE);
1907
1908         /*
1909          * The EPT violation must have been caused by accessing a
1910          * guest-physical address that is a translation of a guest-linear
1911          * address.
1912          */
1913         if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1914             (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1915                 return (FALSE);
1916         }
1917
1918         return (TRUE);
1919 }
1920
1921 static __inline int
1922 apic_access_virtualization(struct vmx *vmx, int vcpuid)
1923 {
1924         uint32_t proc_ctls2;
1925
1926         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1927         return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1928 }
1929
1930 static __inline int
1931 x2apic_virtualization(struct vmx *vmx, int vcpuid)
1932 {
1933         uint32_t proc_ctls2;
1934
1935         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1936         return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1937 }
1938
1939 static int
1940 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1941     uint64_t qual)
1942 {
1943         int error, handled, offset;
1944         uint32_t *apic_regs, vector;
1945         bool retu;
1946
1947         handled = HANDLED;
1948         offset = APIC_WRITE_OFFSET(qual);
1949
1950         if (!apic_access_virtualization(vmx, vcpuid)) {
1951                 /*
1952                  * In general there should not be any APIC write VM-exits
1953                  * unless APIC-access virtualization is enabled.
1954                  *
1955                  * However, self-IPI virtualization can legitimately trigger
1956                  * an APIC-write VM-exit so treat it specially.
1957                  */
1958                 if (x2apic_virtualization(vmx, vcpuid) &&
1959                     offset == APIC_OFFSET_SELF_IPI) {
1960                         apic_regs = (uint32_t *)(vlapic->apic_page);
1961                         vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1962                         vlapic_self_ipi_handler(vlapic, vector);
1963                         return (HANDLED);
1964                 } else
1965                         return (UNHANDLED);
1966         }
1967
1968         switch (offset) {
1969         case APIC_OFFSET_ID:
1970                 vlapic_id_write_handler(vlapic);
1971                 break;
1972         case APIC_OFFSET_LDR:
1973                 vlapic_ldr_write_handler(vlapic);
1974                 break;
1975         case APIC_OFFSET_DFR:
1976                 vlapic_dfr_write_handler(vlapic);
1977                 break;
1978         case APIC_OFFSET_SVR:
1979                 vlapic_svr_write_handler(vlapic);
1980                 break;
1981         case APIC_OFFSET_ESR:
1982                 vlapic_esr_write_handler(vlapic);
1983                 break;
1984         case APIC_OFFSET_ICR_LOW:
1985                 retu = false;
1986                 error = vlapic_icrlo_write_handler(vlapic, &retu);
1987                 if (error != 0 || retu)
1988                         handled = UNHANDLED;
1989                 break;
1990         case APIC_OFFSET_CMCI_LVT:
1991         case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1992                 vlapic_lvt_write_handler(vlapic, offset);
1993                 break;
1994         case APIC_OFFSET_TIMER_ICR:
1995                 vlapic_icrtmr_write_handler(vlapic);
1996                 break;
1997         case APIC_OFFSET_TIMER_DCR:
1998                 vlapic_dcr_write_handler(vlapic);
1999                 break;
2000         default:
2001                 handled = UNHANDLED;
2002                 break;
2003         }
2004         return (handled);
2005 }
2006
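     /*
      * Return true if the EPT fault hit the page backing the default local
      * APIC base while APIC-access virtualization is enabled. Such faults
      * are handled as paging exits rather than MMIO emulation.
      */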
2007 static bool
2008 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
2009 {
2010
2011         if (apic_access_virtualization(vmx, vcpuid) &&
2012             (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
2013                 return (true);
2014         else
2015                 return (false);
2016 }
2017
2018 static int
2019 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2020 {
2021         uint64_t qual;
2022         int access_type, offset, allowed;
2023
2024         if (!apic_access_virtualization(vmx, vcpuid))
2025                 return (UNHANDLED);
2026
2027         qual = vmexit->u.vmx.exit_qualification;
2028         access_type = APIC_ACCESS_TYPE(qual);
2029         offset = APIC_ACCESS_OFFSET(qual);
2030
2031         allowed = 0;
2032         if (access_type == 0) {
2033                 /*
2034                  * Read data access to the following registers is expected.
2035                  */
2036                 switch (offset) {
2037                 case APIC_OFFSET_APR:
2038                 case APIC_OFFSET_PPR:
2039                 case APIC_OFFSET_RRR:
2040                 case APIC_OFFSET_CMCI_LVT:
2041                 case APIC_OFFSET_TIMER_CCR:
2042                         allowed = 1;
2043                         break;
2044                 default:
2045                         break;
2046                 }
2047         } else if (access_type == 1) {
2048                 /*
2049                  * Write data access to the following registers is expected.
2050                  */
2051                 switch (offset) {
2052                 case APIC_OFFSET_VER:
2053                 case APIC_OFFSET_APR:
2054                 case APIC_OFFSET_PPR:
2055                 case APIC_OFFSET_RRR:
2056                 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
2057                 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
2058                 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
2059                 case APIC_OFFSET_CMCI_LVT:
2060                 case APIC_OFFSET_TIMER_CCR:
2061                         allowed = 1;
2062                         break;
2063                 default:
2064                         break;
2065                 }
2066         }
2067
2068         if (allowed) {
2069                 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
2070                     VIE_INVALID_GLA);
2071         }
2072
2073         /*
2074          * Regardless of whether the APIC-access is allowed this handler
2075          * always returns UNHANDLED:
2076          * - if the access is allowed then it is handled by emulating the
2077          *   instruction that caused the VM-exit (outside the critical section)
2078          * - if the access is not allowed then it will be converted to an
2079          *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
2080          */
2081         return (UNHANDLED);
2082 }
2083
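     /*
      * Bits 31:30 of the exit qualification encode the source of the task
      * switch: CALL, IRET, JMP or a task gate in the IDT.
      */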
2084 static enum task_switch_reason
2085 vmx_task_switch_reason(uint64_t qual)
2086 {
2087         int reason;
2088
2089         reason = (qual >> 30) & 0x3;
2090         switch (reason) {
2091         case 0:
2092                 return (TSR_CALL);
2093         case 1:
2094                 return (TSR_IRET);
2095         case 2:
2096                 return (TSR_JMP);
2097         case 3:
2098                 return (TSR_IDT_GATE);
2099         default:
2100                 panic("%s: invalid reason %d", __func__, reason);
2101         }
2102 }
2103
2104 static int
2105 emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2106 {
2107         int error;
2108
2109         if (lapic_msr(num))
2110                 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2111         else
2112                 error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2113
2114         return (error);
2115 }
2116
2117 static int
2118 emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2119 {
2120         struct vmxctx *vmxctx;
2121         uint64_t result;
2122         uint32_t eax, edx;
2123         int error;
2124
2125         if (lapic_msr(num))
2126                 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2127         else
2128                 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2129
2130         if (error == 0) {
2131                 eax = result;
2132                 vmxctx = &vmx->ctx[vcpuid];
2133                 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2134                 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2135
2136                 edx = result >> 32;
2137                 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2138                 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2139         }
2140
2141         return (error);
2142 }
2143
2144 static int
2145 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2146 {
2147         int error, errcode, errcode_valid, handled, in;
2148         struct vmxctx *vmxctx;
2149         struct vlapic *vlapic;
2150         struct vm_inout_str *vis;
2151         struct vm_task_switch *ts;
2152         uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2153         uint32_t intr_type, intr_vec, reason;
2154         uint64_t exitintinfo, qual, gpa;
2155         bool retu;
2156
2157         CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2158         CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2159
2160         handled = UNHANDLED;
2161         vmxctx = &vmx->ctx[vcpu];
2162
2163         qual = vmexit->u.vmx.exit_qualification;
2164         reason = vmexit->u.vmx.exit_reason;
2165         vmexit->exitcode = VM_EXITCODE_BOGUS;
2166
2167         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2168
2169         /*
2170          * VM-entry failures during or after loading guest state.
2171          *
2172          * These VM-exits are uncommon but must be handled specially
2173          * as most VM-exit fields are not populated as usual.
2174          */
2175         if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
2176                 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
2177                 __asm __volatile("int $18");
2178                 return (1);
2179         }
2180
2181         /*
2182          * VM exits that can be triggered during event delivery need to
2183          * be handled specially by re-injecting the event if the IDT
2184          * vectoring information field's valid bit is set.
2185          *
2186          * See "Information for VM Exits During Event Delivery" in Intel SDM
2187          * for details.
2188          */
2189         idtvec_info = vmcs_idt_vectoring_info();
2190         if (idtvec_info & VMCS_IDT_VEC_VALID) {
2191                 idtvec_info &= ~(1 << 12); /* clear undefined bit */
2192                 exitintinfo = idtvec_info;
2193                 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2194                         idtvec_err = vmcs_idt_vectoring_err();
2195                         exitintinfo |= (uint64_t)idtvec_err << 32;
2196                 }
2197                 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2198                 KASSERT(error == 0, ("%s: vm_exit_intinfo error %d",
2199                     __func__, error));
2200
2201                 /*
2202                  * If 'virtual NMIs' are being used and the VM-exit
2203                  * happened while injecting an NMI during the previous
2204                  * VM-entry, then clear "blocking by NMI" in the
2205                  * Guest Interruptibility-State so the NMI can be
2206                  * reinjected on the subsequent VM-entry.
2207                  *
2208                  * However, if the NMI was being delivered through a task
2209                  * gate, then the new task must start execution with NMIs
2210                  * blocked so don't clear NMI blocking in this case.
2211                  */
2212                 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2213                 if (intr_type == VMCS_INTR_T_NMI) {
2214                         if (reason != EXIT_REASON_TASK_SWITCH)
2215                                 vmx_clear_nmi_blocking(vmx, vcpu);
2216                         else
2217                                 vmx_assert_nmi_blocking(vmx, vcpu);
2218                 }
2219
2220                 /*
2221                  * Update VM-entry instruction length if the event being
2222                  * delivered was a software interrupt or software exception.
2223                  */
2224                 if (intr_type == VMCS_INTR_T_SWINTR ||
2225                     intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2226                     intr_type == VMCS_INTR_T_SWEXCEPTION) {
2227                         vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2228                 }
2229         }
2230
2231         switch (reason) {
2232         case EXIT_REASON_TASK_SWITCH:
2233                 ts = &vmexit->u.task_switch;
2234                 ts->tsssel = qual & 0xffff;
2235                 ts->reason = vmx_task_switch_reason(qual);
2236                 ts->ext = 0;
2237                 ts->errcode_valid = 0;
2238                 vmx_paging_info(&ts->paging);
2239                 /*
2240                  * If the task switch was due to a CALL, JMP, IRET, software
2241                  * interrupt (INT n) or software exception (INT3, INTO),
2242                  * then the saved %rip references the instruction that caused
2243                  * the task switch. The instruction length field in the VMCS
2244                  * is valid in this case.
2245                  *
2246                  * In all other cases (e.g., NMI, hardware exception) the
2247                  * saved %rip is one that would have been saved in the old TSS
2248                  * had the task switch completed normally so the instruction
2249                  * length field is not needed in this case and is explicitly
2250                  * set to 0.
2251                  */
2252                 if (ts->reason == TSR_IDT_GATE) {
2253                         KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2254                             ("invalid idtvec_info %#x for IDT task switch",
2255                             idtvec_info));
2256                         intr_type = idtvec_info & VMCS_INTR_T_MASK;
2257                         if (intr_type != VMCS_INTR_T_SWINTR &&
2258                             intr_type != VMCS_INTR_T_SWEXCEPTION &&
2259                             intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2260                                 /* Task switch triggered by external event */
2261                                 ts->ext = 1;
2262                                 vmexit->inst_length = 0;
2263                                 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2264                                         ts->errcode_valid = 1;
2265                                         ts->errcode = vmcs_idt_vectoring_err();
2266                                 }
2267                         }
2268                 }
2269                 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2270                 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2271                     "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2272                     ts->ext ? "external" : "internal",
2273                     ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2274                 break;
2275         case EXIT_REASON_CR_ACCESS:
2276                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2277                 switch (qual & 0xf) {
2278                 case 0:
2279                         handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2280                         break;
2281                 case 4:
2282                         handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2283                         break;
2284                 case 8:
2285                         handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2286                         break;
2287                 }
2288                 break;
2289         case EXIT_REASON_RDMSR:
2290                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2291                 retu = false;
2292                 ecx = vmxctx->guest_rcx;
2293                 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2294                 error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
2295                 if (error) {
2296                         vmexit->exitcode = VM_EXITCODE_RDMSR;
2297                         vmexit->u.msr.code = ecx;
2298                 } else if (!retu) {
2299                         handled = HANDLED;
2300                 } else {
2301                         /* Return to userspace with a valid exitcode */
2302                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2303                             ("emulate_rdmsr retu with bogus exitcode"));
2304                 }
2305                 break;
2306         case EXIT_REASON_WRMSR:
2307                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2308                 retu = false;
2309                 eax = vmxctx->guest_rax;
2310                 ecx = vmxctx->guest_rcx;
2311                 edx = vmxctx->guest_rdx;
2312                 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2313                     ecx, (uint64_t)edx << 32 | eax);
2314                 error = emulate_wrmsr(vmx, vcpu, ecx,
2315                     (uint64_t)edx << 32 | eax, &retu);
2316                 if (error) {
2317                         vmexit->exitcode = VM_EXITCODE_WRMSR;
2318                         vmexit->u.msr.code = ecx;
2319                         vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2320                 } else if (!retu) {
2321                         handled = HANDLED;
2322                 } else {
2323                         /* Return to userspace with a valid exitcode */
2324                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2325                             ("emulate_wrmsr retu with bogus exitcode"));
2326                 }
2327                 break;
2328         case EXIT_REASON_HLT:
2329                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2330                 vmexit->exitcode = VM_EXITCODE_HLT;
2331                 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2332                 if (virtual_interrupt_delivery)
2333                         vmexit->u.hlt.intr_status =
2334                             vmcs_read(VMCS_GUEST_INTR_STATUS);
2335                 else
2336                         vmexit->u.hlt.intr_status = 0;
2337                 break;
2338         case EXIT_REASON_MTF:
2339                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2340                 vmexit->exitcode = VM_EXITCODE_MTRAP;
2341                 vmexit->inst_length = 0;
2342                 break;
2343         case EXIT_REASON_PAUSE:
2344                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2345                 vmexit->exitcode = VM_EXITCODE_PAUSE;
2346                 break;
2347         case EXIT_REASON_INTR_WINDOW:
2348                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2349                 vmx_clear_int_window_exiting(vmx, vcpu);
2350                 return (1);
2351         case EXIT_REASON_EXT_INTR:
2352                 /*
2353                  * External interrupts serve only to cause VM exits and allow
2354                  * the host interrupt handler to run.
2355                  *
2356                  * If this external interrupt triggers a virtual interrupt
2357                  * to a VM, then that state will be recorded by the
2358                  * host interrupt handler in the VM's softc. We will inject
2359                  * this virtual interrupt during the subsequent VM enter.
2360                  */
2361                 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2362
2363                 /*
2364                  * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2365                  * This appears to be a bug in VMware Fusion?
2366                  */
2367                 if (!(intr_info & VMCS_INTR_VALID))
2368                         return (1);
2369                 KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2370                     (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2371                     ("VM exit interruption info invalid: %#x", intr_info));
2372                 vmx_trigger_hostintr(intr_info & 0xff);
2373
2374                 /*
2375                  * This is special. We want to treat this as a 'handled'
2376                  * VM-exit but not increment the instruction pointer.
2377                  */
2378                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2379                 return (1);
2380         case EXIT_REASON_NMI_WINDOW:
2381                 /* Exit to allow the pending virtual NMI to be injected */
2382                 if (vm_nmi_pending(vmx->vm, vcpu))
2383                         vmx_inject_nmi(vmx, vcpu);
2384                 vmx_clear_nmi_window_exiting(vmx, vcpu);
2385                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2386                 return (1);
2387         case EXIT_REASON_INOUT:
2388                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2389                 vmexit->exitcode = VM_EXITCODE_INOUT;
2390                 vmexit->u.inout.bytes = (qual & 0x7) + 1;
2391                 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2392                 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2393                 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2394                 vmexit->u.inout.port = (uint16_t)(qual >> 16);
2395                 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2396                 if (vmexit->u.inout.string) {
2397                         inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2398                         vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2399                         vis = &vmexit->u.inout_str;
2400                         vmx_paging_info(&vis->paging);
2401                         vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2402                         vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2403                         vis->index = inout_str_index(vmx, vcpu, in);
2404                         vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2405                         vis->addrsize = inout_str_addrsize(inst_info);
2406                         inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2407                 }
2408                 break;
2409         case EXIT_REASON_CPUID:
2410                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2411                 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2412                 break;
2413         case EXIT_REASON_EXCEPTION:
2414                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2415                 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2416                 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2417                     ("VM exit interruption info invalid: %#x", intr_info));
2418
2419                 intr_vec = intr_info & 0xff;
2420                 intr_type = intr_info & VMCS_INTR_T_MASK;
2421
2422                 /*
2423                  * If Virtual NMIs control is 1 and the VM-exit is due to a
2424                  * fault encountered during the execution of IRET then we must
2425                  * restore the state of "virtual-NMI blocking" before resuming
2426                  * the guest.
2427                  *
2428                  * See "Resuming Guest Software after Handling an Exception".
2429                  * See "Information for VM Exits Due to Vectored Events".
2430                  */
2431                 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2432                     (intr_vec != IDT_DF) &&
2433                     (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2434                         vmx_restore_nmi_blocking(vmx, vcpu);
2435
2436                 /*
2437                  * The NMI has already been handled in vmx_exit_handle_nmi().
2438                  */
2439                 if (intr_type == VMCS_INTR_T_NMI)
2440                         return (1);
2441
2442                 /*
2443                  * Call the machine check handler by hand. Also don't reflect
2444                  * the machine check back into the guest.
2445                  */
2446                 if (intr_vec == IDT_MC) {
2447                         VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
2448                         __asm __volatile("int $18");
2449                         return (1);
2450                 }
2451
2452                 if (intr_vec == IDT_PF) {
2453                         error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
2454                         KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
2455                             __func__, error));
2456                 }
2457
2458                 /*
2459                  * Software exceptions exhibit trap-like behavior. This in
2460                  * turn requires populating the VM-entry instruction length
2461                  * so that the %rip in the trap frame is past the INT3/INTO
2462                  * instruction.
2463                  */
2464                 if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2465                         vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2466
2467                 /* Reflect all other exceptions back into the guest */
2468                 errcode_valid = errcode = 0;
2469                 if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2470                         errcode_valid = 1;
2471                         errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2472                 }
2473                 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
2474                     "the guest", intr_vec, errcode);
2475                 error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
2476                     errcode_valid, errcode, 0);
2477                 KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2478                     __func__, error));
2479                 return (1);
2480
2481         case EXIT_REASON_EPT_FAULT:
2482                 /*
2483                  * If 'gpa' lies within the address space allocated to
2484                  * memory then this must be a nested page fault; otherwise
2485                  * this must be an instruction that accesses MMIO space.
2486                  */
2487                 gpa = vmcs_gpa();
2488                 if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
2489                     apic_access_fault(vmx, vcpu, gpa)) {
2490                         vmexit->exitcode = VM_EXITCODE_PAGING;
2491                         vmexit->inst_length = 0;
2492                         vmexit->u.paging.gpa = gpa;
2493                         vmexit->u.paging.fault_type = ept_fault_type(qual);
2494                         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2495                 } else if (ept_emulation_fault(qual)) {
2496                         vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2497                         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2498                 }
2499                 /*
2500                  * If Virtual NMIs control is 1 and the VM-exit is due to an
2501                  * EPT fault during the execution of IRET then we must restore
2502                  * the state of "virtual-NMI blocking" before resuming.
2503                  *
2504                  * See description of "NMI unblocking due to IRET" in
2505                  * "Exit Qualification for EPT Violations".
2506                  */
2507                 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2508                     (qual & EXIT_QUAL_NMIUDTI) != 0)
2509                         vmx_restore_nmi_blocking(vmx, vcpu);
2510                 break;
2511         case EXIT_REASON_VIRTUALIZED_EOI:
2512                 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2513                 vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2514                 vmexit->inst_length = 0;        /* trap-like */
2515                 break;
2516         case EXIT_REASON_APIC_ACCESS:
2517                 handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2518                 break;
2519         case EXIT_REASON_APIC_WRITE:
2520                 /*
2521                  * APIC-write VM exit is trap-like so the %rip is already
2522                  * pointing to the next instruction.
2523                  */
2524                 vmexit->inst_length = 0;
2525                 vlapic = vm_lapic(vmx->vm, vcpu);
2526                 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2527                 break;
2528         case EXIT_REASON_XSETBV:
2529                 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2530                 break;
2531         case EXIT_REASON_MONITOR:
2532                 vmexit->exitcode = VM_EXITCODE_MONITOR;
2533                 break;
2534         case EXIT_REASON_MWAIT:
2535                 vmexit->exitcode = VM_EXITCODE_MWAIT;
2536                 break;
2537         default:
2538                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2539                 break;
2540         }
2541
2542         if (handled) {
2543                 /*
2544                  * It is possible that control is returned to userland
2545                  * even though we were able to handle the VM exit in the
2546                  * kernel.
2547                  *
2548                  * In such a case we want to make sure that the userland
2549                  * restarts guest execution at the instruction *after*
2550                  * the one we just processed. Therefore we update the
2551                  * guest rip in the VMCS and in 'vmexit'.
2552                  */
2553                 vmexit->rip += vmexit->inst_length;
2554                 vmexit->inst_length = 0;
2555                 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2556         } else {
2557                 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2558                         /*
2559                          * If this VM exit was not claimed by anybody then
2560                          * treat it as a generic VMX exit.
2561                          */
2562                         vmexit->exitcode = VM_EXITCODE_VMX;
2563                         vmexit->u.vmx.status = VM_SUCCESS;
2564                         vmexit->u.vmx.inst_type = 0;
2565                         vmexit->u.vmx.inst_error = 0;
2566                 } else {
2567                         /*
2568                          * The exitcode and collateral have been populated.
2569                          * The VM exit will be processed further in userland.
2570                          */
2571                 }
2572         }
2573         return (handled);
2574 }
2575
2576 static __inline void
2577 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2578 {
2579
2580         KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2581             ("vmx_exit_inst_error: invalid inst_fail_status %d",
2582             vmxctx->inst_fail_status));
2583
2584         vmexit->inst_length = 0;
2585         vmexit->exitcode = VM_EXITCODE_VMX;
2586         vmexit->u.vmx.status = vmxctx->inst_fail_status;
2587         vmexit->u.vmx.inst_error = vmcs_instruction_error();
2588         vmexit->u.vmx.exit_reason = ~0;
2589         vmexit->u.vmx.exit_qualification = ~0;
2590
2591         switch (rc) {
2592         case VMX_VMRESUME_ERROR:
2593         case VMX_VMLAUNCH_ERROR:
2594         case VMX_INVEPT_ERROR:
2595                 vmexit->u.vmx.inst_type = rc;
2596                 break;
2597         default:
2598                 panic("vmx_exit_inst_error: vmx_enter_guest returned %d", rc);
2599         }
2600 }
2601
2602 /*
2603  * If the NMI-exiting VM execution control is set to '1' then an NMI in
2604  * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2605  * sufficient to simply vector to the NMI handler via a software interrupt.
2606  * However, this must be done before maskable interrupts are enabled;
2607  * otherwise the "iret" issued by an interrupt handler will incorrectly
2608  * clear NMI blocking.
2609  */
2610 static __inline void
2611 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2612 {
2613         uint32_t intr_info;
2614
2615         KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2616
2617         if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2618                 return;
2619
2620         intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2621         KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2622             ("VM exit interruption info invalid: %#x", intr_info));
2623
2624         if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2625                 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2626                     "to NMI has invalid vector: %#x", intr_info));
2627                 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2628                 __asm __volatile("int $2");
2629         }
2630 }
2631
2632 static __inline void
2633 vmx_dr_enter_guest(struct vmxctx *vmxctx)
2634 {
2635         register_t rflags;
2636
2637         /* Save host control debug registers. */
2638         vmxctx->host_dr7 = rdr7();
2639         vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);
2640
2641         /*
2642          * Disable debugging in DR7 and DEBUGCTL to avoid triggering
2643          * exceptions in the host based on the guest DRx values.  The
2644          * guest DR7 and DEBUGCTL are saved/restored in the VMCS.
2645          */
2646         load_dr7(0);
2647         wrmsr(MSR_DEBUGCTLMSR, 0);
2648
2649         /*
2650          * Disable single stepping the kernel to avoid corrupting the
2651          * guest DR6.  A debugger might still be able to corrupt the
2652          * guest DR6 by setting a breakpoint after this point and then
2653          * single stepping.
2654          */
2655         rflags = read_rflags();
2656         vmxctx->host_tf = rflags & PSL_T;
2657         write_rflags(rflags & ~PSL_T);
2658
2659         /* Save host debug registers. */
2660         vmxctx->host_dr0 = rdr0();
2661         vmxctx->host_dr1 = rdr1();
2662         vmxctx->host_dr2 = rdr2();
2663         vmxctx->host_dr3 = rdr3();
2664         vmxctx->host_dr6 = rdr6();
2665
2666         /* Restore guest debug registers. */
2667         load_dr0(vmxctx->guest_dr0);
2668         load_dr1(vmxctx->guest_dr1);
2669         load_dr2(vmxctx->guest_dr2);
2670         load_dr3(vmxctx->guest_dr3);
2671         load_dr6(vmxctx->guest_dr6);
2672 }
2673
2674 static __inline void
2675 vmx_dr_leave_guest(struct vmxctx *vmxctx)
2676 {
2677
2678         /* Save guest debug registers. */
2679         vmxctx->guest_dr0 = rdr0();
2680         vmxctx->guest_dr1 = rdr1();
2681         vmxctx->guest_dr2 = rdr2();
2682         vmxctx->guest_dr3 = rdr3();
2683         vmxctx->guest_dr6 = rdr6();
2684
2685         /*
2686          * Restore host debug registers.  Restore DR7, DEBUGCTL, and
2687          * PSL_T last.
2688          */
2689         load_dr0(vmxctx->host_dr0);
2690         load_dr1(vmxctx->host_dr1);
2691         load_dr2(vmxctx->host_dr2);
2692         load_dr3(vmxctx->host_dr3);
2693         load_dr6(vmxctx->host_dr6);
2694         wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl);
2695         load_dr7(vmxctx->host_dr7);
2696         write_rflags(read_rflags() | vmxctx->host_tf);
2697 }
2698
2699 static int
2700 vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
2701     struct vm_eventinfo *evinfo)
2702 {
2703         int rc, handled, launched;
2704         struct vmx *vmx;
2705         struct vm *vm;
2706         struct vmxctx *vmxctx;
2707         struct vmcs *vmcs;
2708         struct vm_exit *vmexit;
2709         struct vlapic *vlapic;
2710         uint32_t exit_reason;
2711         struct region_descriptor gdtr, idtr;
2712         uint16_t ldt_sel;
2713
2714         vmx = arg;
2715         vm = vmx->vm;
2716         vmcs = &vmx->vmcs[vcpu];
2717         vmxctx = &vmx->ctx[vcpu];
2718         vlapic = vm_lapic(vm, vcpu);
2719         vmexit = vm_exitinfo(vm, vcpu);
2720         launched = 0;
2721
2722         KASSERT(vmxctx->pmap == pmap,
2723             ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2724
2725         vmx_msr_guest_enter(vmx, vcpu);
2726
2727         VMPTRLD(vmcs);
2728
2729         /*
2730          * XXX
2731          * We do this every time because we may set up the virtual machine
2732          * from a different process than the one that actually runs it.
2733          *
2734          * If the life of a virtual machine was spent entirely in the context
2735          * of a single process we could do this once in vmx_vminit().
2736          */
2737         vmcs_write(VMCS_HOST_CR3, rcr3());
2738
2739         vmcs_write(VMCS_GUEST_RIP, rip);
2740         vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2741         do {
2742                 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
2743                     "%#lx/%#lx", __func__, vmcs_guest_rip(), rip));
2744
2745                 handled = UNHANDLED;
2746                 /*
2747                  * Interrupts are disabled from this point on until the
2748                  * guest starts executing. This is done for the following
2749                  * reasons:
2750                  *
2751                  * If an AST is asserted on this thread after the check below,
2752                  * then the IPI_AST notification will not be lost, because it
2753                  * will cause a VM exit due to external interrupt as soon as
2754                  * the guest state is loaded.
2755                  *
2756                  * A posted interrupt after 'vmx_inject_interrupts()' will
2757                  * not be "lost" because it will be held pending in the host
2758                  * APIC because interrupts are disabled. The pending interrupt
2759                  * will be recognized as soon as the guest state is loaded.
2760                  *
2761                  * The same reasoning applies to the IPI generated by
2762                  * pmap_invalidate_ept().
2763                  */
2764                 disable_intr();
2765                 vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
2766
2767                 /*
2768                  * Check for vcpu suspension after injecting events because
2769                  * vmx_inject_interrupts() can suspend the vcpu due to a
2770                  * triple fault.
2771                  */
2772                 if (vcpu_suspended(evinfo)) {
2773                         enable_intr();
2774                         vm_exit_suspended(vmx->vm, vcpu, rip);
2775                         break;
2776                 }
2777
2778                 if (vcpu_rendezvous_pending(evinfo)) {
2779                         enable_intr();
2780                         vm_exit_rendezvous(vmx->vm, vcpu, rip);
2781                         break;
2782                 }
2783
2784                 if (vcpu_reqidle(evinfo)) {
2785                         enable_intr();
2786                         vm_exit_reqidle(vmx->vm, vcpu, rip);
2787                         break;
2788                 }
2789
2790                 if (vcpu_should_yield(vm, vcpu)) {
2791                         enable_intr();
2792                         vm_exit_astpending(vmx->vm, vcpu, rip);
2793                         vmx_astpending_trace(vmx, vcpu, rip);
2794                         handled = HANDLED;
2795                         break;
2796                 }
2797
2798                 /*
2799                  * VM exits restore the base address but not the
2800                  * limits of GDTR and IDTR.  The VMCS only stores the
2801                  * base address, so VM exits set the limits to 0xffff.
2802                  * Save and restore the full GDTR and IDTR to restore
2803                  * the limits.
2804                  *
2805                  * The VMCS does not save the LDTR at all, and VM
2806                  * exits clear LDTR as if a NULL selector were loaded.
2807                  * The userspace hypervisor probably doesn't use a
2808          * The userspace hypervisor probably doesn't use an
2809                  */
2810                 sgdt(&gdtr);
2811                 sidt(&idtr);
2812                 ldt_sel = sldt();
2813
2814                 vmx_run_trace(vmx, vcpu);
2815                 vmx_dr_enter_guest(vmxctx);
2816                 rc = vmx_enter_guest(vmxctx, vmx, launched);
2817                 vmx_dr_leave_guest(vmxctx);
2818
2819                 bare_lgdt(&gdtr);
2820                 lidt(&idtr);
2821                 lldt(ldt_sel);
2822
2823                 /* Collect some information for VM exit processing */
2824                 vmexit->rip = rip = vmcs_guest_rip();
2825                 vmexit->inst_length = vmexit_instruction_length();
2826                 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2827                 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2828
2829                 /* Update 'nextrip' */
2830                 vmx->state[vcpu].nextrip = rip;
2831
2832                 if (rc == VMX_GUEST_VMEXIT) {
2833                         vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2834                         enable_intr();
2835                         handled = vmx_exit_process(vmx, vcpu, vmexit);
2836                 } else {
2837                         enable_intr();
2838                         vmx_exit_inst_error(vmxctx, rc, vmexit);
2839                 }
2840                 launched = 1;
2841                 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2842                 rip = vmexit->rip;
2843         } while (handled);
2844
2845         /*
2846          * If a VM exit has been handled then the exitcode must be BOGUS;
2847          * if a VM exit has not been handled then the exitcode must not be BOGUS.
2848          */
2849         if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2850             (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2851                 panic("Mismatch between handled (%d) and exitcode (%d)",
2852                       handled, vmexit->exitcode);
2853         }
2854
2855         if (!handled)
2856                 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2857
2858         VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2859             vmexit->exitcode);
2860
2861         VMCLEAR(vmcs);
2862         vmx_msr_guest_exit(vmx, vcpu);
2863
2864         return (0);
2865 }
2866
2867 static void
2868 vmx_vmcleanup(void *arg)
2869 {
2870         int i;
2871         struct vmx *vmx = arg;
2872
2873         if (apic_access_virtualization(vmx, 0))
2874                 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2875
2876         for (i = 0; i < VM_MAXCPU; i++)
2877                 vpid_free(vmx->state[i].vpid);
2878
2879         free(vmx, M_VMX);
2880
2881         return;
2882 }
2883
2884 static register_t *
2885 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2886 {
2887
2888         switch (reg) {
2889         case VM_REG_GUEST_RAX:
2890                 return (&vmxctx->guest_rax);
2891         case VM_REG_GUEST_RBX:
2892                 return (&vmxctx->guest_rbx);
2893         case VM_REG_GUEST_RCX:
2894                 return (&vmxctx->guest_rcx);
2895         case VM_REG_GUEST_RDX:
2896                 return (&vmxctx->guest_rdx);
2897         case VM_REG_GUEST_RSI:
2898                 return (&vmxctx->guest_rsi);
2899         case VM_REG_GUEST_RDI:
2900                 return (&vmxctx->guest_rdi);
2901         case VM_REG_GUEST_RBP:
2902                 return (&vmxctx->guest_rbp);
2903         case VM_REG_GUEST_R8:
2904                 return (&vmxctx->guest_r8);
2905         case VM_REG_GUEST_R9:
2906                 return (&vmxctx->guest_r9);
2907         case VM_REG_GUEST_R10:
2908                 return (&vmxctx->guest_r10);
2909         case VM_REG_GUEST_R11:
2910                 return (&vmxctx->guest_r11);
2911         case VM_REG_GUEST_R12:
2912                 return (&vmxctx->guest_r12);
2913         case VM_REG_GUEST_R13:
2914                 return (&vmxctx->guest_r13);
2915         case VM_REG_GUEST_R14:
2916                 return (&vmxctx->guest_r14);
2917         case VM_REG_GUEST_R15:
2918                 return (&vmxctx->guest_r15);
2919         case VM_REG_GUEST_CR2:
2920                 return (&vmxctx->guest_cr2);
2921         case VM_REG_GUEST_DR0:
2922                 return (&vmxctx->guest_dr0);
2923         case VM_REG_GUEST_DR1:
2924                 return (&vmxctx->guest_dr1);
2925         case VM_REG_GUEST_DR2:
2926                 return (&vmxctx->guest_dr2);
2927         case VM_REG_GUEST_DR3:
2928                 return (&vmxctx->guest_dr3);
2929         case VM_REG_GUEST_DR6:
2930                 return (&vmxctx->guest_dr6);
2931         default:
2932                 break;
2933         }
2934         return (NULL);
2935 }
2936
2937 static int
2938 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2939 {
2940         register_t *regp;
2941
2942         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2943                 *retval = *regp;
2944                 return (0);
2945         } else
2946                 return (EINVAL);
2947 }
2948
2949 static int
2950 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2951 {
2952         register_t *regp;
2953
2954         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2955                 *regp = val;
2956                 return (0);
2957         } else
2958                 return (EINVAL);
2959 }
2960
2961 static int
2962 vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
2963 {
2964         uint64_t gi;
2965         int error;
2966
2967         error = vmcs_getreg(&vmx->vmcs[vcpu], running,
2968             VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
2969         *retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
2970         return (error);
2971 }
2972
2973 static int
2974 vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
2975 {
2976         struct vmcs *vmcs;
2977         uint64_t gi;
2978         int error, ident;
2979
2980         /*
2981          * Forcing the vcpu into an interrupt shadow is not supported.
2982          */
2983         if (val) {
2984                 error = EINVAL;
2985                 goto done;
2986         }
2987
2988         vmcs = &vmx->vmcs[vcpu];
2989         ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
2990         error = vmcs_getreg(vmcs, running, ident, &gi);
2991         if (error == 0) {
2992                 gi &= ~HWINTR_BLOCKING;
2993                 error = vmcs_setreg(vmcs, running, ident, gi);
2994         }
2995 done:
2996         VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
2997             error ? "failed" : "succeeded");
2998         return (error);
2999 }
3000
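     /*
      * Return the VMCS read-shadow field for a guest control register, or -1
      * if the register is not shadowed (only CR0 and CR4 have shadows).
      */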
3001 static int
3002 vmx_shadow_reg(int reg)
3003 {
3004         int shreg;
3005
3006         shreg = -1;
3007
3008         switch (reg) {
3009         case VM_REG_GUEST_CR0:
3010                 shreg = VMCS_CR0_SHADOW;
3011                 break;
3012         case VM_REG_GUEST_CR4:
3013                 shreg = VMCS_CR4_SHADOW;
3014                 break;
3015         default:
3016                 break;
3017         }
3018
3019         return (shreg);
3020 }
3021
3022 static int
3023 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
3024 {
3025         int running, hostcpu;
3026         struct vmx *vmx = arg;
3027
3028         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3029         if (running && hostcpu != curcpu)
3030                 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
3031
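             /*
              * The interrupt shadow is synthesized from the VMCS guest
              * interruptibility state.  Registers saved in the vmxctx are
              * returned directly; everything else is read from the VMCS.
              */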
3032         if (reg == VM_REG_GUEST_INTR_SHADOW)
3033                 return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
3034
3035         if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
3036                 return (0);
3037
3038         return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
3039 }
3040
3041 static int
3042 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
3043 {
3044         int error, hostcpu, running, shadow;
3045         uint64_t ctls;
3046         pmap_t pmap;
3047         struct vmx *vmx = arg;
3048
3049         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3050         if (running && hostcpu != curcpu)
3051                 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
3052
3053         if (reg == VM_REG_GUEST_INTR_SHADOW)
3054                 return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
3055
3056         if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
3057                 return (0);
3058
3059         error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
3060
3061         if (error == 0) {
3062                 /*
3063                  * If the "load EFER" VM-entry control is 1 then the
3064                  * value of EFER.LMA must be identical to "IA-32e mode guest"
3065                  * bit in the VM-entry control.
3066                  */
3067                 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
3068                     (reg == VM_REG_GUEST_EFER)) {
3069                         vmcs_getreg(&vmx->vmcs[vcpu], running,
3070                                     VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
3071                         if (val & EFER_LMA)
3072                                 ctls |= VM_ENTRY_GUEST_LMA;
3073                         else
3074                                 ctls &= ~VM_ENTRY_GUEST_LMA;
3075                         vmcs_setreg(&vmx->vmcs[vcpu], running,
3076                                     VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
3077                 }
3078
3079                 shadow = vmx_shadow_reg(reg);
3080                 if (shadow > 0) {
3081                         /*
3082                          * Store the unmodified value in the shadow register.
3083                          */
3084                         error = vmcs_setreg(&vmx->vmcs[vcpu], running,
3085                                     VMCS_IDENT(shadow), val);
3086                 }
3087
3088                 if (reg == VM_REG_GUEST_CR3) {
3089                         /*
3090                          * Invalidate the guest vcpu's TLB mappings to emulate
3091                          * the behavior of updating %cr3.
3092                          *
3093                          * XXX the processor retains global mappings when %cr3
3094                          * is updated but vmx_invvpid() does not.
3095                          */
3096                         pmap = vmx->ctx[vcpu].pmap;
3097                         vmx_invvpid(vmx, vcpu, pmap, running);
3098                 }
3099         }
3100
3101         return (error);
3102 }
3103
3104 static int
3105 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
3106 {
3107         int hostcpu, running;
3108         struct vmx *vmx = arg;
3109
3110         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3111         if (running && hostcpu != curcpu)
3112                 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
3113
3114         return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
3115 }
3116
3117 static int
3118 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
3119 {
3120         int hostcpu, running;
3121         struct vmx *vmx = arg;
3122
3123         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3124         if (running && hostcpu != curcpu)
3125                 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
3126
3127         return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
3128 }
3129
3130 static int
3131 vmx_getcap(void *arg, int vcpu, int type, int *retval)
3132 {
3133         struct vmx *vmx = arg;
3134         int vcap;
3135         int ret;
3136
3137         ret = ENOENT;
3138
3139         vcap = vmx->cap[vcpu].set;
3140
3141         switch (type) {
3142         case VM_CAP_HALT_EXIT:
3143                 if (cap_halt_exit)
3144                         ret = 0;
3145                 break;
3146         case VM_CAP_PAUSE_EXIT:
3147                 if (cap_pause_exit)
3148                         ret = 0;
3149                 break;
3150         case VM_CAP_MTRAP_EXIT:
3151                 if (cap_monitor_trap)
3152                         ret = 0;
3153                 break;
3154         case VM_CAP_UNRESTRICTED_GUEST:
3155                 if (cap_unrestricted_guest)
3156                         ret = 0;
3157                 break;
3158         case VM_CAP_ENABLE_INVPCID:
3159                 if (cap_invpcid)
3160                         ret = 0;
3161                 break;
3162         default:
3163                 break;
3164         }
3165
3166         if (ret == 0)
3167                 *retval = (vcap & (1 << type)) ? 1 : 0;
3168
3169         return (ret);
3170 }
3171
3172 static int
3173 vmx_setcap(void *arg, int vcpu, int type, int val)
3174 {
3175         struct vmx *vmx = arg;
3176         struct vmcs *vmcs = &vmx->vmcs[vcpu];
3177         uint32_t baseval;
3178         uint32_t *pptr;
3179         int error;
3180         int flag;
3181         int reg;
3182         int retval;
3183
3184         retval = ENOENT;
3185         pptr = NULL;
3186
3187         switch (type) {
3188         case VM_CAP_HALT_EXIT:
3189                 if (cap_halt_exit) {
3190                         retval = 0;
3191                         pptr = &vmx->cap[vcpu].proc_ctls;
3192                         baseval = *pptr;
3193                         flag = PROCBASED_HLT_EXITING;
3194                         reg = VMCS_PRI_PROC_BASED_CTLS;
3195                 }
3196                 break;
3197         case VM_CAP_MTRAP_EXIT:
3198                 if (cap_monitor_trap) {
3199                         retval = 0;
3200                         pptr = &vmx->cap[vcpu].proc_ctls;
3201                         baseval = *pptr;
3202                         flag = PROCBASED_MTF;
3203                         reg = VMCS_PRI_PROC_BASED_CTLS;
3204                 }
3205                 break;
3206         case VM_CAP_PAUSE_EXIT:
3207                 if (cap_pause_exit) {
3208                         retval = 0;
3209                         pptr = &vmx->cap[vcpu].proc_ctls;
3210                         baseval = *pptr;
3211                         flag = PROCBASED_PAUSE_EXITING;
3212                         reg = VMCS_PRI_PROC_BASED_CTLS;
3213                 }
3214                 break;
3215         case VM_CAP_UNRESTRICTED_GUEST:
3216                 if (cap_unrestricted_guest) {
3217                         retval = 0;
3218                         pptr = &vmx->cap[vcpu].proc_ctls2;
3219                         baseval = *pptr;
3220                         flag = PROCBASED2_UNRESTRICTED_GUEST;
3221                         reg = VMCS_SEC_PROC_BASED_CTLS;
3222                 }
3223                 break;
3224         case VM_CAP_ENABLE_INVPCID:
3225                 if (cap_invpcid) {
3226                         retval = 0;
3227                         pptr = &vmx->cap[vcpu].proc_ctls2;
3228                         baseval = *pptr;
3229                         flag = PROCBASED2_ENABLE_INVPCID;
3230                         reg = VMCS_SEC_PROC_BASED_CTLS;
3231                 }
3232                 break;
3233         default:
3234                 break;
3235         }
3236
3237         if (retval == 0) {
3238                 if (val) {
3239                         baseval |= flag;
3240                 } else {
3241                         baseval &= ~flag;
3242                 }
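                     /* Make this vcpu's VMCS current before writing the control field. */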
3243                 VMPTRLD(vmcs);
3244                 error = vmwrite(reg, baseval);
3245                 VMCLEAR(vmcs);
3246
3247                 if (error) {
3248                         retval = error;
3249                 } else {
3250                         /*
3251                          * Update the optional stored flags and record
3252                          * the new setting.
3253                          */
3254                         if (pptr != NULL) {
3255                                 *pptr = baseval;
3256                         }
3257
3258                         if (val) {
3259                                 vmx->cap[vcpu].set |= (1 << type);
3260                         } else {
3261                                 vmx->cap[vcpu].set &= ~(1 << type);
3262                         }
3263                 }
3264         }
3265
3266         return (retval);
3267 }
3268
3269 struct vlapic_vtx {
3270         struct vlapic   vlapic;
3271         struct pir_desc *pir_desc;
3272         struct vmx      *vmx;
3273         u_int   pending_prio;
3274 };
3275
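     /*
      * The upper nibble of an interrupt vector is its priority class (0-15).
      * VPR_PRIO_BIT() maps that class to a single bit so that the classes
      * pending while the 'pending' bit is asserted can be accumulated in the
      * 'pending_prio' bitfield above.
      */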
3276 #define VPR_PRIO_BIT(vpr)       (1 << ((vpr) >> 4))
3277
3278 #define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)   \
3279 do {                                                                    \
3280         VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",     \
3281             level ? "level" : "edge", vector);                          \
3282         VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);  \
3283         VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);  \
3284         VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);  \
3285         VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);  \
3286         VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3287 } while (0)
3288
3289 /*
3290  * vlapic->ops handlers that utilize the APICv hardware assist described in
3291  * Chapter 29 of the Intel SDM.
3292  */
3293 static int
3294 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3295 {
3296         struct vlapic_vtx *vlapic_vtx;
3297         struct pir_desc *pir_desc;
3298         uint64_t mask;
3299         int idx, notify = 0;
3300
3301         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3302         pir_desc = vlapic_vtx->pir_desc;
3303
3304         /*
3305          * Keep track of interrupt requests in the PIR descriptor. This is
3306          * because the virtual APIC page pointed to by the VMCS cannot be
3307          * modified if the vcpu is running.
3308          */
3309         idx = vector / 64;
3310         mask = 1UL << (vector % 64);
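             /* For example, vector 65 (0x41) yields idx = 1, mask = 1UL << 1. */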
3311         atomic_set_long(&pir_desc->pir[idx], mask);
3312
3313         /*
3314          * A notification is required whenever the 'pending' bit makes a
3315          * transition from 0->1.
3316          *
3317          * Even if the 'pending' bit is already asserted, notification about
3318          * the incoming interrupt may still be necessary.  For example, if a
3319          * vCPU is HLTed with a high PPR, a low priority interrupt would cause
3320          * the 0->1 'pending' transition with a notification, but the vCPU
3321          * would ignore the interrupt for the time being.  The same vCPU would
3322          * need to then be notified if a high-priority interrupt arrived which
3323          * satisfied the PPR.
3324          *
3325          * The priorities of interrupts injected while 'pending' is asserted
3326          * are tracked in a custom bitfield 'pending_prio'.  Should the
3327          * to-be-injected interrupt exceed the priorities already present, the
3328          * notification is sent.  The priorities recorded in 'pending_prio' are
3329          * cleared whenever the 'pending' bit makes another 0->1 transition.
3330          */
3331         if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) {
3332                 notify = 1;
3333                 vlapic_vtx->pending_prio = 0;
3334         } else {
3335                 const u_int old_prio = vlapic_vtx->pending_prio;
3336                 const u_int prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT);
3337
3338                 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) {
3339                         atomic_set_int(&vlapic_vtx->pending_prio, prio_bit);
3340                         notify = 1;
3341                 }
3342         }
3343
3344         VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3345             level, "vmx_set_intr_ready");
3346         return (notify);
3347 }
3348
3349 static int
3350 vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3351 {
3352         struct vlapic_vtx *vlapic_vtx;
3353         struct pir_desc *pir_desc;
3354         struct LAPIC *lapic;
3355         uint64_t pending, pirval;
3356         uint32_t ppr, vpr;
3357         int i;
3358
3359         /*
3360          * This function is only expected to be called from the 'HLT' exit
3361          * handler which does not care about the vector that is pending.
3362          */
3363         KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3364
3365         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3366         pir_desc = vlapic_vtx->pir_desc;
3367
3368         pending = atomic_load_acq_long(&pir_desc->pending);
3369         if (!pending) {
3370                 /*
3371                  * While a virtual interrupt may have already been
3372                  * processed, the actual delivery may still be pending on
3373                  * the interruptibility of the guest.  Recognize a pending
3374                  * interrupt by reevaluating virtual interrupts
3375                  * following Section 29.2.1 in the Intel SDM Volume 3.
3376                  */
3377                 struct vm_exit *vmexit;
3378                 uint8_t rvi, ppr;
3379
3380                 vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
3381                 KASSERT(vmexit->exitcode == VM_EXITCODE_HLT,
3382                     ("vmx_pending_intr: exitcode not 'HLT'"));
3383                 rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT;
3384                 lapic = vlapic->apic_page;
3385                 ppr = lapic->ppr & APIC_TPR_INT;
3386                 if (rvi > ppr) {
3387                         return (1);
3388                 }
3389
3390                 return (0);
3391         }
3392
3393         /*
3394          * If there is an interrupt pending then it will be recognized only
3395          * if its priority is greater than the processor priority.
3396          *
3397          * Special case: if the processor priority is zero then any pending
3398          * interrupt will be recognized.
3399          */
3400         lapic = vlapic->apic_page;
3401         ppr = lapic->ppr & APIC_TPR_INT;
3402         if (ppr == 0)
3403                 return (1);
3404
3405         VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3406             lapic->ppr);
3407
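             /*
              * Scan the 256-bit PIR from the highest qword down; the first set
              * bit found is the highest-priority pending vector and its upper
              * nibble is the vector priority compared against the PPR below.
              */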
3408         vpr = 0;
3409         for (i = 3; i >= 0; i--) {
3410                 pirval = pir_desc->pir[i];
3411                 if (pirval != 0) {
3412                         vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT;
3413                         break;
3414                 }
3415         }
3416
3417         /*
3418          * If the highest-priority pending interrupt falls short of the
3419          * processor priority of this vCPU, ensure that 'pending_prio' does not
3420          * have any stale bits which would preclude a higher-priority interrupt
3421          * from incurring a notification later.
3422          */
3423         if (vpr <= ppr) {
3424                 const u_int prio_bit = VPR_PRIO_BIT(vpr);
3425                 const u_int old = vlapic_vtx->pending_prio;
3426
3427                 if (old > prio_bit && (old & prio_bit) == 0) {
3428                         vlapic_vtx->pending_prio = prio_bit;
3429                 }
3430                 return (0);
3431         }
3432         return (1);
3433 }
3434
3435 static void
3436 vmx_intr_accepted(struct vlapic *vlapic, int vector)
3437 {
3438
3439         panic("vmx_intr_accepted: not expected to be called");
3440 }
3441
3442 static void
3443 vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3444 {
3445         struct vlapic_vtx *vlapic_vtx;
3446         struct vmx *vmx;
3447         struct vmcs *vmcs;
3448         uint64_t mask, val;
3449
3450         KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3451         KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3452             ("vmx_set_tmr: vcpu cannot be running"));
3453
3454         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3455         vmx = vlapic_vtx->vmx;
3456         vmcs = &vmx->vmcs[vlapic->vcpuid];
3457         mask = 1UL << (vector % 64);
3458
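             /*
              * With APICv the TMR is realized via the EOI-exit bitmap: setting
              * the bit for a level-triggered vector forces a VM exit when the
              * guest signals EOI so the EOI can reach the emulated I/O APIC.
              */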
3459         VMPTRLD(vmcs);
3460         val = vmcs_read(VMCS_EOI_EXIT(vector));
3461         if (level)
3462                 val |= mask;
3463         else
3464                 val &= ~mask;
3465         vmcs_write(VMCS_EOI_EXIT(vector), val);
3466         VMCLEAR(vmcs);
3467 }
3468
3469 static void
3470 vmx_enable_x2apic_mode(struct vlapic *vlapic)
3471 {
3472         struct vmx *vmx;
3473         struct vmcs *vmcs;
3474         uint32_t proc_ctls2;
3475         int vcpuid, error;
3476
3477         vcpuid = vlapic->vcpuid;
3478         vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3479         vmcs = &vmx->vmcs[vcpuid];
3480
3481         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3482         KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3483             ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3484
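             /*
              * Trade the MMIO-based APIC-access virtualization for x2APIC
              * (MSR-based) virtualization in the secondary processor controls.
              */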
3485         proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3486         proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3487         vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3488
3489         VMPTRLD(vmcs);
3490         vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3491         VMCLEAR(vmcs);
3492
3493         if (vlapic->vcpuid == 0) {
3494                 /*
3495                  * The nested page table mappings are shared by all vcpus
3496                  * so unmap the APIC access page just once.
3497                  */
3498                 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3499                 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3500                     __func__, error));
3501
3502                 /*
3503                  * The MSR bitmap is shared by all vcpus so modify it only
3504                  * once in the context of vcpu 0.
3505                  */
3506                 error = vmx_allow_x2apic_msrs(vmx);
3507                 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3508                     __func__, error));
3509         }
3510 }
3511
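     /*
      * Send the posted-interrupt notification vector to the host cpu that the
      * vcpu is running on.  If the vcpu is in VMX non-root operation when the
      * IPI arrives, the pending bits in the PIR are processed by hardware
      * without causing a VM exit.
      */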
3512 static void
3513 vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3514 {
3515
3516         ipi_cpu(hostcpu, pirvec);
3517 }
3518
3519 /*
3520  * Transfer the pending interrupts in the PIR descriptor to the IRR
3521  * in the virtual APIC page.
3522  */
3523 static void
3524 vmx_inject_pir(struct vlapic *vlapic)
3525 {
3526         struct vlapic_vtx *vlapic_vtx;
3527         struct pir_desc *pir_desc;
3528         struct LAPIC *lapic;
3529         uint64_t val, pirval;
3530         int rvi, pirbase = -1;
3531         uint16_t intr_status_old, intr_status_new;
3532
3533         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3534         pir_desc = vlapic_vtx->pir_desc;
3535         if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3536                 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3537                     "no posted interrupt pending");
3538                 return;
3539         }
3540
3541         pirval = 0;
3542         pirbase = -1;
3543         lapic = vlapic->apic_page;
3544
3545         val = atomic_readandclear_long(&pir_desc->pir[0]);
3546         if (val != 0) {
3547                 lapic->irr0 |= val;
3548                 lapic->irr1 |= val >> 32;
3549                 pirbase = 0;
3550                 pirval = val;
3551         }
3552
3553         val = atomic_readandclear_long(&pir_desc->pir[1]);
3554         if (val != 0) {
3555                 lapic->irr2 |= val;
3556                 lapic->irr3 |= val >> 32;
3557                 pirbase = 64;
3558                 pirval = val;
3559         }
3560
3561         val = atomic_readandclear_long(&pir_desc->pir[2]);
3562         if (val != 0) {
3563                 lapic->irr4 |= val;
3564                 lapic->irr5 |= val >> 32;
3565                 pirbase = 128;
3566                 pirval = val;
3567         }
3568
3569         val = atomic_readandclear_long(&pir_desc->pir[3]);
3570         if (val != 0) {
3571                 lapic->irr6 |= val;
3572                 lapic->irr7 |= val >> 32;
3573                 pirbase = 192;
3574                 pirval = val;
3575         }
3576
3577         VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3578
3579         /*
3580          * Update RVI so the processor can evaluate pending virtual
3581          * interrupts on VM-entry.
3582          *
3583          * It is possible for pirval to be 0 here, even though the
3584          * pending bit has been set. The scenario is:
3585          * CPU-Y is sending a posted interrupt to CPU-X, which
3586          * is running a guest and processing posted interrupts in h/w.
3587          * CPU-X will eventually exit and the state seen in s/w is
3588          * the pending bit set, but no PIR bits set.
3589          *
3590          *      CPU-X                      CPU-Y
3591          *   (vm running)                (host running)
3592          *   rx posted interrupt
3593          *   CLEAR pending bit
3594          *                               SET PIR bit
3595          *   READ/CLEAR PIR bits
3596          *                               SET pending bit
3597          *   (vm exit)
3598          *   pending bit set, PIR 0
3599          */
3600         if (pirval != 0) {
3601                 rvi = pirbase + flsl(pirval) - 1;
3602                 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3603                 intr_status_new = (intr_status_old & 0xFF00) | rvi;
3604                 if (intr_status_new > intr_status_old) {
3605                         vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3606                         VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3607                             "guest_intr_status changed from 0x%04x to 0x%04x",
3608                             intr_status_old, intr_status_new);
3609                 }
3610         }
3611 }
3612
3613 static struct vlapic *
3614 vmx_vlapic_init(void *arg, int vcpuid)
3615 {
3616         struct vmx *vmx;
3617         struct vlapic *vlapic;
3618         struct vlapic_vtx *vlapic_vtx;
3619
3620         vmx = arg;
3621
3622         vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3623         vlapic->vm = vmx->vm;
3624         vlapic->vcpuid = vcpuid;
3625         vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3626
3627         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3628         vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3629         vlapic_vtx->vmx = vmx;
3630
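             /*
              * When virtual interrupt delivery is enabled, override the
              * software vlapic handlers with the APICv-assisted versions
              * defined above.
              */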
3631         if (virtual_interrupt_delivery) {
3632                 vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3633                 vlapic->ops.pending_intr = vmx_pending_intr;
3634                 vlapic->ops.intr_accepted = vmx_intr_accepted;
3635                 vlapic->ops.set_tmr = vmx_set_tmr;
3636                 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3637         }
3638
3639         if (posted_interrupts)
3640                 vlapic->ops.post_intr = vmx_post_intr;
3641
3642         vlapic_init(vlapic);
3643
3644         return (vlapic);
3645 }
3646
3647 static void
3648 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3649 {
3650
3651         vlapic_cleanup(vlapic);
3652         free(vlapic, M_VLAPIC);
3653 }
3654
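     /*
      * The initializers below are positional and must remain in the same
      * order as the members of 'struct vmm_ops' declared in <machine/vmm.h>.
      */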
3655 struct vmm_ops vmm_ops_intel = {
3656         vmx_init,
3657         vmx_cleanup,
3658         vmx_restore,
3659         vmx_vminit,
3660         vmx_run,
3661         vmx_vmcleanup,
3662         vmx_getreg,
3663         vmx_setreg,
3664         vmx_getdesc,
3665         vmx_setdesc,
3666         vmx_getcap,
3667         vmx_setcap,
3668         ept_vmspace_alloc,
3669         ept_vmspace_free,
3670         vmx_vlapic_init,
3671         vmx_vlapic_cleanup,
3672 };