1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  * $FreeBSD$
29  */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/smp.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/pcpu.h>
40 #include <sys/proc.h>
41 #include <sys/sysctl.h>
42
43 #include <vm/vm.h>
44 #include <vm/pmap.h>
45
46 #include <machine/psl.h>
47 #include <machine/cpufunc.h>
48 #include <machine/md_var.h>
49 #include <machine/segments.h>
50 #include <machine/smp.h>
51 #include <machine/specialreg.h>
52 #include <machine/vmparam.h>
53
54 #include <machine/vmm.h>
55 #include <machine/vmm_dev.h>
56 #include <machine/vmm_instruction_emul.h>
57 #include "vmm_lapic.h"
58 #include "vmm_host.h"
59 #include "vmm_ioport.h"
60 #include "vmm_ktr.h"
61 #include "vmm_stat.h"
62 #include "vatpic.h"
63 #include "vlapic.h"
64 #include "vlapic_priv.h"
65
66 #include "ept.h"
67 #include "vmx_cpufunc.h"
68 #include "vmx.h"
69 #include "vmx_msr.h"
70 #include "x86.h"
71 #include "vmx_controls.h"
72
73 #define PINBASED_CTLS_ONE_SETTING                                       \
74         (PINBASED_EXTINT_EXITING        |                               \
75          PINBASED_NMI_EXITING           |                               \
76          PINBASED_VIRTUAL_NMI)
77 #define PINBASED_CTLS_ZERO_SETTING      0
78
79 #define PROCBASED_CTLS_WINDOW_SETTING                                   \
80         (PROCBASED_INT_WINDOW_EXITING   |                               \
81          PROCBASED_NMI_WINDOW_EXITING)
82
83 #define PROCBASED_CTLS_ONE_SETTING                                      \
84         (PROCBASED_SECONDARY_CONTROLS   |                               \
85          PROCBASED_MWAIT_EXITING        |                               \
86          PROCBASED_MONITOR_EXITING      |                               \
87          PROCBASED_IO_EXITING           |                               \
88          PROCBASED_MSR_BITMAPS          |                               \
89          PROCBASED_CTLS_WINDOW_SETTING  |                               \
90          PROCBASED_CR8_LOAD_EXITING     |                               \
91          PROCBASED_CR8_STORE_EXITING)
92 #define PROCBASED_CTLS_ZERO_SETTING     \
93         (PROCBASED_CR3_LOAD_EXITING |   \
94         PROCBASED_CR3_STORE_EXITING |   \
95         PROCBASED_IO_BITMAPS)
96
97 #define PROCBASED_CTLS2_ONE_SETTING     PROCBASED2_ENABLE_EPT
98 #define PROCBASED_CTLS2_ZERO_SETTING    0
99
100 #define VM_EXIT_CTLS_ONE_SETTING                                        \
101         (VM_EXIT_SAVE_DEBUG_CONTROLS            |                       \
102         VM_EXIT_HOST_LMA                        |                       \
103         VM_EXIT_SAVE_EFER                       |                       \
104         VM_EXIT_LOAD_EFER                       |                       \
105         VM_EXIT_ACKNOWLEDGE_INTERRUPT)
106
107 #define VM_EXIT_CTLS_ZERO_SETTING       0
108
109 #define VM_ENTRY_CTLS_ONE_SETTING                                       \
110         (VM_ENTRY_LOAD_DEBUG_CONTROLS           |                       \
111         VM_ENTRY_LOAD_EFER)
112
113 #define VM_ENTRY_CTLS_ZERO_SETTING                                      \
114         (VM_ENTRY_INTO_SMM                      |                       \
115         VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
116
117 #define HANDLED         1
118 #define UNHANDLED       0
119
120 static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
121 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
122
123 SYSCTL_DECL(_hw_vmm);
124 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
125
126 int vmxon_enabled[MAXCPU];
127 static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
128
129 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
130 static uint32_t exit_ctls, entry_ctls;
131
132 static uint64_t cr0_ones_mask, cr0_zeros_mask;
133 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
134              &cr0_ones_mask, 0, NULL);
135 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
136              &cr0_zeros_mask, 0, NULL);
137
138 static uint64_t cr4_ones_mask, cr4_zeros_mask;
139 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
140              &cr4_ones_mask, 0, NULL);
141 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
142              &cr4_zeros_mask, 0, NULL);
143
144 static int vmx_initialized;
145 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
146            &vmx_initialized, 0, "Intel VMX initialized");
147
148 /*
149  * Optional capabilities
150  */
151 static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);
152
153 static int cap_halt_exit;
154 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
155     "HLT triggers a VM-exit");
156
157 static int cap_pause_exit;
158 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
159     0, "PAUSE triggers a VM-exit");
160
161 static int cap_unrestricted_guest;
162 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
163     &cap_unrestricted_guest, 0, "Unrestricted guests");
164
165 static int cap_monitor_trap;
166 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
167     &cap_monitor_trap, 0, "Monitor trap flag");
168
169 static int cap_invpcid;
170 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
171     0, "Guests are allowed to use INVPCID");
172
173 static int virtual_interrupt_delivery;
174 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
175     &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
176
177 static int posted_interrupts;
178 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
179     &posted_interrupts, 0, "APICv posted interrupt support");
180
181 static int pirvec = -1;
182 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
183     &pirvec, 0, "APICv posted interrupt vector");
184
185 static struct unrhdr *vpid_unr;
186 static u_int vpid_alloc_failed;
187 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
188             &vpid_alloc_failed, 0, NULL);
189
190 /*
191  * Use the last page below 4GB as the APIC access address. This address is
192  * occupied by the boot firmware so it is guaranteed that it will not conflict
193  * with a page in system memory.
194  */
195 #define APIC_ACCESS_ADDRESS     0xFFFFF000
196
197 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
198 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
199 static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
200 static void vmx_inject_pir(struct vlapic *vlapic);
201
202 #ifdef KTR
203 static const char *
204 exit_reason_to_str(int reason)
205 {
206         static char reasonbuf[32];
207
208         switch (reason) {
209         case EXIT_REASON_EXCEPTION:
210                 return "exception";
211         case EXIT_REASON_EXT_INTR:
212                 return "extint";
213         case EXIT_REASON_TRIPLE_FAULT:
214                 return "triplefault";
215         case EXIT_REASON_INIT:
216                 return "init";
217         case EXIT_REASON_SIPI:
218                 return "sipi";
219         case EXIT_REASON_IO_SMI:
220                 return "iosmi";
221         case EXIT_REASON_SMI:
222                 return "smi";
223         case EXIT_REASON_INTR_WINDOW:
224                 return "intrwindow";
225         case EXIT_REASON_NMI_WINDOW:
226                 return "nmiwindow";
227         case EXIT_REASON_TASK_SWITCH:
228                 return "taskswitch";
229         case EXIT_REASON_CPUID:
230                 return "cpuid";
231         case EXIT_REASON_GETSEC:
232                 return "getsec";
233         case EXIT_REASON_HLT:
234                 return "hlt";
235         case EXIT_REASON_INVD:
236                 return "invd";
237         case EXIT_REASON_INVLPG:
238                 return "invlpg";
239         case EXIT_REASON_RDPMC:
240                 return "rdpmc";
241         case EXIT_REASON_RDTSC:
242                 return "rdtsc";
243         case EXIT_REASON_RSM:
244                 return "rsm";
245         case EXIT_REASON_VMCALL:
246                 return "vmcall";
247         case EXIT_REASON_VMCLEAR:
248                 return "vmclear";
249         case EXIT_REASON_VMLAUNCH:
250                 return "vmlaunch";
251         case EXIT_REASON_VMPTRLD:
252                 return "vmptrld";
253         case EXIT_REASON_VMPTRST:
254                 return "vmptrst";
255         case EXIT_REASON_VMREAD:
256                 return "vmread";
257         case EXIT_REASON_VMRESUME:
258                 return "vmresume";
259         case EXIT_REASON_VMWRITE:
260                 return "vmwrite";
261         case EXIT_REASON_VMXOFF:
262                 return "vmxoff";
263         case EXIT_REASON_VMXON:
264                 return "vmxon";
265         case EXIT_REASON_CR_ACCESS:
266                 return "craccess";
267         case EXIT_REASON_DR_ACCESS:
268                 return "draccess";
269         case EXIT_REASON_INOUT:
270                 return "inout";
271         case EXIT_REASON_RDMSR:
272                 return "rdmsr";
273         case EXIT_REASON_WRMSR:
274                 return "wrmsr";
275         case EXIT_REASON_INVAL_VMCS:
276                 return "invalvmcs";
277         case EXIT_REASON_INVAL_MSR:
278                 return "invalmsr";
279         case EXIT_REASON_MWAIT:
280                 return "mwait";
281         case EXIT_REASON_MTF:
282                 return "mtf";
283         case EXIT_REASON_MONITOR:
284                 return "monitor";
285         case EXIT_REASON_PAUSE:
286                 return "pause";
287         case EXIT_REASON_MCE_DURING_ENTRY:
288                 return "mce-during-entry";
289         case EXIT_REASON_TPR:
290                 return "tpr";
291         case EXIT_REASON_APIC_ACCESS:
292                 return "apic-access";
293         case EXIT_REASON_GDTR_IDTR:
294                 return "gdtridtr";
295         case EXIT_REASON_LDTR_TR:
296                 return "ldtrtr";
297         case EXIT_REASON_EPT_FAULT:
298                 return "eptfault";
299         case EXIT_REASON_EPT_MISCONFIG:
300                 return "eptmisconfig";
301         case EXIT_REASON_INVEPT:
302                 return "invept";
303         case EXIT_REASON_RDTSCP:
304                 return "rdtscp";
305         case EXIT_REASON_VMX_PREEMPT:
306                 return "vmxpreempt";
307         case EXIT_REASON_INVVPID:
308                 return "invvpid";
309         case EXIT_REASON_WBINVD:
310                 return "wbinvd";
311         case EXIT_REASON_XSETBV:
312                 return "xsetbv";
313         case EXIT_REASON_APIC_WRITE:
314                 return "apic-write";
315         default:
316                 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
317                 return (reasonbuf);
318         }
319 }
320 #endif  /* KTR */
321
322 static int
323 vmx_allow_x2apic_msrs(struct vmx *vmx)
324 {
325         int i, error;
326
327         error = 0;
328
329         /*
330          * Allow readonly access to the following x2APIC MSRs from the guest.
331          */
332         error += guest_msr_ro(vmx, MSR_APIC_ID);
333         error += guest_msr_ro(vmx, MSR_APIC_VERSION);
334         error += guest_msr_ro(vmx, MSR_APIC_LDR);
335         error += guest_msr_ro(vmx, MSR_APIC_SVR);
336
337         for (i = 0; i < 8; i++)
338                 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
339
340         for (i = 0; i < 8; i++)
341                 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
342
343         for (i = 0; i < 8; i++)
344                 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
345
346         error += guest_msr_ro(vmx, MSR_APIC_ESR);
347         error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
348         error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
349         error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
350         error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
351         error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
352         error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
353         error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
354         error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
355         error += guest_msr_ro(vmx, MSR_APIC_ICR);
356
357         /*
358          * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
359          *
360          * These registers get special treatment described in the section
361          * "Virtualizing MSR-Based APIC Accesses".
362          */
363         error += guest_msr_rw(vmx, MSR_APIC_TPR);
364         error += guest_msr_rw(vmx, MSR_APIC_EOI);
365         error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
366
367         return (error);
368 }
369
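/*
 * Apply the CR0/CR4 bits that VMX operation requires to be fixed to 1 or 0,
 * using the masks computed from the MSR_VMX_CR{0,4}_FIXED{0,1} MSRs in
 * vmx_init().
 */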
370 u_long
371 vmx_fix_cr0(u_long cr0)
372 {
373
374         return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
375 }
376
377 u_long
378 vmx_fix_cr4(u_long cr4)
379 {
380
381         return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
382 }
383
384 static void
385 vpid_free(int vpid)
386 {
387         if (vpid < 0 || vpid > 0xffff)
388                 panic("vpid_free: invalid vpid %d", vpid);
389
390         /*
391          * VPIDs [0,VM_MAXCPU] are special and are not allocated from
392          * the unit number allocator.
393          */
394
395         if (vpid > VM_MAXCPU)
396                 free_unr(vpid_unr, vpid);
397 }
398
399 static void
400 vpid_alloc(uint16_t *vpid, int num)
401 {
402         int i, x;
403
404         if (num <= 0 || num > VM_MAXCPU)
405                 panic("invalid number of vpids requested: %d", num);
406
407         /*
408          * If the "enable vpid" execution control is not enabled then the
409          * VPID is required to be 0 for all vcpus.
410          */
411         if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
412                 for (i = 0; i < num; i++)
413                         vpid[i] = 0;
414                 return;
415         }
416
417         /*
418          * Allocate a unique VPID for each vcpu from the unit number allocator.
419          */
420         for (i = 0; i < num; i++) {
421                 x = alloc_unr(vpid_unr);
422                 if (x == -1)
423                         break;
424                 else
425                         vpid[i] = x;
426         }
427
428         if (i < num) {
429                 atomic_add_int(&vpid_alloc_failed, 1);
430
431                 /*
432                  * If the unit number allocator does not have enough unique
433                  * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
434                  *
435          * These VPIDs are not unique across VMs but this does not
436                  * affect correctness because the combined mappings are also
437                  * tagged with the EP4TA which is unique for each VM.
438                  *
439                  * It is still sub-optimal because the invvpid will invalidate
440                  * combined mappings for a particular VPID across all EP4TAs.
441                  */
442                 while (i-- > 0)
443                         vpid_free(vpid[i]);
444
445                 for (i = 0; i < num; i++)
446                         vpid[i] = i + 1;
447         }
448 }
449
450 static void
451 vpid_init(void)
452 {
453         /*
454          * VPID 0 is required when the "enable VPID" execution control is
455          * disabled.
456          *
457          * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
458          * unit number allocator does not have sufficient unique VPIDs to
459          * satisfy the allocation.
460          *
461          * The remaining VPIDs are managed by the unit number allocator.
462          */
463         vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
464 }
465
466 static void
467 vmx_disable(void *arg __unused)
468 {
469         struct invvpid_desc invvpid_desc = { 0 };
470         struct invept_desc invept_desc = { 0 };
471
472         if (vmxon_enabled[curcpu]) {
473                 /*
474                  * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
475                  *
476                  * VMXON and VMXOFF are not required to invalidate any TLB caching
477                  * structures, so explicitly flush all contexts here to prevent
478                  * retention of cached information in the TLB between distinct VMX episodes.
479                  */
480                 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
481                 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
482                 vmxoff();
483         }
484         load_cr4(rcr4() & ~CR4_VMXE);
485 }
486
487 static int
488 vmx_cleanup(void)
489 {
490
491         if (pirvec >= 0)
492                 lapic_ipi_free(pirvec);
493
494         if (vpid_unr != NULL) {
495                 delete_unrhdr(vpid_unr);
496                 vpid_unr = NULL;
497         }
498
499         smp_rendezvous(NULL, vmx_disable, NULL, NULL);
500
501         return (0);
502 }
503
504 static void
505 vmx_enable(void *arg __unused)
506 {
507         int error;
508         uint64_t feature_control;
509
510         feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
511         if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
512             (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
513                 wrmsr(MSR_IA32_FEATURE_CONTROL,
514                     feature_control | IA32_FEATURE_CONTROL_VMX_EN |
515                     IA32_FEATURE_CONTROL_LOCK);
516         }
517
518         load_cr4(rcr4() | CR4_VMXE);
519
520         *(uint32_t *)vmxon_region[curcpu] = vmx_revision();
521         error = vmxon(vmxon_region[curcpu]);
522         if (error == 0)
523                 vmxon_enabled[curcpu] = 1;
524 }
525
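/*
 * Re-enter VMX operation on this cpu using the vmxon region that was set up
 * by vmx_enable(); a no-op on cpus where vmxon never succeeded.
 */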
526 static void
527 vmx_restore(void)
528 {
529
530         if (vmxon_enabled[curcpu])
531                 vmxon(vmxon_region[curcpu]);
532 }
533
534 static int
535 vmx_init(int ipinum)
536 {
537         int error, use_tpr_shadow;
538         uint64_t basic, fixed0, fixed1, feature_control;
539         uint32_t tmp, procbased2_vid_bits;
540
541         /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
542         if (!(cpu_feature2 & CPUID2_VMX)) {
543                 printf("vmx_init: processor does not support VMX operation\n");
544                 return (ENXIO);
545         }
546
547         /*
548          * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
549          * are set (bits 0 and 2 respectively).
550          */
551         feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
552         if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
553             (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
554                 printf("vmx_init: VMX operation disabled by BIOS\n");
555                 return (ENXIO);
556         }
557
558         /*
559          * Verify capabilities MSR_VMX_BASIC:
560          * - bit 54 indicates support for INS/OUTS decoding
561          */
562         basic = rdmsr(MSR_VMX_BASIC);
563         if ((basic & (1UL << 54)) == 0) {
564                 printf("vmx_init: processor does not support desired basic "
565                     "capabilities\n");
566                 return (EINVAL);
567         }
568
569         /* Check support for primary processor-based VM-execution controls */
570         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
571                                MSR_VMX_TRUE_PROCBASED_CTLS,
572                                PROCBASED_CTLS_ONE_SETTING,
573                                PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
574         if (error) {
575                 printf("vmx_init: processor does not support desired primary "
576                        "processor-based controls\n");
577                 return (error);
578         }
579
580         /* Clear the processor-based ctl bits that are set on demand */
581         procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
582
583         /* Check support for secondary processor-based VM-execution controls */
584         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
585                                MSR_VMX_PROCBASED_CTLS2,
586                                PROCBASED_CTLS2_ONE_SETTING,
587                                PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
588         if (error) {
589                 printf("vmx_init: processor does not support desired secondary "
590                        "processor-based controls\n");
591                 return (error);
592         }
593
594         /* Check support for VPID */
595         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
596                                PROCBASED2_ENABLE_VPID, 0, &tmp);
597         if (error == 0)
598                 procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
599
600         /* Check support for pin-based VM-execution controls */
601         error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
602                                MSR_VMX_TRUE_PINBASED_CTLS,
603                                PINBASED_CTLS_ONE_SETTING,
604                                PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
605         if (error) {
606                 printf("vmx_init: processor does not support desired "
607                        "pin-based controls\n");
608                 return (error);
609         }
610
611         /* Check support for VM-exit controls */
612         error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
613                                VM_EXIT_CTLS_ONE_SETTING,
614                                VM_EXIT_CTLS_ZERO_SETTING,
615                                &exit_ctls);
616         if (error) {
617                 printf("vmx_init: processor does not support desired "
618                     "exit controls\n");
619                 return (error);
620         }
621
622         /* Check support for VM-entry controls */
623         error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
624             VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
625             &entry_ctls);
626         if (error) {
627                 printf("vmx_init: processor does not support desired "
628                     "entry controls\n");
629                 return (error);
630         }
631
632         /*
633          * Check support for optional features by testing them
634          * as individual bits
635          */
636         cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
637                                         MSR_VMX_TRUE_PROCBASED_CTLS,
638                                         PROCBASED_HLT_EXITING, 0,
639                                         &tmp) == 0);
640
641         cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
642                                         MSR_VMX_PROCBASED_CTLS,
643                                         PROCBASED_MTF, 0,
644                                         &tmp) == 0);
645
646         cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
647                                          MSR_VMX_TRUE_PROCBASED_CTLS,
648                                          PROCBASED_PAUSE_EXITING, 0,
649                                          &tmp) == 0);
650
651         cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
652                                         MSR_VMX_PROCBASED_CTLS2,
653                                         PROCBASED2_UNRESTRICTED_GUEST, 0,
654                                         &tmp) == 0);
655
656         cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
657             MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
658             &tmp) == 0);
659
660         /*
661          * Check support for virtual interrupt delivery.
662          */
663         procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
664             PROCBASED2_VIRTUALIZE_X2APIC_MODE |
665             PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
666             PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
667
668         use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
669             MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
670             &tmp) == 0);
671
672         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
673             procbased2_vid_bits, 0, &tmp);
674         if (error == 0 && use_tpr_shadow) {
675                 virtual_interrupt_delivery = 1;
676                 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
677                     &virtual_interrupt_delivery);
678         }
679
680         if (virtual_interrupt_delivery) {
681                 procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
682                 procbased_ctls2 |= procbased2_vid_bits;
683                 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
684
685                 /*
686                  * No need to emulate accesses to %CR8 if virtual
687                  * interrupt delivery is enabled.
688                  */
689                 procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
690                 procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
691
692                 /*
693                  * Check for Posted Interrupts only if Virtual Interrupt
694                  * Delivery is enabled.
695                  */
696                 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
697                     MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
698                     &tmp);
699                 if (error == 0) {
700                         pirvec = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
701                             &IDTVEC(justreturn));
702                         if (pirvec < 0) {
703                                 if (bootverbose) {
704                                         printf("vmx_init: unable to allocate "
705                                             "posted interrupt vector\n");
706                                 }
707                         } else {
708                                 posted_interrupts = 1;
709                                 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
710                                     &posted_interrupts);
711                         }
712                 }
713         }
714
715         if (posted_interrupts)
716                 pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
717
718         /* Initialize EPT */
719         error = ept_init(ipinum);
720         if (error) {
721                 printf("vmx_init: ept initialization failed (%d)\n", error);
722                 return (error);
723         }
724
725         /*
726          * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
727          */
728         fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
729         fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
730         cr0_ones_mask = fixed0 & fixed1;
731         cr0_zeros_mask = ~fixed0 & ~fixed1;
732
733         /*
734          * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
735          * if unrestricted guest execution is allowed.
736          */
737         if (cap_unrestricted_guest)
738                 cr0_ones_mask &= ~(CR0_PG | CR0_PE);
739
740         /*
741          * Do not allow the guest to set CR0_NW or CR0_CD.
742          */
743         cr0_zeros_mask |= (CR0_NW | CR0_CD);
744
745         fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
746         fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
747         cr4_ones_mask = fixed0 & fixed1;
748         cr4_zeros_mask = ~fixed0 & ~fixed1;
749
750         vpid_init();
751
752         vmx_msr_init();
753
754         /* enable VMX operation */
755         smp_rendezvous(NULL, vmx_enable, NULL, NULL);
756
757         vmx_initialized = 1;
758
759         return (0);
760 }
761
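/*
 * Manually dispatch a host interrupt: look up the gate for 'vector' in the
 * host IDT, sanity check it, and call the handler through vmx_call_isr().
 */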
762 static void
763 vmx_trigger_hostintr(int vector)
764 {
765         uintptr_t func;
766         struct gate_descriptor *gd;
767
768         gd = &idt[vector];
769
770         KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
771             "invalid vector %d", vector));
772         KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
773             vector));
774         KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
775             "has invalid type %d", vector, gd->gd_type));
776         KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
777             "has invalid dpl %d", vector, gd->gd_dpl));
778         KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
779             "for vector %d has invalid selector %d", vector, gd->gd_selector));
780         KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
781             "IST %d", vector, gd->gd_ist));
782
783         func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
784         vmx_call_isr(func);
785 }
786
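/*
 * Program the guest/host mask and read shadow for CR0 or CR4 in the VMCS.
 * Bits set in the mask are owned by the hypervisor: guest reads of those
 * bits return the shadow value, which is initialized to 'initial' here.
 */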
787 static int
788 vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
789 {
790         int error, mask_ident, shadow_ident;
791         uint64_t mask_value;
792
793         if (which != 0 && which != 4)
794                 panic("vmx_setup_cr_shadow: unknown cr%d", which);
795
796         if (which == 0) {
797                 mask_ident = VMCS_CR0_MASK;
798                 mask_value = cr0_ones_mask | cr0_zeros_mask;
799                 shadow_ident = VMCS_CR0_SHADOW;
800         } else {
801                 mask_ident = VMCS_CR4_MASK;
802                 mask_value = cr4_ones_mask | cr4_zeros_mask;
803                 shadow_ident = VMCS_CR4_SHADOW;
804         }
805
806         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
807         if (error)
808                 return (error);
809
810         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
811         if (error)
812                 return (error);
813
814         return (0);
815 }
816 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init))
817 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
818
819 static void *
820 vmx_vminit(struct vm *vm, pmap_t pmap)
821 {
822         uint16_t vpid[VM_MAXCPU];
823         int i, error;
824         struct vmx *vmx;
825         struct vmcs *vmcs;
826         uint32_t exc_bitmap;
827
828         vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
829         if ((uintptr_t)vmx & PAGE_MASK) {
830                 panic("malloc of struct vmx not aligned on %d byte boundary",
831                       PAGE_SIZE);
832         }
833         vmx->vm = vm;
834
835         vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
836
837         /*
838          * Clean up EPTP-tagged guest physical and combined mappings
839          *
840          * VMX transitions are not required to invalidate any guest physical
841          * mappings. So, it may be possible for stale guest physical mappings
842          * to be present in the processor TLBs.
843          *
844          * Combined mappings for this EP4TA are also invalidated for all VPIDs.
845          */
846         ept_invalidate_mappings(vmx->eptp);
847
848         msr_bitmap_initialize(vmx->msr_bitmap);
849
850         /*
851          * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
852          * The guest FSBASE and GSBASE are saved and restored during
853          * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
854          * always restored from the vmcs host state area on vm-exit.
855          *
856          * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
857          * how they are saved/restored so can be directly accessed by the
858          * guest.
859          *
860          * MSR_EFER is saved and restored in the guest VMCS area on a
861          * VM exit and entry respectively. It is also restored from the
862          * host VMCS area on a VM exit.
863          *
864          * The TSC MSR is exposed read-only. Guest writes trap to the
865          * hypervisor so they cannot affect the host TSC.  When the guest
866          * writes the TSC, the "use TSC offsetting" execution control is
867          * enabled and the difference between the host TSC and the guest
868          * TSC is written into the TSC offset in the VMCS.
869          */
870         if (guest_msr_rw(vmx, MSR_GSBASE) ||
871             guest_msr_rw(vmx, MSR_FSBASE) ||
872             guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
873             guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
874             guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
875             guest_msr_rw(vmx, MSR_EFER) ||
876             guest_msr_ro(vmx, MSR_TSC))
877                 panic("vmx_vminit: error setting guest msr access");
878
879         vpid_alloc(vpid, VM_MAXCPU);
880
881         if (virtual_interrupt_delivery) {
882                 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
883                     APIC_ACCESS_ADDRESS);
884                 /* XXX this should really return an error to the caller */
885                 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
886         }
887
888         for (i = 0; i < VM_MAXCPU; i++) {
889                 vmcs = &vmx->vmcs[i];
890                 vmcs->identifier = vmx_revision();
891                 error = vmclear(vmcs);
892                 if (error != 0) {
893                         panic("vmx_vminit: vmclear error %d on vcpu %d\n",
894                               error, i);
895                 }
896
897                 vmx_msr_guest_init(vmx, i);
898
899                 error = vmcs_init(vmcs);
900                 KASSERT(error == 0, ("vmcs_init error %d", error));
901
902                 VMPTRLD(vmcs);
903                 error = 0;
904                 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
905                 error += vmwrite(VMCS_EPTP, vmx->eptp);
906                 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
907                 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
908                 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
909                 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
910                 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
911                 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
912                 error += vmwrite(VMCS_VPID, vpid[i]);
913
914                 /* exception bitmap */
915                 if (vcpu_trace_exceptions(vm, i))
916                         exc_bitmap = 0xffffffff;
917                 else
918                         exc_bitmap = 1 << IDT_MC;
919                 error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);
920
921                 vmx->ctx[i].guest_dr6 = 0xffff0ff0;
922                 error += vmwrite(VMCS_GUEST_DR7, 0x400);
923
924                 if (virtual_interrupt_delivery) {
925                         error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
926                         error += vmwrite(VMCS_VIRTUAL_APIC,
927                             vtophys(&vmx->apic_page[i]));
928                         error += vmwrite(VMCS_EOI_EXIT0, 0);
929                         error += vmwrite(VMCS_EOI_EXIT1, 0);
930                         error += vmwrite(VMCS_EOI_EXIT2, 0);
931                         error += vmwrite(VMCS_EOI_EXIT3, 0);
932                 }
933                 if (posted_interrupts) {
934                         error += vmwrite(VMCS_PIR_VECTOR, pirvec);
935                         error += vmwrite(VMCS_PIR_DESC,
936                             vtophys(&vmx->pir_desc[i]));
937                 }
938                 VMCLEAR(vmcs);
939                 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
940
941                 vmx->cap[i].set = 0;
942                 vmx->cap[i].proc_ctls = procbased_ctls;
943                 vmx->cap[i].proc_ctls2 = procbased_ctls2;
944
945                 vmx->state[i].nextrip = ~0;
946                 vmx->state[i].lastcpu = NOCPU;
947                 vmx->state[i].vpid = vpid[i];
948
949                 /*
950                  * Set up the CR0/4 shadows, and init the read shadow
951                  * to the power-on register value from the Intel Sys Arch.
952                  *  CR0 - 0x60000010
953                  *  CR4 - 0
954                  */
955                 error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
956                 if (error != 0)
957                         panic("vmx_setup_cr0_shadow %d", error);
958
959                 error = vmx_setup_cr4_shadow(vmcs, 0);
960                 if (error != 0)
961                         panic("vmx_setup_cr4_shadow %d", error);
962
963                 vmx->ctx[i].pmap = pmap;
964         }
965
966         return (vmx);
967 }
968
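/*
 * Handle a CPUID VM-exit by running the common x86 CPUID emulation against
 * the guest's %rax/%rbx/%rcx/%rdx values. Returns whether the exit was
 * handled.
 */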
969 static int
970 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
971 {
972         int handled, func;
973
974         func = vmxctx->guest_rax;
975
976         handled = x86_emulate_cpuid(vm, vcpu,
977                                     (uint32_t*)(&vmxctx->guest_rax),
978                                     (uint32_t*)(&vmxctx->guest_rbx),
979                                     (uint32_t*)(&vmxctx->guest_rcx),
980                                     (uint32_t*)(&vmxctx->guest_rdx));
981         return (handled);
982 }
983
984 static __inline void
985 vmx_run_trace(struct vmx *vmx, int vcpu)
986 {
987 #ifdef KTR
988         VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
989 #endif
990 }
991
992 static __inline void
993 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
994                int handled)
995 {
996 #ifdef KTR
997         VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
998                  handled ? "handled" : "unhandled",
999                  exit_reason_to_str(exit_reason), rip);
1000 #endif
1001 }
1002
1003 static __inline void
1004 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
1005 {
1006 #ifdef KTR
1007         VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
1008 #endif
1009 }
1010
1011 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
1012 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
1013
1014 /*
1015  * Invalidate the TLB mappings tagged with this vcpu's vpid.
1016  */
1017 static __inline void
1018 vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
1019 {
1020         struct vmxstate *vmxstate;
1021         struct invvpid_desc invvpid_desc;
1022
1023         vmxstate = &vmx->state[vcpu];
1024         if (vmxstate->vpid == 0)
1025                 return;
1026
1027         if (!running) {
1028                 /*
1029                  * Set the 'lastcpu' to an invalid host cpu.
1030                  *
1031                  * This will invalidate TLB entries tagged with the vcpu's
1032                  * vpid the next time it runs via vmx_set_pcpu_defaults().
1033                  */
1034                 vmxstate->lastcpu = NOCPU;
1035                 return;
1036         }
1037
1038         KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
1039             "critical section", __func__, vcpu));
1040
1041         /*
1042          * Invalidate all mappings tagged with 'vpid'
1043          *
1044          * We do this because this vcpu was executing on a different host
1045          * cpu when it last ran. We do not track whether it invalidated
1046          * mappings associated with its 'vpid' during that run. So we must
1047          * assume that the mappings associated with 'vpid' on 'curcpu' are
1048          * stale and invalidate them.
1049          *
1050          * Note that we incur this penalty only when the scheduler chooses to
1051          * move the thread associated with this vcpu between host cpus.
1052          *
1053          * Note also that this will invalidate mappings tagged with 'vpid'
1054          * for "all" EP4TAs.
1055          */
1056         if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
1057                 invvpid_desc._res1 = 0;
1058                 invvpid_desc._res2 = 0;
1059                 invvpid_desc.vpid = vmxstate->vpid;
1060                 invvpid_desc.linear_addr = 0;
1061                 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
1062                 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
1063         } else {
1064                 /*
1065                  * The invvpid can be skipped if an invept is going to
1066                  * be performed before entering the guest. The invept
1067                  * will invalidate combined mappings tagged with
1068                  * 'vmx->eptp' for all vpids.
1069                  */
1070                 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
1071         }
1072 }
1073
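/*
 * If the vcpu has migrated to a new host cpu since it last ran, reload the
 * per-cpu host state (TR, GDTR and GS bases) in the VMCS and invalidate any
 * TLB entries still tagged with its vpid from an earlier run.
 */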
1074 static void
1075 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
1076 {
1077         struct vmxstate *vmxstate;
1078
1079         vmxstate = &vmx->state[vcpu];
1080         if (vmxstate->lastcpu == curcpu)
1081                 return;
1082
1083         vmxstate->lastcpu = curcpu;
1084
1085         vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1086
1087         vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1088         vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1089         vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1090         vmx_invvpid(vmx, vcpu, pmap, 1);
1091 }
1092
1093 /*
1094  * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1095  */
1096 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
1097
1098 static void __inline
1099 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1100 {
1101
1102         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1103                 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1104                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1105                 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1106         }
1107 }
1108
1109 static void __inline
1110 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1111 {
1112
1113         KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1114             ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
1115         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1116         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1117         VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1118 }
1119
1120 static void __inline
1121 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1122 {
1123
1124         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
1125                 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1126                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1127                 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1128         }
1129 }
1130
1131 static void __inline
1132 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1133 {
1134
1135         KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
1136             ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
1137         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1138         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1139         VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1140 }
1141
1142 int
1143 vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset)
1144 {
1145         int error;
1146
1147         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
1148                 vmx->cap[vcpu].proc_ctls |= PROCBASED_TSC_OFFSET;
1149                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1150                 VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting");
1151         }
1152
1153         error = vmwrite(VMCS_TSC_OFFSET, offset);
1154
1155         return (error);
1156 }
1157
1158 #define NMI_BLOCKING    (VMCS_INTERRUPTIBILITY_NMI_BLOCKING |           \
1159                          VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1160 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING |           \
1161                          VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1162
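/*
 * Inject a virtual NMI by programming the VM-entry interruption-information
 * field, then clear the pending NMI request on the vcpu.
 */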
1163 static void
1164 vmx_inject_nmi(struct vmx *vmx, int vcpu)
1165 {
1166         uint32_t gi, info;
1167
1168         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1169         KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
1170             "interruptibility-state %#x", gi));
1171
1172         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1173         KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
1174             "VM-entry interruption information %#x", info));
1175
1176         /*
1177          * Inject the virtual NMI. The vector must be the NMI IDT entry
1178          * or the VMCS entry check will fail.
1179          */
1180         info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
1181         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1182
1183         VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1184
1185         /* Clear the request */
1186         vm_nmi_clear(vmx->vm, vcpu);
1187 }
1188
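/*
 * Inject pending events at VM entry: previously recorded exception/interrupt
 * info first, then NMIs, then an ExtINT or local APIC vector. When an event
 * cannot be delivered immediately, the corresponding window-exiting control
 * is armed so injection is retried as soon as the guest can accept it.
 */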
1189 static void
1190 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
1191     uint64_t guestrip)
1192 {
1193         int vector, need_nmi_exiting, extint_pending;
1194         uint64_t rflags, entryinfo;
1195         uint32_t gi, info;
1196
1197         if (vmx->state[vcpu].nextrip != guestrip) {
1198                 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1199                 if (gi & HWINTR_BLOCKING) {
1200                         VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
1201                             "cleared due to rip change: %#lx/%#lx",
1202                             vmx->state[vcpu].nextrip, guestrip);
1203                         gi &= ~HWINTR_BLOCKING;
1204                         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1205                 }
1206         }
1207
1208         if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
1209                 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
1210                     "intinfo is not valid: %#lx", __func__, entryinfo));
1211
1212                 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1213                 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
1214                      "pending exception: %#lx/%#x", __func__, entryinfo, info));
1215
1216                 info = entryinfo;
1217                 vector = info & 0xff;
1218                 if (vector == IDT_BP || vector == IDT_OF) {
1219                         /*
1220                          * VT-x requires #BP and #OF to be injected as software
1221                          * exceptions.
1222                          */
1223                         info &= ~VMCS_INTR_T_MASK;
1224                         info |= VMCS_INTR_T_SWEXCEPTION;
1225                 }
1226
1227                 if (info & VMCS_INTR_DEL_ERRCODE)
1228                         vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
1229
1230                 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1231         }
1232
1233         if (vm_nmi_pending(vmx->vm, vcpu)) {
1234                 /*
1235                  * If there are no conditions blocking NMI injection then
1236                  * inject it directly here otherwise enable "NMI window
1237                  * exiting" to inject it as soon as we can.
1238                  *
1239                  * We also check for STI_BLOCKING because some implementations
1240                  * don't allow NMI injection in this case. If we are running
1241                  * on a processor that doesn't have this restriction it will
1242                  * immediately exit and the NMI will be injected in the
1243                  * "NMI window exiting" handler.
1244                  */
1245                 need_nmi_exiting = 1;
1246                 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1247                 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1248                         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1249                         if ((info & VMCS_INTR_VALID) == 0) {
1250                                 vmx_inject_nmi(vmx, vcpu);
1251                                 need_nmi_exiting = 0;
1252                         } else {
1253                                 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
1254                                     "due to VM-entry intr info %#x", info);
1255                         }
1256                 } else {
1257                         VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
1258                             "Guest Interruptibility-state %#x", gi);
1259                 }
1260
1261                 if (need_nmi_exiting)
1262                         vmx_set_nmi_window_exiting(vmx, vcpu);
1263         }
1264
1265         extint_pending = vm_extint_pending(vmx->vm, vcpu);
1266
1267         if (!extint_pending && virtual_interrupt_delivery) {
1268                 vmx_inject_pir(vlapic);
1269                 return;
1270         }
1271
1272         /*
1273          * If interrupt-window exiting is already in effect then don't bother
1274          * checking for pending interrupts. This is just an optimization and
1275          * not needed for correctness.
1276          */
1277         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
1278                 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
1279                     "pending int_window_exiting");
1280                 return;
1281         }
1282
1283         if (!extint_pending) {
1284                 /* Ask the local apic for a vector to inject */
1285                 if (!vlapic_pending_intr(vlapic, &vector))
1286                         return;
1287
1288                 /*
1289                  * From the Intel SDM, Volume 3, Section "Maskable
1290                  * Hardware Interrupts":
1291                  * - maskable interrupt vectors [16,255] can be delivered
1292                  *   through the local APIC.
1293                  */
1294                 KASSERT(vector >= 16 && vector <= 255,
1295                     ("invalid vector %d from local APIC", vector));
1296         } else {
1297                 /* Ask the legacy pic for a vector to inject */
1298                 vatpic_pending_intr(vmx->vm, &vector);
1299
1300                 /*
1301                  * From the Intel SDM, Volume 3, Section "Maskable
1302                  * Hardware Interrupts":
1303                  * - maskable interrupt vectors [0,255] can be delivered
1304                  *   through the INTR pin.
1305                  */
1306                 KASSERT(vector >= 0 && vector <= 255,
1307                     ("invalid vector %d from INTR", vector));
1308         }
1309
1310         /* Check RFLAGS.IF and the interruptibility state of the guest */
1311         rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1312         if ((rflags & PSL_I) == 0) {
1313                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1314                     "rflags %#lx", vector, rflags);
1315                 goto cantinject;
1316         }
1317
1318         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1319         if (gi & HWINTR_BLOCKING) {
1320                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1321                     "Guest Interruptibility-state %#x", vector, gi);
1322                 goto cantinject;
1323         }
1324
1325         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1326         if (info & VMCS_INTR_VALID) {
1327                 /*
1328                  * This is expected and could happen for multiple reasons:
1329                  * - A vectoring VM-entry was aborted due to astpending
1330                  * - A VM-exit happened during event injection.
1331                  * - An exception was injected above.
1332                  * - An NMI was injected above or after "NMI window exiting"
1333                  */
1334                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1335                     "VM-entry intr info %#x", vector, info);
1336                 goto cantinject;
1337         }
1338
1339         /* Inject the interrupt */
1340         info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
1341         info |= vector;
1342         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1343
1344         if (!extint_pending) {
1345                 /* Update the Local APIC ISR */
1346                 vlapic_intr_accepted(vlapic, vector);
1347         } else {
1348                 vm_extint_clear(vmx->vm, vcpu);
1349                 vatpic_intr_accepted(vmx->vm, vector);
1350
1351                 /*
1352                  * After we accepted the current ExtINT the PIC may
1353                  * have posted another one.  If that is the case, set
1354                  * the Interrupt Window Exiting execution control so
1355                  * we can inject that one too.
1356                  *
1357                  * Also, interrupt window exiting allows us to inject any
1358                  * pending APIC vector that was preempted by the ExtINT
1359                  * as soon as possible. This applies both for the software
1360                  * emulated vlapic and the hardware assisted virtual APIC.
1361                  */
1362                 vmx_set_int_window_exiting(vmx, vcpu);
1363         }
1364
1365         VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1366
1367         return;
1368
1369 cantinject:
1370         /*
1371          * Set the Interrupt Window Exiting execution control so we can inject
1372          * the interrupt as soon as the blocking condition goes away.
1373          */
1374         vmx_set_int_window_exiting(vmx, vcpu);
1375 }
1376
1377 /*
1378  * If the Virtual NMIs execution control is '1' then the logical processor
1379  * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1380  * the VMCS. An IRET instruction in VMX non-root operation will remove any
1381  * virtual-NMI blocking.
1382  *
1383  * This unblocking occurs even if the IRET causes a fault. In this case the
1384  * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1385  */
1386 static void
1387 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1388 {
1389         uint32_t gi;
1390
1391         VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
1392         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1393         gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1394         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1395 }
1396
1397 static void
1398 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1399 {
1400         uint32_t gi;
1401
1402         VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
1403         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1404         gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1405         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1406 }
1407
1408 static void
1409 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
1410 {
1411         uint32_t gi;
1412
1413         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1414         KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
1415             ("NMI blocking is not in effect %#x", gi));
1416 }
1417
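/*
 * Handle a VM-exit caused by the guest executing XSETBV.  The new XCR0
 * value in %edx:%eax is validated against the host's allowed feature mask
 * and the architectural consistency rules below before being loaded;
 * invalid requests result in a #GP or #UD being injected into the guest.
 */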
1418 static int
1419 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1420 {
1421         struct vmxctx *vmxctx;
1422         uint64_t xcrval;
1423         const struct xsave_limits *limits;
1424
1425         vmxctx = &vmx->ctx[vcpu];
1426         limits = vmm_get_xsave_limits();
1427
1428         /*
1429          * Note that the processor raises a #GP fault on its own if
1430          * xsetbv is executed for CPL != 0, so we do not have to
1431          * emulate that fault here.
1432          */
1433
1434         /* Only xcr0 is supported. */
1435         if (vmxctx->guest_rcx != 0) {
1436                 vm_inject_gp(vmx->vm, vcpu);
1437                 return (HANDLED);
1438         }
1439
1440         /* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1441         if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1442                 vm_inject_ud(vmx->vm, vcpu);
1443                 return (HANDLED);
1444         }
1445
1446         xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1447         if ((xcrval & ~limits->xcr0_allowed) != 0) {
1448                 vm_inject_gp(vmx->vm, vcpu);
1449                 return (HANDLED);
1450         }
1451
1452         if (!(xcrval & XFEATURE_ENABLED_X87)) {
1453                 vm_inject_gp(vmx->vm, vcpu);
1454                 return (HANDLED);
1455         }
1456
1457         /* AVX (YMM_Hi128) requires SSE. */
1458         if (xcrval & XFEATURE_ENABLED_AVX &&
1459             (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1460                 vm_inject_gp(vmx->vm, vcpu);
1461                 return (HANDLED);
1462         }
1463
1464         /*
1465          * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1466          * ZMM_Hi256, and Hi16_ZMM.
1467          */
1468         if (xcrval & XFEATURE_AVX512 &&
1469             (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1470             (XFEATURE_AVX512 | XFEATURE_AVX)) {
1471                 vm_inject_gp(vmx->vm, vcpu);
1472                 return (HANDLED);
1473         }
1474
1475         /*
1476          * Intel MPX requires both bound register state flags to be
1477          * set.
1478          */
1479         if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1480             ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1481                 vm_inject_gp(vmx->vm, vcpu);
1482                 return (HANDLED);
1483         }
1484
1485         /*
1486          * This runs "inside" vmrun() with the guest's FPU state, so
1487          * modifying xcr0 directly modifies the guest's xcr0, not the
1488          * host's.
1489          */
1490         load_xcr(0, xcrval);
1491         return (HANDLED);
1492 }
1493
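/*
 * Fetch a guest general purpose register by its 4-bit encoding as used in
 * the VM-exit qualification and instruction-information fields
 * (0 = %rax, 1 = %rcx, ..., 15 = %r15).  %rsp (encoding 4) is not kept in
 * the vmxctx and is read directly from the VMCS.
 */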
1494 static uint64_t
1495 vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
1496 {
1497         const struct vmxctx *vmxctx;
1498
1499         vmxctx = &vmx->ctx[vcpu];
1500
1501         switch (ident) {
1502         case 0:
1503                 return (vmxctx->guest_rax);
1504         case 1:
1505                 return (vmxctx->guest_rcx);
1506         case 2:
1507                 return (vmxctx->guest_rdx);
1508         case 3:
1509                 return (vmxctx->guest_rbx);
1510         case 4:
1511                 return (vmcs_read(VMCS_GUEST_RSP));
1512         case 5:
1513                 return (vmxctx->guest_rbp);
1514         case 6:
1515                 return (vmxctx->guest_rsi);
1516         case 7:
1517                 return (vmxctx->guest_rdi);
1518         case 8:
1519                 return (vmxctx->guest_r8);
1520         case 9:
1521                 return (vmxctx->guest_r9);
1522         case 10:
1523                 return (vmxctx->guest_r10);
1524         case 11:
1525                 return (vmxctx->guest_r11);
1526         case 12:
1527                 return (vmxctx->guest_r12);
1528         case 13:
1529                 return (vmxctx->guest_r13);
1530         case 14:
1531                 return (vmxctx->guest_r14);
1532         case 15:
1533                 return (vmxctx->guest_r15);
1534         default:
1535                 panic("invalid vmx register %d", ident);
1536         }
1537 }
1538
1539 static void
1540 vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
1541 {
1542         struct vmxctx *vmxctx;
1543
1544         vmxctx = &vmx->ctx[vcpu];
1545
1546         switch (ident) {
1547         case 0:
1548                 vmxctx->guest_rax = regval;
1549                 break;
1550         case 1:
1551                 vmxctx->guest_rcx = regval;
1552                 break;
1553         case 2:
1554                 vmxctx->guest_rdx = regval;
1555                 break;
1556         case 3:
1557                 vmxctx->guest_rbx = regval;
1558                 break;
1559         case 4:
1560                 vmcs_write(VMCS_GUEST_RSP, regval);
1561                 break;
1562         case 5:
1563                 vmxctx->guest_rbp = regval;
1564                 break;
1565         case 6:
1566                 vmxctx->guest_rsi = regval;
1567                 break;
1568         case 7:
1569                 vmxctx->guest_rdi = regval;
1570                 break;
1571         case 8:
1572                 vmxctx->guest_r8 = regval;
1573                 break;
1574         case 9:
1575                 vmxctx->guest_r9 = regval;
1576                 break;
1577         case 10:
1578                 vmxctx->guest_r10 = regval;
1579                 break;
1580         case 11:
1581                 vmxctx->guest_r11 = regval;
1582                 break;
1583         case 12:
1584                 vmxctx->guest_r12 = regval;
1585                 break;
1586         case 13:
1587                 vmxctx->guest_r13 = regval;
1588                 break;
1589         case 14:
1590                 vmxctx->guest_r14 = regval;
1591                 break;
1592         case 15:
1593                 vmxctx->guest_r15 = regval;
1594                 break;
1595         default:
1596                 panic("invalid vmx register %d", ident);
1597         }
1598 }
1599
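/*
 * Emulate a "mov to %cr0".  The value written by the guest is stored in
 * the CR0 read shadow while the bits required (cr0_ones_mask) or
 * disallowed (cr0_zeros_mask) by VMX operation are folded into the real
 * guest %cr0.  Enabling paging with EFER.LME set also requires EFER.LMA
 * and the "IA-32e mode guest" VM-entry control to be updated in lockstep.
 */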
1600 static int
1601 vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1602 {
1603         uint64_t crval, regval;
1604
1605         /* We only handle mov to %cr0 at this time */
1606         if ((exitqual & 0xf0) != 0x00)
1607                 return (UNHANDLED);
1608
1609         regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1610
1611         vmcs_write(VMCS_CR0_SHADOW, regval);
1612
1613         crval = regval | cr0_ones_mask;
1614         crval &= ~cr0_zeros_mask;
1615         vmcs_write(VMCS_GUEST_CR0, crval);
1616
1617         if (regval & CR0_PG) {
1618                 uint64_t efer, entry_ctls;
1619
1620                 /*
1621                  * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1622                  * the "IA-32e mode guest" bit in VM-entry control must be
1623                  * equal.
1624                  */
1625                 efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1626                 if (efer & EFER_LME) {
1627                         efer |= EFER_LMA;
1628                         vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1629                         entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1630                         entry_ctls |= VM_ENTRY_GUEST_LMA;
1631                         vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1632                 }
1633         }
1634
1635         return (HANDLED);
1636 }
1637
1638 static int
1639 vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1640 {
1641         uint64_t crval, regval;
1642
1643         /* We only handle mov to %cr4 at this time */
1644         if ((exitqual & 0xf0) != 0x00)
1645                 return (UNHANDLED);
1646
1647         regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1648
1649         vmcs_write(VMCS_CR4_SHADOW, regval);
1650
1651         crval = regval | cr4_ones_mask;
1652         crval &= ~cr4_zeros_mask;
1653         vmcs_write(VMCS_GUEST_CR4, crval);
1654
1655         return (HANDLED);
1656 }
1657
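/*
 * Emulate "mov %cr8, reg" and "mov reg, %cr8".  %cr8 is an alias for the
 * local APIC task-priority register so the access is simply forwarded to
 * the vlapic.
 */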
1658 static int
1659 vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1660 {
1661         struct vlapic *vlapic;
1662         uint64_t cr8;
1663         int regnum;
1664
1665         /* We only handle mov %cr8 to/from a register at this time. */
1666         if ((exitqual & 0xe0) != 0x00) {
1667                 return (UNHANDLED);
1668         }
1669
1670         vlapic = vm_lapic(vmx->vm, vcpu);
1671         regnum = (exitqual >> 8) & 0xf;
1672         if (exitqual & 0x10) {
1673                 cr8 = vlapic_get_cr8(vlapic);
1674                 vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
1675         } else {
1676                 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
1677                 vlapic_set_cr8(vlapic, cr8);
1678         }
1679
1680         return (HANDLED);
1681 }
1682
1683 /*
1684  * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
1685  */
1686 static int
1687 vmx_cpl(void)
1688 {
1689         uint32_t ssar;
1690
1691         ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
1692         return ((ssar >> 5) & 0x3);
1693 }
1694
1695 static enum vm_cpu_mode
1696 vmx_cpu_mode(void)
1697 {
1698         uint32_t csar;
1699
1700         if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
1701                 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1702                 if (csar & 0x2000)
1703                         return (CPU_MODE_64BIT);        /* CS.L = 1 */
1704                 else
1705                         return (CPU_MODE_COMPATIBILITY);
1706         } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
1707                 return (CPU_MODE_PROTECTED);
1708         } else {
1709                 return (CPU_MODE_REAL);
1710         }
1711 }
1712
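/*
 * Derive the guest paging mode from CR0.PG, CR4.PAE and EFER.LME.
 */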
1713 static enum vm_paging_mode
1714 vmx_paging_mode(void)
1715 {
1716
1717         if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1718                 return (PAGING_MODE_FLAT);
1719         if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1720                 return (PAGING_MODE_32);
1721         if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1722                 return (PAGING_MODE_64);
1723         else
1724                 return (PAGING_MODE_PAE);
1725 }
1726
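/*
 * For INS the guest's index register is %rdi (the destination); for OUTS
 * it is %rsi (the source).
 */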
1727 static uint64_t
1728 inout_str_index(struct vmx *vmx, int vcpuid, int in)
1729 {
1730         uint64_t val;
1731         int error;
1732         enum vm_reg_name reg;
1733
1734         reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
1735         error = vmx_getreg(vmx, vcpuid, reg, &val);
1736         KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
1737         return (val);
1738 }
1739
1740 static uint64_t
1741 inout_str_count(struct vmx *vmx, int vcpuid, int rep)
1742 {
1743         uint64_t val;
1744         int error;
1745
1746         if (rep) {
1747                 error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
1748                 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
1749         } else {
1750                 val = 1;
1751         }
1752         return (val);
1753 }
1754
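/*
 * Bits 9:7 of the VM-exit instruction-information field encode the
 * address size used by the string instruction.
 */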
1755 static int
1756 inout_str_addrsize(uint32_t inst_info)
1757 {
1758         uint32_t size;
1759
1760         size = (inst_info >> 7) & 0x7;
1761         switch (size) {
1762         case 0:
1763                 return (2);     /* 16 bit */
1764         case 1:
1765                 return (4);     /* 32 bit */
1766         case 2:
1767                 return (8);     /* 64 bit */
1768         default:
1769                 panic("%s: invalid size encoding %d", __func__, size);
1770         }
1771 }
1772
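/*
 * INS always uses %es as the destination segment.  For OUTS the source
 * segment (after any segment override prefix) is encoded in bits 17:15
 * of the VM-exit instruction-information field.
 */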
1773 static void
1774 inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
1775     struct vm_inout_str *vis)
1776 {
1777         int error, s;
1778
1779         if (in) {
1780                 vis->seg_name = VM_REG_GUEST_ES;
1781         } else {
1782                 s = (inst_info >> 15) & 0x7;
1783                 vis->seg_name = vm_segment_name(s);
1784         }
1785
1786         error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
1787         KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
1788 }
1789
1790 static void
1791 vmx_paging_info(struct vm_guest_paging *paging)
1792 {
1793         paging->cr3 = vmcs_guest_cr3();
1794         paging->cpl = vmx_cpl();
1795         paging->cpu_mode = vmx_cpu_mode();
1796         paging->paging_mode = vmx_paging_mode();
1797 }
1798
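/*
 * Prepare an instruction-emulation exit.  The guest paging state and the
 * CS base/default operand size (cs_d) are captured here because the
 * instruction decoder needs them; the instruction length is left at zero
 * so %rip is not advanced until emulation completes.
 */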
1799 static void
1800 vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
1801 {
1802         struct vm_guest_paging *paging;
1803         uint32_t csar;
1804
1805         paging = &vmexit->u.inst_emul.paging;
1806
1807         vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1808         vmexit->inst_length = 0;
1809         vmexit->u.inst_emul.gpa = gpa;
1810         vmexit->u.inst_emul.gla = gla;
1811         vmx_paging_info(paging);
1812         switch (paging->cpu_mode) {
1813         case CPU_MODE_REAL:
1814                 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1815                 vmexit->u.inst_emul.cs_d = 0;
1816                 break;
1817         case CPU_MODE_PROTECTED:
1818         case CPU_MODE_COMPATIBILITY:
1819                 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1820                 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1821                 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
1822                 break;
1823         default:
1824                 vmexit->u.inst_emul.cs_base = 0;
1825                 vmexit->u.inst_emul.cs_d = 0;
1826                 break;
1827         }
1828         vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
1829 }
1830
1831 static int
1832 ept_fault_type(uint64_t ept_qual)
1833 {
1834         int fault_type;
1835
1836         if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1837                 fault_type = VM_PROT_WRITE;
1838         else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1839                 fault_type = VM_PROT_EXECUTE;
1840         else
1841                 fault_type = VM_PROT_READ;
1842
1843         return (fault_type);
1844 }
1845
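/*
 * Return TRUE if an EPT violation looks like an MMIO access that should
 * be handled by emulating the faulting instruction, i.e. it is a data
 * read or write to a guest-physical address that was the translation of
 * a guest-linear address.
 */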
1846 static boolean_t
1847 ept_emulation_fault(uint64_t ept_qual)
1848 {
1849         int read, write;
1850
1851         /* EPT fault on an instruction fetch doesn't make sense here */
1852         if (ept_qual & EPT_VIOLATION_INST_FETCH)
1853                 return (FALSE);
1854
1855         /* EPT fault must be a read fault or a write fault */
1856         read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1857         write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1858         if ((read | write) == 0)
1859                 return (FALSE);
1860
1861         /*
1862          * The EPT violation must have been caused by accessing a
1863          * guest-physical address that is a translation of a guest-linear
1864          * address.
1865          */
1866         if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1867             (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1868                 return (FALSE);
1869         }
1870
1871         return (TRUE);
1872 }
1873
1874 static __inline int
1875 apic_access_virtualization(struct vmx *vmx, int vcpuid)
1876 {
1877         uint32_t proc_ctls2;
1878
1879         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1880         return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
1881 }
1882
1883 static __inline int
1884 x2apic_virtualization(struct vmx *vmx, int vcpuid)
1885 {
1886         uint32_t proc_ctls2;
1887
1888         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
1889         return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
1890 }
1891
1892 static int
1893 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
1894     uint64_t qual)
1895 {
1896         int error, handled, offset;
1897         uint32_t *apic_regs, vector;
1898         bool retu;
1899
1900         handled = HANDLED;
1901         offset = APIC_WRITE_OFFSET(qual);
1902
1903         if (!apic_access_virtualization(vmx, vcpuid)) {
1904                 /*
1905                  * In general there should not be any APIC write VM-exits
1906                  * unless APIC-access virtualization is enabled.
1907                  *
1908                  * However self-IPI virtualization can legitimately trigger
1909                  * an APIC-write VM-exit so treat it specially.
1910                  */
1911                 if (x2apic_virtualization(vmx, vcpuid) &&
1912                     offset == APIC_OFFSET_SELF_IPI) {
1913                         apic_regs = (uint32_t *)(vlapic->apic_page);
1914                         vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
1915                         vlapic_self_ipi_handler(vlapic, vector);
1916                         return (HANDLED);
1917                 } else
1918                         return (UNHANDLED);
1919         }
1920
1921         switch (offset) {
1922         case APIC_OFFSET_ID:
1923                 vlapic_id_write_handler(vlapic);
1924                 break;
1925         case APIC_OFFSET_LDR:
1926                 vlapic_ldr_write_handler(vlapic);
1927                 break;
1928         case APIC_OFFSET_DFR:
1929                 vlapic_dfr_write_handler(vlapic);
1930                 break;
1931         case APIC_OFFSET_SVR:
1932                 vlapic_svr_write_handler(vlapic);
1933                 break;
1934         case APIC_OFFSET_ESR:
1935                 vlapic_esr_write_handler(vlapic);
1936                 break;
1937         case APIC_OFFSET_ICR_LOW:
1938                 retu = false;
1939                 error = vlapic_icrlo_write_handler(vlapic, &retu);
1940                 if (error != 0 || retu)
1941                         handled = UNHANDLED;
1942                 break;
1943         case APIC_OFFSET_CMCI_LVT:
1944         case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
1945                 vlapic_lvt_write_handler(vlapic, offset);
1946                 break;
1947         case APIC_OFFSET_TIMER_ICR:
1948                 vlapic_icrtmr_write_handler(vlapic);
1949                 break;
1950         case APIC_OFFSET_TIMER_DCR:
1951                 vlapic_dcr_write_handler(vlapic);
1952                 break;
1953         default:
1954                 handled = UNHANDLED;
1955                 break;
1956         }
1957         return (handled);
1958 }
1959
1960 static bool
1961 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
1962 {
1963
1964         if (apic_access_virtualization(vmx, vcpuid) &&
1965             (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
1966                 return (true);
1967         else
1968                 return (false);
1969 }
1970
1971 static int
1972 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
1973 {
1974         uint64_t qual;
1975         int access_type, offset, allowed;
1976
1977         if (!apic_access_virtualization(vmx, vcpuid))
1978                 return (UNHANDLED);
1979
1980         qual = vmexit->u.vmx.exit_qualification;
1981         access_type = APIC_ACCESS_TYPE(qual);
1982         offset = APIC_ACCESS_OFFSET(qual);
1983
1984         allowed = 0;
1985         if (access_type == 0) {
1986                 /*
1987                  * Read data access to the following registers is expected.
1988                  */
1989                 switch (offset) {
1990                 case APIC_OFFSET_APR:
1991                 case APIC_OFFSET_PPR:
1992                 case APIC_OFFSET_RRR:
1993                 case APIC_OFFSET_CMCI_LVT:
1994                 case APIC_OFFSET_TIMER_CCR:
1995                         allowed = 1;
1996                         break;
1997                 default:
1998                         break;
1999                 }
2000         } else if (access_type == 1) {
2001                 /*
2002                  * Write data access to the following registers is expected.
2003                  */
2004                 switch (offset) {
2005                 case APIC_OFFSET_VER:
2006                 case APIC_OFFSET_APR:
2007                 case APIC_OFFSET_PPR:
2008                 case APIC_OFFSET_RRR:
2009                 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
2010                 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
2011                 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
2012                 case APIC_OFFSET_CMCI_LVT:
2013                 case APIC_OFFSET_TIMER_CCR:
2014                         allowed = 1;
2015                         break;
2016                 default:
2017                         break;
2018                 }
2019         }
2020
2021         if (allowed) {
2022                 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
2023                     VIE_INVALID_GLA);
2024         }
2025
2026         /*
2027          * Regardless of whether the APIC-access is allowed this handler
2028          * always returns UNHANDLED:
2029          * - if the access is allowed then it is handled by emulating the
2030          *   instruction that caused the VM-exit (outside the critical section)
2031          * - if the access is not allowed then it will be converted to an
2032          *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
2033          */
2034         return (UNHANDLED);
2035 }
2036
2037 static enum task_switch_reason
2038 vmx_task_switch_reason(uint64_t qual)
2039 {
2040         int reason;
2041
2042         reason = (qual >> 30) & 0x3;
2043         switch (reason) {
2044         case 0:
2045                 return (TSR_CALL);
2046         case 1:
2047                 return (TSR_IRET);
2048         case 2:
2049                 return (TSR_JMP);
2050         case 3:
2051                 return (TSR_IDT_GATE);
2052         default:
2053                 panic("%s: invalid reason %d", __func__, reason);
2054         }
2055 }
2056
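/*
 * MSR accesses to the local APIC MSR range are routed to the vlapic;
 * everything else is handled by the VMX MSR emulation code.
 */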
2057 static int
2058 emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2059 {
2060         int error;
2061
2062         if (lapic_msr(num))
2063                 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2064         else
2065                 error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2066
2067         return (error);
2068 }
2069
2070 static int
2071 emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2072 {
2073         struct vmxctx *vmxctx;
2074         uint64_t result;
2075         uint32_t eax, edx;
2076         int error;
2077
2078         if (lapic_msr(num))
2079                 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2080         else
2081                 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2082
2083         if (error == 0) {
2084                 eax = result;
2085                 vmxctx = &vmx->ctx[vcpuid];
2086                 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2087                 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2088
2089                 edx = result >> 32;
2090                 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2091                 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2092         }
2093
2094         return (error);
2095 }
2096
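/*
 * Process a VM-exit.  Returns non-zero if the exit was handled in the
 * kernel and guest execution can be resumed immediately; in that case the
 * guest %rip is advanced past the exiting instruction before returning.
 * Returns zero if the exit, described by 'vmexit', must be completed in
 * userland.
 */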
2097 static int
2098 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2099 {
2100         int error, errcode, errcode_valid, handled, in;
2101         struct vmxctx *vmxctx;
2102         struct vlapic *vlapic;
2103         struct vm_inout_str *vis;
2104         struct vm_task_switch *ts;
2105         uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2106         uint32_t intr_type, intr_vec, reason;
2107         uint64_t exitintinfo, qual, gpa;
2108         bool retu;
2109
2110         CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2111         CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2112
2113         handled = UNHANDLED;
2114         vmxctx = &vmx->ctx[vcpu];
2115
2116         qual = vmexit->u.vmx.exit_qualification;
2117         reason = vmexit->u.vmx.exit_reason;
2118         vmexit->exitcode = VM_EXITCODE_BOGUS;
2119
2120         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2121
2122         /*
2123          * VM-entry failures during or after loading guest state.
2124          *
2125          * These VM-exits are uncommon but must be handled specially
2126          * as most VM-exit fields are not populated as usual.
2127          */
2128         if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
2129                 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
2130                 __asm __volatile("int $18");
2131                 return (1);
2132         }
2133
2134         /*
2135          * VM exits that can be triggered during event delivery need to
2136          * be handled specially by re-injecting the event if the IDT
2137          * vectoring information field's valid bit is set.
2138          *
2139          * See "Information for VM Exits During Event Delivery" in Intel SDM
2140          * for details.
2141          */
2142         idtvec_info = vmcs_idt_vectoring_info();
2143         if (idtvec_info & VMCS_IDT_VEC_VALID) {
2144                 idtvec_info &= ~(1 << 12); /* clear undefined bit */
2145                 exitintinfo = idtvec_info;
2146                 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2147                         idtvec_err = vmcs_idt_vectoring_err();
2148                         exitintinfo |= (uint64_t)idtvec_err << 32;
2149                 }
2150                 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2151                 KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
2152                     __func__, error));
2153
2154                 /*
2155                  * If 'virtual NMIs' are being used and the VM-exit
2156                  * happened while injecting an NMI during the previous
2157                  * VM-entry, then clear "blocking by NMI" in the
2158                  * Guest Interruptibility-State so the NMI can be
2159                  * reinjected on the subsequent VM-entry.
2160                  *
2161                  * However, if the NMI was being delivered through a task
2162                  * gate, then the new task must start execution with NMIs
2163                  * blocked so don't clear NMI blocking in this case.
2164                  */
2165                 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2166                 if (intr_type == VMCS_INTR_T_NMI) {
2167                         if (reason != EXIT_REASON_TASK_SWITCH)
2168                                 vmx_clear_nmi_blocking(vmx, vcpu);
2169                         else
2170                                 vmx_assert_nmi_blocking(vmx, vcpu);
2171                 }
2172
2173                 /*
2174                  * Update VM-entry instruction length if the event being
2175                  * delivered was a software interrupt or software exception.
2176                  */
2177                 if (intr_type == VMCS_INTR_T_SWINTR ||
2178                     intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2179                     intr_type == VMCS_INTR_T_SWEXCEPTION) {
2180                         vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2181                 }
2182         }
2183
2184         switch (reason) {
2185         case EXIT_REASON_TASK_SWITCH:
2186                 ts = &vmexit->u.task_switch;
2187                 ts->tsssel = qual & 0xffff;
2188                 ts->reason = vmx_task_switch_reason(qual);
2189                 ts->ext = 0;
2190                 ts->errcode_valid = 0;
2191                 vmx_paging_info(&ts->paging);
2192                 /*
2193                  * If the task switch was due to a CALL, JMP, IRET, software
2194                  * interrupt (INT n) or software exception (INT3, INTO),
2195                  * then the saved %rip references the instruction that caused
2196                  * the task switch. The instruction length field in the VMCS
2197                  * is valid in this case.
2198                  *
2199                  * In all other cases (e.g., NMI, hardware exception) the
2200                  * saved %rip is one that would have been saved in the old TSS
2201                  * had the task switch completed normally so the instruction
2202                  * length field is not needed in this case and is explicitly
2203                  * set to 0.
2204                  */
2205                 if (ts->reason == TSR_IDT_GATE) {
2206                         KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2207                             ("invalid idtvec_info %#x for IDT task switch",
2208                             idtvec_info));
2209                         intr_type = idtvec_info & VMCS_INTR_T_MASK;
2210                         if (intr_type != VMCS_INTR_T_SWINTR &&
2211                             intr_type != VMCS_INTR_T_SWEXCEPTION &&
2212                             intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2213                                 /* Task switch triggered by external event */
2214                                 ts->ext = 1;
2215                                 vmexit->inst_length = 0;
2216                                 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2217                                         ts->errcode_valid = 1;
2218                                         ts->errcode = vmcs_idt_vectoring_err();
2219                                 }
2220                         }
2221                 }
2222                 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2223                 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2224                     "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2225                     ts->ext ? "external" : "internal",
2226                     ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2227                 break;
2228         case EXIT_REASON_CR_ACCESS:
2229                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2230                 switch (qual & 0xf) {
2231                 case 0:
2232                         handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2233                         break;
2234                 case 4:
2235                         handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2236                         break;
2237                 case 8:
2238                         handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2239                         break;
2240                 }
2241                 break;
2242         case EXIT_REASON_RDMSR:
2243                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2244                 retu = false;
2245                 ecx = vmxctx->guest_rcx;
2246                 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2247                 error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
2248                 if (error) {
2249                         vmexit->exitcode = VM_EXITCODE_RDMSR;
2250                         vmexit->u.msr.code = ecx;
2251                 } else if (!retu) {
2252                         handled = HANDLED;
2253                 } else {
2254                         /* Return to userspace with a valid exitcode */
2255                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2256                             ("emulate_rdmsr retu with bogus exitcode"));
2257                 }
2258                 break;
2259         case EXIT_REASON_WRMSR:
2260                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2261                 retu = false;
2262                 eax = vmxctx->guest_rax;
2263                 ecx = vmxctx->guest_rcx;
2264                 edx = vmxctx->guest_rdx;
2265                 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2266                     ecx, (uint64_t)edx << 32 | eax);
2267                 error = emulate_wrmsr(vmx, vcpu, ecx,
2268                     (uint64_t)edx << 32 | eax, &retu);
2269                 if (error) {
2270                         vmexit->exitcode = VM_EXITCODE_WRMSR;
2271                         vmexit->u.msr.code = ecx;
2272                         vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2273                 } else if (!retu) {
2274                         handled = HANDLED;
2275                 } else {
2276                         /* Return to userspace with a valid exitcode */
2277                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2278                             ("emulate_wrmsr retu with bogus exitcode"));
2279                 }
2280                 break;
2281         case EXIT_REASON_HLT:
2282                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2283                 vmexit->exitcode = VM_EXITCODE_HLT;
2284                 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2285                 if (virtual_interrupt_delivery)
2286                         vmexit->u.hlt.intr_status =
2287                             vmcs_read(VMCS_GUEST_INTR_STATUS);
2288                 else
2289                         vmexit->u.hlt.intr_status = 0;
2290                 break;
2291         case EXIT_REASON_MTF:
2292                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2293                 vmexit->exitcode = VM_EXITCODE_MTRAP;
2294                 vmexit->inst_length = 0;
2295                 break;
2296         case EXIT_REASON_PAUSE:
2297                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2298                 vmexit->exitcode = VM_EXITCODE_PAUSE;
2299                 break;
2300         case EXIT_REASON_INTR_WINDOW:
2301                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2302                 vmx_clear_int_window_exiting(vmx, vcpu);
2303                 return (1);
2304         case EXIT_REASON_EXT_INTR:
2305                 /*
2306                  * External interrupts serve only to cause VM exits and allow
2307                  * the host interrupt handler to run.
2308                  *
2309                  * If this external interrupt triggers a virtual interrupt
2310                  * to a VM, then that state will be recorded by the
2311                  * host interrupt handler in the VM's softc. We will inject
2312                  * this virtual interrupt during the subsequent VM enter.
2313                  */
2314                 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2315
2316                 /*
2317                  * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2318                  * This appears to be a bug in VMware Fusion?
2319                  */
2320                 if (!(intr_info & VMCS_INTR_VALID))
2321                         return (1);
2322                 KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2323                     (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2324                     ("VM exit interruption info invalid: %#x", intr_info));
2325                 vmx_trigger_hostintr(intr_info & 0xff);
2326
2327                 /*
2328                  * This is special. We want to treat this as a 'handled'
2329                  * VM-exit but not increment the instruction pointer.
2330                  */
2331                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2332                 return (1);
2333         case EXIT_REASON_NMI_WINDOW:
2334                 /* Exit to allow the pending virtual NMI to be injected */
2335                 if (vm_nmi_pending(vmx->vm, vcpu))
2336                         vmx_inject_nmi(vmx, vcpu);
2337                 vmx_clear_nmi_window_exiting(vmx, vcpu);
2338                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2339                 return (1);
2340         case EXIT_REASON_INOUT:
2341                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2342                 vmexit->exitcode = VM_EXITCODE_INOUT;
2343                 vmexit->u.inout.bytes = (qual & 0x7) + 1;
2344                 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2345                 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2346                 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2347                 vmexit->u.inout.port = (uint16_t)(qual >> 16);
2348                 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2349                 if (vmexit->u.inout.string) {
2350                         inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2351                         vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2352                         vis = &vmexit->u.inout_str;
2353                         vmx_paging_info(&vis->paging);
2354                         vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2355                         vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2356                         vis->index = inout_str_index(vmx, vcpu, in);
2357                         vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2358                         vis->addrsize = inout_str_addrsize(inst_info);
2359                         inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2360                 }
2361                 break;
2362         case EXIT_REASON_CPUID:
2363                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2364                 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2365                 break;
2366         case EXIT_REASON_EXCEPTION:
2367                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2368                 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2369                 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2370                     ("VM exit interruption info invalid: %#x", intr_info));
2371
2372                 intr_vec = intr_info & 0xff;
2373                 intr_type = intr_info & VMCS_INTR_T_MASK;
2374
2375                 /*
2376                  * If Virtual NMIs control is 1 and the VM-exit is due to a
2377                  * fault encountered during the execution of IRET then we must
2378                  * restore the state of "virtual-NMI blocking" before resuming
2379                  * the guest.
2380                  *
2381                  * See "Resuming Guest Software after Handling an Exception".
2382                  * See "Information for VM Exits Due to Vectored Events".
2383                  */
2384                 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2385                     (intr_vec != IDT_DF) &&
2386                     (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2387                         vmx_restore_nmi_blocking(vmx, vcpu);
2388
2389                 /*
2390                  * The NMI has already been handled in vmx_exit_handle_nmi().
2391                  */
2392                 if (intr_type == VMCS_INTR_T_NMI)
2393                         return (1);
2394
2395                 /*
2396                  * Call the machine check handler by hand. Also don't reflect
2397                  * the machine check back into the guest.
2398                  */
2399                 if (intr_vec == IDT_MC) {
2400                         VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
2401                         __asm __volatile("int $18");
2402                         return (1);
2403                 }
2404
2405                 if (intr_vec == IDT_PF) {
2406                         error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
2407                         KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
2408                             __func__, error));
2409                 }
2410
2411                 /*
2412                  * Software exceptions exhibit trap-like behavior. This in
2413                  * turn requires populating the VM-entry instruction length
2414                  * so that the %rip in the trap frame is past the INT3/INTO
2415                  * instruction.
2416                  */
2417                 if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2418                         vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2419
2420                 /* Reflect all other exceptions back into the guest */
2421                 errcode_valid = errcode = 0;
2422                 if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2423                         errcode_valid = 1;
2424                         errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2425                 }
2426                 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
2427                     "the guest", intr_vec, errcode);
2428                 error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
2429                     errcode_valid, errcode, 0);
2430                 KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2431                     __func__, error));
2432                 return (1);
2433
2434         case EXIT_REASON_EPT_FAULT:
2435                 /*
2436                  * If 'gpa' lies within the address space allocated to
2437                  * memory then this must be a nested page fault otherwise
2438                  * this must be an instruction that accesses MMIO space.
2439                  */
2440                 gpa = vmcs_gpa();
2441                 if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
2442                     apic_access_fault(vmx, vcpu, gpa)) {
2443                         vmexit->exitcode = VM_EXITCODE_PAGING;
2444                         vmexit->inst_length = 0;
2445                         vmexit->u.paging.gpa = gpa;
2446                         vmexit->u.paging.fault_type = ept_fault_type(qual);
2447                         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2448                 } else if (ept_emulation_fault(qual)) {
2449                         vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2450                         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2451                 }
2452                 /*
2453                  * If Virtual NMIs control is 1 and the VM-exit is due to an
2454                  * EPT fault during the execution of IRET then we must restore
2455                  * the state of "virtual-NMI blocking" before resuming.
2456                  *
2457                  * See description of "NMI unblocking due to IRET" in
2458                  * "Exit Qualification for EPT Violations".
2459                  */
2460                 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2461                     (qual & EXIT_QUAL_NMIUDTI) != 0)
2462                         vmx_restore_nmi_blocking(vmx, vcpu);
2463                 break;
2464         case EXIT_REASON_VIRTUALIZED_EOI:
2465                 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2466                 vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2467                 vmexit->inst_length = 0;        /* trap-like */
2468                 break;
2469         case EXIT_REASON_APIC_ACCESS:
2470                 handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2471                 break;
2472         case EXIT_REASON_APIC_WRITE:
2473                 /*
2474                  * APIC-write VM exit is trap-like so the %rip is already
2475                  * pointing to the next instruction.
2476                  */
2477                 vmexit->inst_length = 0;
2478                 vlapic = vm_lapic(vmx->vm, vcpu);
2479                 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2480                 break;
2481         case EXIT_REASON_XSETBV:
2482                 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2483                 break;
2484         case EXIT_REASON_MONITOR:
2485                 vmexit->exitcode = VM_EXITCODE_MONITOR;
2486                 break;
2487         case EXIT_REASON_MWAIT:
2488                 vmexit->exitcode = VM_EXITCODE_MWAIT;
2489                 break;
2490         default:
2491                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2492                 break;
2493         }
2494
2495         if (handled) {
2496                 /*
2497                  * It is possible that control is returned to userland
2498                  * even though we were able to handle the VM exit in the
2499                  * kernel.
2500                  *
2501                  * In such a case we want to make sure that the userland
2502                  * restarts guest execution at the instruction *after*
2503                  * the one we just processed. Therefore we update the
2504                  * guest rip in the VMCS and in 'vmexit'.
2505                  */
2506                 vmexit->rip += vmexit->inst_length;
2507                 vmexit->inst_length = 0;
2508                 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2509         } else {
2510                 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2511                         /*
2512                          * If this VM exit was not claimed by anybody then
2513                          * treat it as a generic VMX exit.
2514                          */
2515                         vmexit->exitcode = VM_EXITCODE_VMX;
2516                         vmexit->u.vmx.status = VM_SUCCESS;
2517                         vmexit->u.vmx.inst_type = 0;
2518                         vmexit->u.vmx.inst_error = 0;
2519                 } else {
2520                         /*
2521                          * The exitcode and collateral have been populated.
2522                          * The VM exit will be processed further in userland.
2523                          */
2524                 }
2525         }
2526         return (handled);
2527 }
2528
2529 static __inline void
2530 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2531 {
2532
2533         KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2534             ("vmx_exit_inst_error: invalid inst_fail_status %d",
2535             vmxctx->inst_fail_status));
2536
2537         vmexit->inst_length = 0;
2538         vmexit->exitcode = VM_EXITCODE_VMX;
2539         vmexit->u.vmx.status = vmxctx->inst_fail_status;
2540         vmexit->u.vmx.inst_error = vmcs_instruction_error();
2541         vmexit->u.vmx.exit_reason = ~0;
2542         vmexit->u.vmx.exit_qualification = ~0;
2543
2544         switch (rc) {
2545         case VMX_VMRESUME_ERROR:
2546         case VMX_VMLAUNCH_ERROR:
2547         case VMX_INVEPT_ERROR:
2548                 vmexit->u.vmx.inst_type = rc;
2549                 break;
2550         default:
2551                 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
2552         }
2553 }
2554
2555 /*
2556  * If the NMI-exiting VM execution control is set to '1' then an NMI in
2557  * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2558  * sufficient to simply vector to the NMI handler via a software interrupt.
2559  * However, this must be done before maskable interrupts are enabled;
2560  * otherwise the "iret" issued by an interrupt handler will incorrectly
2561  * clear NMI blocking.
2562  */
2563 static __inline void
2564 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2565 {
2566         uint32_t intr_info;
2567
2568         KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2569
2570         if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2571                 return;
2572
2573         intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2574         KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2575             ("VM exit interruption info invalid: %#x", intr_info));
2576
2577         if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2578                 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2579                     "to NMI has invalid vector: %#x", intr_info));
2580                 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2581                 __asm __volatile("int $2");
2582         }
2583 }
2584
2585 static __inline void
2586 vmx_dr_enter_guest(struct vmxctx *vmxctx)
2587 {
2588         register_t rflags;
2589
2590         /* Save host control debug registers. */
2591         vmxctx->host_dr7 = rdr7();
2592         vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);
2593
2594         /*
2595          * Disable debugging in DR7 and DEBUGCTL to avoid triggering
2596          * exceptions in the host based on the guest DRx values.  The
2597          * guest DR7 and DEBUGCTL are saved/restored in the VMCS.
2598          */
2599         load_dr7(0);
2600         wrmsr(MSR_DEBUGCTLMSR, 0);
2601
2602         /*
2603          * Disable single stepping the kernel to avoid corrupting the
2604          * guest DR6.  A debugger might still be able to corrupt the
2605          * guest DR6 by setting a breakpoint after this point and then
2606          * single stepping.
2607          */
2608         rflags = read_rflags();
2609         vmxctx->host_tf = rflags & PSL_T;
2610         write_rflags(rflags & ~PSL_T);
2611
2612         /* Save host debug registers. */
2613         vmxctx->host_dr0 = rdr0();
2614         vmxctx->host_dr1 = rdr1();
2615         vmxctx->host_dr2 = rdr2();
2616         vmxctx->host_dr3 = rdr3();
2617         vmxctx->host_dr6 = rdr6();
2618
2619         /* Restore guest debug registers. */
2620         load_dr0(vmxctx->guest_dr0);
2621         load_dr1(vmxctx->guest_dr1);
2622         load_dr2(vmxctx->guest_dr2);
2623         load_dr3(vmxctx->guest_dr3);
2624         load_dr6(vmxctx->guest_dr6);
2625 }
2626
2627 static __inline void
2628 vmx_dr_leave_guest(struct vmxctx *vmxctx)
2629 {
2630
2631         /* Save guest debug registers. */
2632         vmxctx->guest_dr0 = rdr0();
2633         vmxctx->guest_dr1 = rdr1();
2634         vmxctx->guest_dr2 = rdr2();
2635         vmxctx->guest_dr3 = rdr3();
2636         vmxctx->guest_dr6 = rdr6();
2637
2638         /*
2639          * Restore host debug registers.  Restore DR7, DEBUGCTL, and
2640          * PSL_T last.
2641          */
2642         load_dr0(vmxctx->host_dr0);
2643         load_dr1(vmxctx->host_dr1);
2644         load_dr2(vmxctx->host_dr2);
2645         load_dr3(vmxctx->host_dr3);
2646         load_dr6(vmxctx->host_dr6);
2647         wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl);
2648         load_dr7(vmxctx->host_dr7);
2649         write_rflags(read_rflags() | vmxctx->host_tf);
2650 }
2651
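/*
 * Enter the guest on the current CPU and keep re-entering it for as long
 * as the resulting VM-exits can be handled in the kernel.  When an exit
 * must be completed in userland the loop terminates and the details are
 * returned in the vcpu's vm_exit structure.
 */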
2652 static int
2653 vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
2654     struct vm_eventinfo *evinfo)
2655 {
2656         int rc, handled, launched;
2657         struct vmx *vmx;
2658         struct vm *vm;
2659         struct vmxctx *vmxctx;
2660         struct vmcs *vmcs;
2661         struct vm_exit *vmexit;
2662         struct vlapic *vlapic;
2663         uint32_t exit_reason;
2664
2665         vmx = arg;
2666         vm = vmx->vm;
2667         vmcs = &vmx->vmcs[vcpu];
2668         vmxctx = &vmx->ctx[vcpu];
2669         vlapic = vm_lapic(vm, vcpu);
2670         vmexit = vm_exitinfo(vm, vcpu);
2671         launched = 0;
2672
2673         KASSERT(vmxctx->pmap == pmap,
2674             ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2675
2676         vmx_msr_guest_enter(vmx, vcpu);
2677
2678         VMPTRLD(vmcs);
2679
2680         /*
2681          * XXX
2682  * We do this every time because we may set up the virtual machine
2683          * from a different process than the one that actually runs it.
2684          *
2685          * If the life of a virtual machine was spent entirely in the context
2686          * of a single process we could do this once in vmx_vminit().
2687          */
2688         vmcs_write(VMCS_HOST_CR3, rcr3());
2689
2690         vmcs_write(VMCS_GUEST_RIP, rip);
2691         vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2692         do {
2693                 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
2694                     "%#lx/%#lx", __func__, vmcs_guest_rip(), rip));
2695
2696                 handled = UNHANDLED;
2697                 /*
2698                  * Interrupts are disabled from this point on until the
2699                  * guest starts executing. This is done for the following
2700                  * reasons:
2701                  *
2702                  * If an AST is asserted on this thread after the check below,
2703                  * then the IPI_AST notification will not be lost, because it
2704                  * will cause a VM exit due to external interrupt as soon as
2705                  * the guest state is loaded.
2706                  *
2707                  * A posted interrupt after 'vmx_inject_interrupts()' will
2708                  * not be "lost" because it will be held pending in the host
2709                  * APIC because interrupts are disabled. The pending interrupt
2710                  * will be recognized as soon as the guest state is loaded.
2711                  *
2712                  * The same reasoning applies to the IPI generated by
2713                  * pmap_invalidate_ept().
2714                  */
2715                 disable_intr();
2716                 vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
2717
2718                 /*
2719                  * Check for vcpu suspension after injecting events because
2720                  * vmx_inject_interrupts() can suspend the vcpu due to a
2721                  * triple fault.
2722                  */
2723                 if (vcpu_suspended(evinfo)) {
2724                         enable_intr();
2725                         vm_exit_suspended(vmx->vm, vcpu, rip);
2726                         break;
2727                 }
2728
2729                 if (vcpu_rendezvous_pending(evinfo)) {
2730                         enable_intr();
2731                         vm_exit_rendezvous(vmx->vm, vcpu, rip);
2732                         break;
2733                 }
2734
2735                 if (vcpu_reqidle(evinfo)) {
2736                         enable_intr();
2737                         vm_exit_reqidle(vmx->vm, vcpu, rip);
2738                         break;
2739                 }
2740
2741                 if (vcpu_should_yield(vm, vcpu)) {
2742                         enable_intr();
2743                         vm_exit_astpending(vmx->vm, vcpu, rip);
2744                         vmx_astpending_trace(vmx, vcpu, rip);
2745                         handled = HANDLED;
2746                         break;
2747                 }
2748
2749                 vmx_run_trace(vmx, vcpu);
2750                 vmx_dr_enter_guest(vmxctx);
2751                 rc = vmx_enter_guest(vmxctx, vmx, launched);
2752                 vmx_dr_leave_guest(vmxctx);
2753
2754                 /* Collect some information for VM exit processing */
2755                 vmexit->rip = rip = vmcs_guest_rip();
2756                 vmexit->inst_length = vmexit_instruction_length();
2757                 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2758                 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2759
2760                 /* Update 'nextrip' */
2761                 vmx->state[vcpu].nextrip = rip;
2762
2763                 if (rc == VMX_GUEST_VMEXIT) {
2764                         vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2765                         enable_intr();
2766                         handled = vmx_exit_process(vmx, vcpu, vmexit);
2767                 } else {
2768                         enable_intr();
2769                         vmx_exit_inst_error(vmxctx, rc, vmexit);
2770                 }
2771                 launched = 1;
2772                 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2773                 rip = vmexit->rip;
2774         } while (handled);
2775
2776         /*
2777          * If a VM exit has been handled then the exitcode must be BOGUS.
2778          * If a VM exit is not handled then the exitcode must not be BOGUS.
2779          */
2780         if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2781             (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2782                 panic("Mismatch between handled (%d) and exitcode (%d)",
2783                       handled, vmexit->exitcode);
2784         }
2785
2786         if (!handled)
2787                 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2788
2789         VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2790             vmexit->exitcode);
2791
2792         VMCLEAR(vmcs);
2793         vmx_msr_guest_exit(vmx, vcpu);
2794
2795         return (0);
2796 }
2797
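/*
 * Per-VM teardown: release the APIC access page mapping if APIC access
 * virtualization was in use, return each vcpu's VPID to the allocator and
 * free the vmx structure itself.
 */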
2798 static void
2799 vmx_vmcleanup(void *arg)
2800 {
2801         int i;
2802         struct vmx *vmx = arg;
2803
2804         if (apic_access_virtualization(vmx, 0))
2805                 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
2806
2807         for (i = 0; i < VM_MAXCPU; i++)
2808                 vpid_free(vmx->state[i].vpid);
2809
2810         free(vmx, M_VMX);
2811
2812         return;
2813 }
2814
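/*
 * Return a pointer to the field in the software register save area
 * ('struct vmxctx') that backs the given VM_REG_* identifier.  Only the
 * general purpose registers, %cr2 and the debug registers kept in the
 * vmxctx are handled here; registers that live in the VMCS (e.g. %rip,
 * %rsp, %rflags, control and segment registers) yield NULL so that
 * callers fall back to the vmcs_getreg()/vmcs_setreg() accessors.
 */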
2815 static register_t *
2816 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
2817 {
2818
2819         switch (reg) {
2820         case VM_REG_GUEST_RAX:
2821                 return (&vmxctx->guest_rax);
2822         case VM_REG_GUEST_RBX:
2823                 return (&vmxctx->guest_rbx);
2824         case VM_REG_GUEST_RCX:
2825                 return (&vmxctx->guest_rcx);
2826         case VM_REG_GUEST_RDX:
2827                 return (&vmxctx->guest_rdx);
2828         case VM_REG_GUEST_RSI:
2829                 return (&vmxctx->guest_rsi);
2830         case VM_REG_GUEST_RDI:
2831                 return (&vmxctx->guest_rdi);
2832         case VM_REG_GUEST_RBP:
2833                 return (&vmxctx->guest_rbp);
2834         case VM_REG_GUEST_R8:
2835                 return (&vmxctx->guest_r8);
2836         case VM_REG_GUEST_R9:
2837                 return (&vmxctx->guest_r9);
2838         case VM_REG_GUEST_R10:
2839                 return (&vmxctx->guest_r10);
2840         case VM_REG_GUEST_R11:
2841                 return (&vmxctx->guest_r11);
2842         case VM_REG_GUEST_R12:
2843                 return (&vmxctx->guest_r12);
2844         case VM_REG_GUEST_R13:
2845                 return (&vmxctx->guest_r13);
2846         case VM_REG_GUEST_R14:
2847                 return (&vmxctx->guest_r14);
2848         case VM_REG_GUEST_R15:
2849                 return (&vmxctx->guest_r15);
2850         case VM_REG_GUEST_CR2:
2851                 return (&vmxctx->guest_cr2);
2852         case VM_REG_GUEST_DR0:
2853                 return (&vmxctx->guest_dr0);
2854         case VM_REG_GUEST_DR1:
2855                 return (&vmxctx->guest_dr1);
2856         case VM_REG_GUEST_DR2:
2857                 return (&vmxctx->guest_dr2);
2858         case VM_REG_GUEST_DR3:
2859                 return (&vmxctx->guest_dr3);
2860         case VM_REG_GUEST_DR6:
2861                 return (&vmxctx->guest_dr6);
2862         default:
2863                 break;
2864         }
2865         return (NULL);
2866 }
2867
2868 static int
2869 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
2870 {
2871         register_t *regp;
2872
2873         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2874                 *retval = *regp;
2875                 return (0);
2876         } else
2877                 return (EINVAL);
2878 }
2879
2880 static int
2881 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
2882 {
2883         register_t *regp;
2884
2885         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
2886                 *regp = val;
2887                 return (0);
2888         } else
2889                 return (EINVAL);
2890 }
2891
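/*
 * VM_REG_GUEST_INTR_SHADOW is a pseudo-register derived from the guest
 * interruptibility-state field in the VMCS: it reads as 1 if any of the
 * HWINTR_BLOCKING bits are set and as 0 otherwise.  Writes may only
 * clear the shadow; see vmx_modify_intr_shadow() below.
 */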
2892 static int
2893 vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
2894 {
2895         uint64_t gi;
2896         int error;
2897
2898         error = vmcs_getreg(&vmx->vmcs[vcpu], running,
2899             VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
2900         *retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
2901         return (error);
2902 }
2903
2904 static int
2905 vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
2906 {
2907         struct vmcs *vmcs;
2908         uint64_t gi;
2909         int error, ident;
2910
2911         /*
2912          * Forcing the vcpu into an interrupt shadow is not supported.
2913          */
2914         if (val) {
2915                 error = EINVAL;
2916                 goto done;
2917         }
2918
2919         vmcs = &vmx->vmcs[vcpu];
2920         ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
2921         error = vmcs_getreg(vmcs, running, ident, &gi);
2922         if (error == 0) {
2923                 gi &= ~HWINTR_BLOCKING;
2924                 error = vmcs_setreg(vmcs, running, ident, gi);
2925         }
2926 done:
2927         VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
2928             error ? "failed" : "succeeded");
2929         return (error);
2930 }
2931
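/*
 * %cr0 and %cr4 have read shadows in the VMCS; guest reads of bits
 * covered by the cr0/cr4 guest/host masks are satisfied from the shadow
 * rather than from the register itself.  Return the VMCS identifier of
 * the shadow for a register, or -1 if it has none, so that vmx_setreg()
 * can keep the shadow in sync with the value being written.
 */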
2932 static int
2933 vmx_shadow_reg(int reg)
2934 {
2935         int shreg;
2936
2937         shreg = -1;
2938
2939         switch (reg) {
2940         case VM_REG_GUEST_CR0:
2941                 shreg = VMCS_CR0_SHADOW;
2942                 break;
2943         case VM_REG_GUEST_CR4:
2944                 shreg = VMCS_CR4_SHADOW;
2945                 break;
2946         default:
2947                 break;
2948         }
2949
2950         return (shreg);
2951 }
2952
2953 static int
2954 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
2955 {
2956         int running, hostcpu;
2957         struct vmx *vmx = arg;
2958
2959         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2960         if (running && hostcpu != curcpu)
2961                 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
2962
2963         if (reg == VM_REG_GUEST_INTR_SHADOW)
2964                 return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
2965
2966         if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
2967                 return (0);
2968
2969         return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
2970 }
2971
2972 static int
2973 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
2974 {
2975         int error, hostcpu, running, shadow;
2976         uint64_t ctls;
2977         pmap_t pmap;
2978         struct vmx *vmx = arg;
2979
2980         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
2981         if (running && hostcpu != curcpu)
2982                 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
2983
2984         if (reg == VM_REG_GUEST_INTR_SHADOW)
2985                 return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
2986
2987         if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
2988                 return (0);
2989
2990         error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
2991
2992         if (error == 0) {
2993                 /*
2994                  * If the "load EFER" VM-entry control is 1 then the
2995                  * value of EFER.LMA must be identical to the "IA-32e
2996                  * mode guest" bit in the VM-entry controls.
2997                  */
2998                 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
2999                     (reg == VM_REG_GUEST_EFER)) {
3000                         vmcs_getreg(&vmx->vmcs[vcpu], running,
3001                                     VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
3002                         if (val & EFER_LMA)
3003                                 ctls |= VM_ENTRY_GUEST_LMA;
3004                         else
3005                                 ctls &= ~VM_ENTRY_GUEST_LMA;
3006                         vmcs_setreg(&vmx->vmcs[vcpu], running,
3007                                     VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
3008                 }
3009
3010                 shadow = vmx_shadow_reg(reg);
3011                 if (shadow > 0) {
3012                         /*
3013                          * Store the unmodified value in the shadow.
3014                          */
3015                         error = vmcs_setreg(&vmx->vmcs[vcpu], running,
3016                                     VMCS_IDENT(shadow), val);
3017                 }
3018
3019                 if (reg == VM_REG_GUEST_CR3) {
3020                         /*
3021                          * Invalidate the guest vcpu's TLB mappings to emulate
3022                          * the behavior of updating %cr3.
3023                          *
3024                          * XXX the processor retains global mappings when %cr3
3025                          * is updated but vmx_invvpid() does not.
3026                          */
3027                         pmap = vmx->ctx[vcpu].pmap;
3028                         vmx_invvpid(vmx, vcpu, pmap, running);
3029                 }
3030         }
3031
3032         return (error);
3033 }
3034
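/*
 * Segment descriptors (base, limit and access rights) are kept entirely
 * in the VMCS so the descriptor accessors simply forward to the vmcs
 * layer after verifying that a running vcpu is only accessed from the
 * host cpu it is running on.
 */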
3035 static int
3036 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
3037 {
3038         int hostcpu, running;
3039         struct vmx *vmx = arg;
3040
3041         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3042         if (running && hostcpu != curcpu)
3043                 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
3044
3045         return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
3046 }
3047
3048 static int
3049 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
3050 {
3051         int hostcpu, running;
3052         struct vmx *vmx = arg;
3053
3054         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3055         if (running && hostcpu != curcpu)
3056                 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
3057
3058         return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
3059 }
3060
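/*
 * Report whether an optional capability is enabled for a vcpu.  ENOENT
 * is returned if the capability is not supported by the processor;
 * otherwise the per-vcpu 'set' bitmap records whether it is currently
 * enabled.
 */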
3061 static int
3062 vmx_getcap(void *arg, int vcpu, int type, int *retval)
3063 {
3064         struct vmx *vmx = arg;
3065         int vcap;
3066         int ret;
3067
3068         ret = ENOENT;
3069
3070         vcap = vmx->cap[vcpu].set;
3071
3072         switch (type) {
3073         case VM_CAP_HALT_EXIT:
3074                 if (cap_halt_exit)
3075                         ret = 0;
3076                 break;
3077         case VM_CAP_PAUSE_EXIT:
3078                 if (cap_pause_exit)
3079                         ret = 0;
3080                 break;
3081         case VM_CAP_MTRAP_EXIT:
3082                 if (cap_monitor_trap)
3083                         ret = 0;
3084                 break;
3085         case VM_CAP_UNRESTRICTED_GUEST:
3086                 if (cap_unrestricted_guest)
3087                         ret = 0;
3088                 break;
3089         case VM_CAP_ENABLE_INVPCID:
3090                 if (cap_invpcid)
3091                         ret = 0;
3092                 break;
3093         default:
3094                 break;
3095         }
3096
3097         if (ret == 0)
3098                 *retval = (vcap & (1 << type)) ? 1 : 0;
3099
3100         return (ret);
3101 }
3102
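/*
 * Enable or disable an optional capability by toggling the corresponding
 * bit in the primary or secondary processor-based VM-execution controls.
 * The cached copy of the control word and the per-vcpu 'set' bitmap are
 * updated only if the vmwrite succeeds.
 */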
3103 static int
3104 vmx_setcap(void *arg, int vcpu, int type, int val)
3105 {
3106         struct vmx *vmx = arg;
3107         struct vmcs *vmcs = &vmx->vmcs[vcpu];
3108         uint32_t baseval;
3109         uint32_t *pptr;
3110         int error;
3111         int flag;
3112         int reg;
3113         int retval;
3114
3115         retval = ENOENT;
3116         pptr = NULL;
3117
3118         switch (type) {
3119         case VM_CAP_HALT_EXIT:
3120                 if (cap_halt_exit) {
3121                         retval = 0;
3122                         pptr = &vmx->cap[vcpu].proc_ctls;
3123                         baseval = *pptr;
3124                         flag = PROCBASED_HLT_EXITING;
3125                         reg = VMCS_PRI_PROC_BASED_CTLS;
3126                 }
3127                 break;
3128         case VM_CAP_MTRAP_EXIT:
3129                 if (cap_monitor_trap) {
3130                         retval = 0;
3131                         pptr = &vmx->cap[vcpu].proc_ctls;
3132                         baseval = *pptr;
3133                         flag = PROCBASED_MTF;
3134                         reg = VMCS_PRI_PROC_BASED_CTLS;
3135                 }
3136                 break;
3137         case VM_CAP_PAUSE_EXIT:
3138                 if (cap_pause_exit) {
3139                         retval = 0;
3140                         pptr = &vmx->cap[vcpu].proc_ctls;
3141                         baseval = *pptr;
3142                         flag = PROCBASED_PAUSE_EXITING;
3143                         reg = VMCS_PRI_PROC_BASED_CTLS;
3144                 }
3145                 break;
3146         case VM_CAP_UNRESTRICTED_GUEST:
3147                 if (cap_unrestricted_guest) {
3148                         retval = 0;
3149                         pptr = &vmx->cap[vcpu].proc_ctls2;
3150                         baseval = *pptr;
3151                         flag = PROCBASED2_UNRESTRICTED_GUEST;
3152                         reg = VMCS_SEC_PROC_BASED_CTLS;
3153                 }
3154                 break;
3155         case VM_CAP_ENABLE_INVPCID:
3156                 if (cap_invpcid) {
3157                         retval = 0;
3158                         pptr = &vmx->cap[vcpu].proc_ctls2;
3159                         baseval = *pptr;
3160                         flag = PROCBASED2_ENABLE_INVPCID;
3161                         reg = VMCS_SEC_PROC_BASED_CTLS;
3162                 }
3163                 break;
3164         default:
3165                 break;
3166         }
3167
3168         if (retval == 0) {
3169                 if (val) {
3170                         baseval |= flag;
3171                 } else {
3172                         baseval &= ~flag;
3173                 }
3174                 VMPTRLD(vmcs);
3175                 error = vmwrite(reg, baseval);
3176                 VMCLEAR(vmcs);
3177
3178                 if (error) {
3179                         retval = error;
3180                 } else {
3181                         /*
3182                          * Update the optional stored flags and record
3183                          * the new setting.
3184                          */
3185                         if (pptr != NULL) {
3186                                 *pptr = baseval;
3187                         }
3188
3189                         if (val) {
3190                                 vmx->cap[vcpu].set |= (1 << type);
3191                         } else {
3192                                 vmx->cap[vcpu].set &= ~(1 << type);
3193                         }
3194                 }
3195         }
3196
3197         return (retval);
3198 }
3199
3200 struct vlapic_vtx {
3201         struct vlapic   vlapic;
3202         struct pir_desc *pir_desc;
3203         struct vmx      *vmx;
3204 };
3205
3206 #define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)   \
3207 do {                                                                    \
3208         VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",     \
3209             level ? "level" : "edge", vector);                          \
3210         VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);  \
3211         VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);  \
3212         VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);  \
3213         VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);  \
3214         VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3215 } while (0)
3216
3217 /*
3218  * vlapic->ops handlers that utilize the APICv hardware assist described in
3219  * Chapter 29 of the Intel SDM.
3220  */
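/*
 * Record an interrupt in the posted interrupt descriptor.  The return
 * value tells the caller whether the target cpu needs to be notified;
 * a notification is required only when the 'pending' bit transitions
 * from 0 to 1.
 */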
3221 static int
3222 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3223 {
3224         struct vlapic_vtx *vlapic_vtx;
3225         struct pir_desc *pir_desc;
3226         uint64_t mask;
3227         int idx, notify;
3228
3229         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3230         pir_desc = vlapic_vtx->pir_desc;
3231
3232         /*
3233          * Keep track of interrupt requests in the PIR descriptor. This is
3234          * because the virtual APIC page pointed to by the VMCS cannot be
3235          * modified if the vcpu is running.
3236          */
3237         idx = vector / 64;
3238         mask = 1UL << (vector % 64);
3239         atomic_set_long(&pir_desc->pir[idx], mask);
3240         notify = atomic_cmpset_long(&pir_desc->pending, 0, 1);
3241
3242         VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3243             level, "vmx_set_intr_ready");
3244         return (notify);
3245 }
3246
3247 static int
3248 vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3249 {
3250         struct vlapic_vtx *vlapic_vtx;
3251         struct pir_desc *pir_desc;
3252         struct LAPIC *lapic;
3253         uint64_t pending, pirval;
3254         uint32_t ppr, vpr;
3255         int i;
3256
3257         /*
3258          * This function is only expected to be called from the 'HLT' exit
3259          * handler which does not care about the vector that is pending.
3260          */
3261         KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3262
3263         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3264         pir_desc = vlapic_vtx->pir_desc;
3265
3266         pending = atomic_load_acq_long(&pir_desc->pending);
3267         if (!pending) {
3268                 /*
3269                  * While a virtual interrupt may have already been
3270                  * processed, the actual delivery may still be pending
3271                  * on the interruptibility of the guest.  Recognize a
3272                  * pending interrupt by reevaluating virtual interrupts
3273                  * following Section 29.2.1 in the Intel SDM Volume 3.
3274                  */
3275                 struct vm_exit *vmexit;
3276                 uint8_t rvi, ppr;
3277
3278                 vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
3279                 KASSERT(vmexit->exitcode == VM_EXITCODE_HLT,
3280                     ("vmx_pending_intr: exitcode not 'HLT'"));
3281                 rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT;
3282                 lapic = vlapic->apic_page;
3283                 ppr = lapic->ppr & APIC_TPR_INT;
3284                 if (rvi > ppr) {
3285                         return (1);
3286                 }
3287
3288                 return (0);
3289         }
3290
3291         /*
3292          * If there is an interrupt pending then it will be recognized only
3293          * if its priority is greater than the processor priority.
3294          *
3295          * Special case: if the processor priority is zero then any pending
3296          * interrupt will be recognized.
3297          */
3298         lapic = vlapic->apic_page;
3299         ppr = lapic->ppr & APIC_TPR_INT;
3300         if (ppr == 0)
3301                 return (1);
3302
3303         VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3304             lapic->ppr);
3305
3306         for (i = 3; i >= 0; i--) {
3307                 pirval = pir_desc->pir[i];
3308                 if (pirval != 0) {
3309                         vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT;
3310                         return (vpr > ppr);
3311                 }
3312         }
3313         return (0);
3314 }
3315
3316 static void
3317 vmx_intr_accepted(struct vlapic *vlapic, int vector)
3318 {
3319
3320         panic("vmx_intr_accepted: not expected to be called");
3321 }
3322
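/*
 * Program the EOI-exit bitmap for a vector.  With virtual interrupt
 * delivery an EOI of a vector whose bit is set causes an EOI-induced
 * VM exit; this is needed for level-triggered interrupts so that the
 * EOI can be propagated to the virtual I/O APIC in software.
 */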
3323 static void
3324 vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3325 {
3326         struct vlapic_vtx *vlapic_vtx;
3327         struct vmx *vmx;
3328         struct vmcs *vmcs;
3329         uint64_t mask, val;
3330
3331         KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3332         KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3333             ("vmx_set_tmr: vcpu cannot be running"));
3334
3335         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3336         vmx = vlapic_vtx->vmx;
3337         vmcs = &vmx->vmcs[vlapic->vcpuid];
3338         mask = 1UL << (vector % 64);
3339
3340         VMPTRLD(vmcs);
3341         val = vmcs_read(VMCS_EOI_EXIT(vector));
3342         if (level)
3343                 val |= mask;
3344         else
3345                 val &= ~mask;
3346         vmcs_write(VMCS_EOI_EXIT(vector), val);
3347         VMCLEAR(vmcs);
3348 }
3349
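/*
 * Switch a vcpu from xAPIC to x2APIC mode: replace "virtualize APIC
 * accesses" with "virtualize x2APIC mode" in the secondary processor
 * controls.  The APIC access page mapping and the MSR bitmap are shared
 * by all vcpus, so they are adjusted once, from vcpu 0.
 */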
3350 static void
3351 vmx_enable_x2apic_mode(struct vlapic *vlapic)
3352 {
3353         struct vmx *vmx;
3354         struct vmcs *vmcs;
3355         uint32_t proc_ctls2;
3356         int vcpuid, error;
3357
3358         vcpuid = vlapic->vcpuid;
3359         vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3360         vmcs = &vmx->vmcs[vcpuid];
3361
3362         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3363         KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3364             ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3365
3366         proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3367         proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3368         vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3369
3370         VMPTRLD(vmcs);
3371         vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3372         VMCLEAR(vmcs);
3373
3374         if (vlapic->vcpuid == 0) {
3375                 /*
3376                  * The nested page table mappings are shared by all vcpus
3377                  * so unmap the APIC access page just once.
3378                  */
3379                 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3380                 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3381                     __func__, error));
3382
3383                 /*
3384                  * The MSR bitmap is shared by all vcpus so modify it only
3385                  * once in the context of vcpu 0.
3386                  */
3387                 error = vmx_allow_x2apic_msrs(vmx);
3388                 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3389                     __func__, error));
3390         }
3391 }
3392
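/*
 * Send the posted-interrupt notification vector to the host cpu that the
 * vcpu is currently running on.  If the vcpu is still in guest mode when
 * the IPI arrives, the processor delivers the interrupts recorded in the
 * PIR directly to the guest without causing a VM exit.
 */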
3393 static void
3394 vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3395 {
3396
3397         ipi_cpu(hostcpu, pirvec);
3398 }
3399
3400 /*
3401  * Transfer the pending interrupts in the PIR descriptor to the IRR
3402  * in the virtual APIC page.
3403  */
3404 static void
3405 vmx_inject_pir(struct vlapic *vlapic)
3406 {
3407         struct vlapic_vtx *vlapic_vtx;
3408         struct pir_desc *pir_desc;
3409         struct LAPIC *lapic;
3410         uint64_t val, pirval;
3411         int rvi, pirbase = -1;
3412         uint16_t intr_status_old, intr_status_new;
3413
3414         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3415         pir_desc = vlapic_vtx->pir_desc;
3416         if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3417                 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3418                     "no posted interrupt pending");
3419                 return;
3420         }
3421
3422         pirval = 0;
3423         pirbase = -1;
3424         lapic = vlapic->apic_page;
3425
3426         val = atomic_readandclear_long(&pir_desc->pir[0]);
3427         if (val != 0) {
3428                 lapic->irr0 |= val;
3429                 lapic->irr1 |= val >> 32;
3430                 pirbase = 0;
3431                 pirval = val;
3432         }
3433
3434         val = atomic_readandclear_long(&pir_desc->pir[1]);
3435         if (val != 0) {
3436                 lapic->irr2 |= val;
3437                 lapic->irr3 |= val >> 32;
3438                 pirbase = 64;
3439                 pirval = val;
3440         }
3441
3442         val = atomic_readandclear_long(&pir_desc->pir[2]);
3443         if (val != 0) {
3444                 lapic->irr4 |= val;
3445                 lapic->irr5 |= val >> 32;
3446                 pirbase = 128;
3447                 pirval = val;
3448         }
3449
3450         val = atomic_readandclear_long(&pir_desc->pir[3]);
3451         if (val != 0) {
3452                 lapic->irr6 |= val;
3453                 lapic->irr7 |= val >> 32;
3454                 pirbase = 192;
3455                 pirval = val;
3456         }
3457
3458         VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3459
3460         /*
3461          * Update RVI so the processor can evaluate pending virtual
3462          * interrupts on VM-entry.
3463          *
3464          * It is possible for pirval to be 0 here, even though the
3465          * pending bit has been set. The scenario is:
3466          * CPU-Y is sending a posted interrupt to CPU-X, which
3467          * is running a guest and processing posted interrupts in h/w.
3468          * CPU-X will eventually exit and the state seen in s/w is
3469          * the pending bit set, but no PIR bits set.
3470          *
3471          *      CPU-X                      CPU-Y
3472          *   (vm running)                (host running)
3473          *   rx posted interrupt
3474          *   CLEAR pending bit
3475          *                               SET PIR bit
3476          *   READ/CLEAR PIR bits
3477          *                               SET pending bit
3478          *   (vm exit)
3479          *   pending bit set, PIR 0
3480          */
3481         if (pirval != 0) {
3482                 rvi = pirbase + flsl(pirval) - 1;
3483                 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3484                 intr_status_new = (intr_status_old & 0xFF00) | rvi;
3485                 if (intr_status_new > intr_status_old) {
3486                         vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3487                         VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3488                             "guest_intr_status changed from 0x%04x to 0x%04x",
3489                             intr_status_old, intr_status_new);
3490                 }
3491         }
3492 }
3493
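/*
 * Allocate and initialize the virtual local APIC for a vcpu.  The
 * hardware-assisted handlers are installed only when APICv virtual
 * interrupt delivery and/or posted interrupt processing were detected at
 * module initialization; otherwise the generic software vlapic paths are
 * used.
 */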
3494 static struct vlapic *
3495 vmx_vlapic_init(void *arg, int vcpuid)
3496 {
3497         struct vmx *vmx;
3498         struct vlapic *vlapic;
3499         struct vlapic_vtx *vlapic_vtx;
3500
3501         vmx = arg;
3502
3503         vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3504         vlapic->vm = vmx->vm;
3505         vlapic->vcpuid = vcpuid;
3506         vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3507
3508         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3509         vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3510         vlapic_vtx->vmx = vmx;
3511
3512         if (virtual_interrupt_delivery) {
3513                 vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3514                 vlapic->ops.pending_intr = vmx_pending_intr;
3515                 vlapic->ops.intr_accepted = vmx_intr_accepted;
3516                 vlapic->ops.set_tmr = vmx_set_tmr;
3517                 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3518         }
3519
3520         if (posted_interrupts)
3521                 vlapic->ops.post_intr = vmx_post_intr;
3522
3523         vlapic_init(vlapic);
3524
3525         return (vlapic);
3526 }
3527
3528 static void
3529 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3530 {
3531
3532         vlapic_cleanup(vlapic);
3533         free(vlapic, M_VLAPIC);
3534 }
3535
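/*
 * Table of VT-x entry points invoked by the processor-independent vmm
 * layer.
 */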
3536 struct vmm_ops vmm_ops_intel = {
3537         vmx_init,
3538         vmx_cleanup,
3539         vmx_restore,
3540         vmx_vminit,
3541         vmx_run,
3542         vmx_vmcleanup,
3543         vmx_getreg,
3544         vmx_setreg,
3545         vmx_getdesc,
3546         vmx_setdesc,
3547         vmx_getcap,
3548         vmx_setcap,
3549         ept_vmspace_alloc,
3550         ept_vmspace_free,
3551         vmx_vlapic_init,
3552         vmx_vlapic_cleanup,
3553 };