sys/amd64/vmm/intel/vmx.c
Use vmcs_read() and vmcs_write() in preference to vmread() and vmwrite()
1 /*-
2  * Copyright (c) 2011 NetApp, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/smp.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/pcpu.h>
38 #include <sys/proc.h>
39 #include <sys/sysctl.h>
40
41 #include <vm/vm.h>
42 #include <vm/pmap.h>
43
44 #include <machine/psl.h>
45 #include <machine/cpufunc.h>
46 #include <machine/md_var.h>
47 #include <machine/segments.h>
48 #include <machine/specialreg.h>
49 #include <machine/vmparam.h>
50
51 #include <machine/vmm.h>
52 #include "vmm_host.h"
53 #include "vmm_lapic.h"
54 #include "vmm_msr.h"
55 #include "vmm_ktr.h"
56 #include "vmm_stat.h"
57
58 #include "vmx_msr.h"
59 #include "ept.h"
60 #include "vmx_cpufunc.h"
61 #include "vmx.h"
62 #include "x86.h"
63 #include "vmx_controls.h"
64
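/*
 * Desired settings for the various VMX controls: bits named in a
 * *_ONE_SETTING macro must be 1 and bits named in a *_ZERO_SETTING macro
 * must be 0.  vmx_init() uses vmx_set_ctlreg() to verify that the
 * processor supports each combination before enabling VMX operation.
 */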
65 #define PINBASED_CTLS_ONE_SETTING                                       \
66         (PINBASED_EXTINT_EXITING        |                               \
67          PINBASED_NMI_EXITING           |                               \
68          PINBASED_VIRTUAL_NMI)
69 #define PINBASED_CTLS_ZERO_SETTING      0
70
71 #define PROCBASED_CTLS_WINDOW_SETTING                                   \
72         (PROCBASED_INT_WINDOW_EXITING   |                               \
73          PROCBASED_NMI_WINDOW_EXITING)
74
75 #define PROCBASED_CTLS_ONE_SETTING                                      \
76         (PROCBASED_SECONDARY_CONTROLS   |                               \
77          PROCBASED_IO_EXITING           |                               \
78          PROCBASED_MSR_BITMAPS          |                               \
79          PROCBASED_CTLS_WINDOW_SETTING)
80 #define PROCBASED_CTLS_ZERO_SETTING     \
81         (PROCBASED_CR3_LOAD_EXITING |   \
82         PROCBASED_CR3_STORE_EXITING |   \
83         PROCBASED_IO_BITMAPS)
84
85 #define PROCBASED_CTLS2_ONE_SETTING     PROCBASED2_ENABLE_EPT
86 #define PROCBASED_CTLS2_ZERO_SETTING    0
87
88 #define VM_EXIT_CTLS_ONE_SETTING_NO_PAT                                 \
89         (VM_EXIT_HOST_LMA                       |                       \
90         VM_EXIT_SAVE_EFER                       |                       \
91         VM_EXIT_LOAD_EFER)
92
93 #define VM_EXIT_CTLS_ONE_SETTING                                        \
94         (VM_EXIT_CTLS_ONE_SETTING_NO_PAT        |                       \
95         VM_EXIT_SAVE_PAT                        |                       \
96         VM_EXIT_LOAD_PAT)
97 #define VM_EXIT_CTLS_ZERO_SETTING       VM_EXIT_SAVE_DEBUG_CONTROLS
98
99 #define VM_ENTRY_CTLS_ONE_SETTING_NO_PAT        VM_ENTRY_LOAD_EFER
100
101 #define VM_ENTRY_CTLS_ONE_SETTING                                       \
102         (VM_ENTRY_CTLS_ONE_SETTING_NO_PAT       |                       \
103         VM_ENTRY_LOAD_PAT)
104 #define VM_ENTRY_CTLS_ZERO_SETTING                                      \
105         (VM_ENTRY_LOAD_DEBUG_CONTROLS           |                       \
106         VM_ENTRY_INTO_SMM                       |                       \
107         VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
108
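/*
 * Give the guest direct read/write access to 'msr' by clearing the
 * corresponding intercept bits in the MSR bitmap.  Evaluates to a non-zero
 * value if the bitmap cannot be updated, so callers panic on failure.
 */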
109 #define guest_msr_rw(vmx, msr) \
110         msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
111
112 #define HANDLED         1
113 #define UNHANDLED       0
114
115 MALLOC_DEFINE(M_VMX, "vmx", "vmx");
116
117 SYSCTL_DECL(_hw_vmm);
118 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
119
120 int vmxon_enabled[MAXCPU];
121 static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
122
123 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
124 static uint32_t exit_ctls, entry_ctls;
125
126 static uint64_t cr0_ones_mask, cr0_zeros_mask;
127 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
128              &cr0_ones_mask, 0, NULL);
129 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
130              &cr0_zeros_mask, 0, NULL);
131
132 static uint64_t cr4_ones_mask, cr4_zeros_mask;
133 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
134              &cr4_ones_mask, 0, NULL);
135 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
136              &cr4_zeros_mask, 0, NULL);
137
138 static int vmx_no_patmsr;
139
140 static int vmx_initialized;
141 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
142            &vmx_initialized, 0, "Intel VMX initialized");
143
144 /*
145  * Virtual NMI blocking conditions.
146  *
147  * Some processor implementations also require NMI to be blocked if
148  * the STI_BLOCKING bit is set. It is possible to detect this at runtime
149  * based on the (exit_reason,exit_qual) tuple being set to 
150  * (EXIT_REASON_INVAL_VMCS, EXIT_QUAL_NMI_WHILE_STI_BLOCKING).
151  *
152  * We take the easy way out and also include STI_BLOCKING as one of the
153  * gating items for vNMI injection.
154  */
155 static uint64_t nmi_blocking_bits = VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING |
156                                     VMCS_INTERRUPTIBILITY_NMI_BLOCKING |
157                                     VMCS_INTERRUPTIBILITY_STI_BLOCKING;
158
159 /*
160  * Optional capabilities
161  */
162 static int cap_halt_exit;
163 static int cap_pause_exit;
164 static int cap_unrestricted_guest;
165 static int cap_monitor_trap;
166 static int cap_invpcid;
167  
168 static struct unrhdr *vpid_unr;
169 static u_int vpid_alloc_failed;
170 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
171             &vpid_alloc_failed, 0, NULL);
172
173 #ifdef KTR
174 static const char *
175 exit_reason_to_str(int reason)
176 {
177         static char reasonbuf[32];
178
179         switch (reason) {
180         case EXIT_REASON_EXCEPTION:
181                 return "exception";
182         case EXIT_REASON_EXT_INTR:
183                 return "extint";
184         case EXIT_REASON_TRIPLE_FAULT:
185                 return "triplefault";
186         case EXIT_REASON_INIT:
187                 return "init";
188         case EXIT_REASON_SIPI:
189                 return "sipi";
190         case EXIT_REASON_IO_SMI:
191                 return "iosmi";
192         case EXIT_REASON_SMI:
193                 return "smi";
194         case EXIT_REASON_INTR_WINDOW:
195                 return "intrwindow";
196         case EXIT_REASON_NMI_WINDOW:
197                 return "nmiwindow";
198         case EXIT_REASON_TASK_SWITCH:
199                 return "taskswitch";
200         case EXIT_REASON_CPUID:
201                 return "cpuid";
202         case EXIT_REASON_GETSEC:
203                 return "getsec";
204         case EXIT_REASON_HLT:
205                 return "hlt";
206         case EXIT_REASON_INVD:
207                 return "invd";
208         case EXIT_REASON_INVLPG:
209                 return "invlpg";
210         case EXIT_REASON_RDPMC:
211                 return "rdpmc";
212         case EXIT_REASON_RDTSC:
213                 return "rdtsc";
214         case EXIT_REASON_RSM:
215                 return "rsm";
216         case EXIT_REASON_VMCALL:
217                 return "vmcall";
218         case EXIT_REASON_VMCLEAR:
219                 return "vmclear";
220         case EXIT_REASON_VMLAUNCH:
221                 return "vmlaunch";
222         case EXIT_REASON_VMPTRLD:
223                 return "vmptrld";
224         case EXIT_REASON_VMPTRST:
225                 return "vmptrst";
226         case EXIT_REASON_VMREAD:
227                 return "vmread";
228         case EXIT_REASON_VMRESUME:
229                 return "vmresume";
230         case EXIT_REASON_VMWRITE:
231                 return "vmwrite";
232         case EXIT_REASON_VMXOFF:
233                 return "vmxoff";
234         case EXIT_REASON_VMXON:
235                 return "vmxon";
236         case EXIT_REASON_CR_ACCESS:
237                 return "craccess";
238         case EXIT_REASON_DR_ACCESS:
239                 return "draccess";
240         case EXIT_REASON_INOUT:
241                 return "inout";
242         case EXIT_REASON_RDMSR:
243                 return "rdmsr";
244         case EXIT_REASON_WRMSR:
245                 return "wrmsr";
246         case EXIT_REASON_INVAL_VMCS:
247                 return "invalvmcs";
248         case EXIT_REASON_INVAL_MSR:
249                 return "invalmsr";
250         case EXIT_REASON_MWAIT:
251                 return "mwait";
252         case EXIT_REASON_MTF:
253                 return "mtf";
254         case EXIT_REASON_MONITOR:
255                 return "monitor";
256         case EXIT_REASON_PAUSE:
257                 return "pause";
258         case EXIT_REASON_MCE:
259                 return "mce";
260         case EXIT_REASON_TPR:
261                 return "tpr";
262         case EXIT_REASON_APIC:
263                 return "apic";
264         case EXIT_REASON_GDTR_IDTR:
265                 return "gdtridtr";
266         case EXIT_REASON_LDTR_TR:
267                 return "ldtrtr";
268         case EXIT_REASON_EPT_FAULT:
269                 return "eptfault";
270         case EXIT_REASON_EPT_MISCONFIG:
271                 return "eptmisconfig";
272         case EXIT_REASON_INVEPT:
273                 return "invept";
274         case EXIT_REASON_RDTSCP:
275                 return "rdtscp";
276         case EXIT_REASON_VMX_PREEMPT:
277                 return "vmxpreempt";
278         case EXIT_REASON_INVVPID:
279                 return "invvpid";
280         case EXIT_REASON_WBINVD:
281                 return "wbinvd";
282         case EXIT_REASON_XSETBV:
283                 return "xsetbv";
284         default:
285                 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
286                 return (reasonbuf);
287         }
288 }
289
290 #ifdef SETJMP_TRACE
291 static const char *
292 vmx_setjmp_rc2str(int rc)
293 {
294         switch (rc) {
295         case VMX_RETURN_DIRECT:
296                 return "direct";
297         case VMX_RETURN_LONGJMP:
298                 return "longjmp";
299         case VMX_RETURN_VMRESUME:
300                 return "vmresume";
301         case VMX_RETURN_VMLAUNCH:
302                 return "vmlaunch";
303         case VMX_RETURN_AST:
304                 return "ast";
305         default:
306                 return "unknown";
307         }
308 }
309
310 #define SETJMP_TRACE(vmx, vcpu, vmxctx, regname)                            \
311         VCPU_CTR1((vmx)->vm, (vcpu), "setjmp trace " #regname " 0x%016lx",  \
312                  (vmxctx)->regname)
313
314 static void
315 vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)
316 {
317         uint64_t host_rip, host_rsp;
318
319         if (vmxctx != &vmx->ctx[vcpu])
320                 panic("vmx_setjmp_trace: invalid vmxctx %p; should be %p",
321                         vmxctx, &vmx->ctx[vcpu]);
322
323         VCPU_CTR1((vmx)->vm, (vcpu), "vmxctx = %p", vmxctx);
324         VCPU_CTR2((vmx)->vm, (vcpu), "setjmp return code %s(%d)",
325                  vmx_setjmp_rc2str(rc), rc);
326
327         host_rip = vmcs_read(VMCS_HOST_RIP);
328         host_rsp = vmcs_read(VMCS_HOST_RSP);
329         VCPU_CTR2((vmx)->vm, (vcpu), "vmcs host_rip 0x%016lx, host_rsp %#lx",
330                  host_rip, host_rsp);
331
332         SETJMP_TRACE(vmx, vcpu, vmxctx, host_r15);
333         SETJMP_TRACE(vmx, vcpu, vmxctx, host_r14);
334         SETJMP_TRACE(vmx, vcpu, vmxctx, host_r13);
335         SETJMP_TRACE(vmx, vcpu, vmxctx, host_r12);
336         SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbp);
337         SETJMP_TRACE(vmx, vcpu, vmxctx, host_rsp);
338         SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbx);
339         SETJMP_TRACE(vmx, vcpu, vmxctx, host_rip);
340
341         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdi);
342         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rsi);
343         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdx);
344         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rcx);
345         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r8);
346         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r9);
347         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rax);
348         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbx);
349         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbp);
350         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r10);
351         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r11);
352         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r12);
353         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r13);
354         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r14);
355         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r15);
356         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_cr2);
357 }
358 #endif
359 #else
360 static void __inline
361 vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)
362 {
363         return;
364 }
365 #endif  /* KTR */
366
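/*
 * Force the CR0/CR4 bits that VMX requires to be fixed to 1 or 0.  The
 * masks are derived from MSR_VMX_CR{0,4}_FIXED{0,1} in vmx_init().  For
 * example, if cr0_ones_mask were CR0_NE and cr0_zeros_mask were
 * CR0_NW | CR0_CD, then vmx_fix_cr0(CR0_PE) would return CR0_PE | CR0_NE.
 */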
367 u_long
368 vmx_fix_cr0(u_long cr0)
369 {
370
371         return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
372 }
373
374 u_long
375 vmx_fix_cr4(u_long cr4)
376 {
377
378         return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
379 }
380
381 static void
382 vpid_free(int vpid)
383 {
384         if (vpid < 0 || vpid > 0xffff)
385                 panic("vpid_free: invalid vpid %d", vpid);
386
387         /*
388          * VPIDs [0,VM_MAXCPU] are special and are not allocated from
389          * the unit number allocator.
390          */
391
392         if (vpid > VM_MAXCPU)
393                 free_unr(vpid_unr, vpid);
394 }
395
396 static void
397 vpid_alloc(uint16_t *vpid, int num)
398 {
399         int i, x;
400
401         if (num <= 0 || num > VM_MAXCPU)
402                 panic("invalid number of vpids requested: %d", num);
403
404         /*
405          * If the "enable vpid" execution control is not enabled then the
406          * VPID is required to be 0 for all vcpus.
407          */
408         if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
409                 for (i = 0; i < num; i++)
410                         vpid[i] = 0;
411                 return;
412         }
413
414         /*
415          * Allocate a unique VPID for each vcpu from the unit number allocator.
416          */
417         for (i = 0; i < num; i++) {
418                 x = alloc_unr(vpid_unr);
419                 if (x == -1)
420                         break;
421                 else
422                         vpid[i] = x;
423         }
424
425         if (i < num) {
426                 atomic_add_int(&vpid_alloc_failed, 1);
427
428                 /*
429                  * If the unit number allocator does not have enough unique
430                  * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
431                  *
432          * These VPIDs are not unique across VMs but this does not
433                  * affect correctness because the combined mappings are also
434                  * tagged with the EP4TA which is unique for each VM.
435                  *
436                  * It is still sub-optimal because the invvpid will invalidate
437                  * combined mappings for a particular VPID across all EP4TAs.
438                  */
439                 while (i-- > 0)
440                         vpid_free(vpid[i]);
441
442                 for (i = 0; i < num; i++)
443                         vpid[i] = i + 1;
444         }
445 }
446
447 static void
448 vpid_init(void)
449 {
450         /*
451          * VPID 0 is required when the "enable VPID" execution control is
452          * disabled.
453          *
454          * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
455          * unit number allocator does not have sufficient unique VPIDs to
456          * satisfy the allocation.
457          *
458          * The remaining VPIDs are managed by the unit number allocator.
459          */
460         vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
461 }
462
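/*
 * Build the guest MSR save area that is loaded on VM entry and stored on
 * VM exit (see vmcs_set_msr_save()).  Currently only MSR_KGSBASE is
 * handled this way.
 */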
463 static void
464 msr_save_area_init(struct msr_entry *g_area, int *g_count)
465 {
466         int cnt;
467
468         static struct msr_entry guest_msrs[] = {
469                 { MSR_KGSBASE, 0, 0 },
470         };
471
472         cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
473         if (cnt > GUEST_MSR_MAX_ENTRIES)
474                 panic("guest msr save area overrun");
475         bcopy(guest_msrs, g_area, sizeof(guest_msrs));
476         *g_count = cnt;
477 }
478
479 static void
480 vmx_disable(void *arg __unused)
481 {
482         struct invvpid_desc invvpid_desc = { 0 };
483         struct invept_desc invept_desc = { 0 };
484
485         if (vmxon_enabled[curcpu]) {
486                 /*
487                  * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
488                  *
489                  * VMXON or VMXOFF are not required to invalidate any TLB
490                  * caching structures, so invalidate them explicitly here.  This
491                  * prevents stale entries from surviving between VMX episodes.
492                  */
493                 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
494                 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
495                 vmxoff();
496         }
497         load_cr4(rcr4() & ~CR4_VMXE);
498 }
499
500 static int
501 vmx_cleanup(void)
502 {
503
504         if (vpid_unr != NULL) {
505                 delete_unrhdr(vpid_unr);
506                 vpid_unr = NULL;
507         }
508
509         smp_rendezvous(NULL, vmx_disable, NULL, NULL);
510
511         return (0);
512 }
513
514 static void
515 vmx_enable(void *arg __unused)
516 {
517         int error;
518
519         load_cr4(rcr4() | CR4_VMXE);
520
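        /*
         * The VMXON region must begin with the VMCS revision identifier
         * reported by the processor.
         */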
521         *(uint32_t *)vmxon_region[curcpu] = vmx_revision();
522         error = vmxon(vmxon_region[curcpu]);
523         if (error == 0)
524                 vmxon_enabled[curcpu] = 1;
525 }
526
527 static int
528 vmx_init(void)
529 {
530         int error;
531         uint64_t fixed0, fixed1, feature_control;
532         uint32_t tmp;
533
534         /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
535         if (!(cpu_feature2 & CPUID2_VMX)) {
536                 printf("vmx_init: processor does not support VMX operation\n");
537                 return (ENXIO);
538         }
539
540         /*
541          * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
542          * are set (bits 0 and 2 respectively).
543          */
544         feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
545         if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
546             (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
547                 printf("vmx_init: VMX operation disabled by BIOS\n");
548                 return (ENXIO);
549         }
550
551         /* Check support for primary processor-based VM-execution controls */
552         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
553                                MSR_VMX_TRUE_PROCBASED_CTLS,
554                                PROCBASED_CTLS_ONE_SETTING,
555                                PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
556         if (error) {
557                 printf("vmx_init: processor does not support desired primary "
558                        "processor-based controls\n");
559                 return (error);
560         }
561
562         /* Clear the processor-based ctl bits that are set on demand */
563         procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
564
565         /* Check support for secondary processor-based VM-execution controls */
566         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
567                                MSR_VMX_PROCBASED_CTLS2,
568                                PROCBASED_CTLS2_ONE_SETTING,
569                                PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
570         if (error) {
571                 printf("vmx_init: processor does not support desired secondary "
572                        "processor-based controls\n");
573                 return (error);
574         }
575
576         /* Check support for VPID */
577         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
578                                PROCBASED2_ENABLE_VPID, 0, &tmp);
579         if (error == 0)
580                 procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
581
582         /* Check support for pin-based VM-execution controls */
583         error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
584                                MSR_VMX_TRUE_PINBASED_CTLS,
585                                PINBASED_CTLS_ONE_SETTING,
586                                PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
587         if (error) {
588                 printf("vmx_init: processor does not support desired "
589                        "pin-based controls\n");
590                 return (error);
591         }
592
593         /* Check support for VM-exit controls */
594         error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
595                                VM_EXIT_CTLS_ONE_SETTING,
596                                VM_EXIT_CTLS_ZERO_SETTING,
597                                &exit_ctls);
598         if (error) {
599                 /* Try again without the PAT MSR bits */
600                 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
601                                        MSR_VMX_TRUE_EXIT_CTLS,
602                                        VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
603                                        VM_EXIT_CTLS_ZERO_SETTING,
604                                        &exit_ctls);
605                 if (error) {
606                         printf("vmx_init: processor does not support desired "
607                                "exit controls\n");
608                         return (error);
609                 } else {
610                         if (bootverbose)
611                                 printf("vmm: PAT MSR access not supported\n");
612                         guest_msr_valid(MSR_PAT);
613                         vmx_no_patmsr = 1;
614                 }
615         }
616
617         /* Check support for VM-entry controls */
618         if (!vmx_no_patmsr) {
619                 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
620                                        MSR_VMX_TRUE_ENTRY_CTLS,
621                                        VM_ENTRY_CTLS_ONE_SETTING,
622                                        VM_ENTRY_CTLS_ZERO_SETTING,
623                                        &entry_ctls);
624         } else {
625                 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
626                                        MSR_VMX_TRUE_ENTRY_CTLS,
627                                        VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
628                                        VM_ENTRY_CTLS_ZERO_SETTING,
629                                        &entry_ctls);
630         }
631
632         if (error) {
633                 printf("vmx_init: processor does not support desired "
634                        "entry controls\n");
635                 return (error);
636         }
637
638         /*
639          * Check support for optional features by testing them
640          * as individual bits
641          */
642         cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
643                                         MSR_VMX_TRUE_PROCBASED_CTLS,
644                                         PROCBASED_HLT_EXITING, 0,
645                                         &tmp) == 0);
646
647         cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
648                                         MSR_VMX_PROCBASED_CTLS,
649                                         PROCBASED_MTF, 0,
650                                         &tmp) == 0);
651
652         cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
653                                          MSR_VMX_TRUE_PROCBASED_CTLS,
654                                          PROCBASED_PAUSE_EXITING, 0,
655                                          &tmp) == 0);
656
657         cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
658                                         MSR_VMX_PROCBASED_CTLS2,
659                                         PROCBASED2_UNRESTRICTED_GUEST, 0,
660                                         &tmp) == 0);
661
662         cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
663             MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
664             &tmp) == 0);
665
666
667         /* Initialize EPT */
668         error = ept_init();
669         if (error) {
670                 printf("vmx_init: ept initialization failed (%d)\n", error);
671                 return (error);
672         }
673
674         /*
675          * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
676          */
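        /*
         * A bit set in FIXED0 must be 1 in CR0/CR4 and a bit clear in FIXED1
         * must be 0 (Intel SDM Vol 3, Appendix A.7 and A.8).  Therefore
         * ones_mask = fixed0 & fixed1 and zeros_mask = ~fixed0 & ~fixed1.
         */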
677         fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
678         fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
679         cr0_ones_mask = fixed0 & fixed1;
680         cr0_zeros_mask = ~fixed0 & ~fixed1;
681
682         /*
683          * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
684          * if unrestricted guest execution is allowed.
685          */
686         if (cap_unrestricted_guest)
687                 cr0_ones_mask &= ~(CR0_PG | CR0_PE);
688
689         /*
690          * Do not allow the guest to set CR0_NW or CR0_CD.
691          */
692         cr0_zeros_mask |= (CR0_NW | CR0_CD);
693
694         fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
695         fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
696         cr4_ones_mask = fixed0 & fixed1;
697         cr4_zeros_mask = ~fixed0 & ~fixed1;
698
699         vpid_init();
700
701         /* enable VMX operation */
702         smp_rendezvous(NULL, vmx_enable, NULL, NULL);
703
704         vmx_initialized = 1;
705
706         return (0);
707 }
708
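/*
 * Program the CR0/CR4 guest/host mask and read shadow.  Guest reads of the
 * masked bits return the shadow value, and guest writes that would change a
 * masked bit away from its shadow value cause a VM exit.
 */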
709 static int
710 vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
711 {
712         int error, mask_ident, shadow_ident;
713         uint64_t mask_value;
714
715         if (which != 0 && which != 4)
716                 panic("vmx_setup_cr_shadow: unknown cr%d", which);
717
718         if (which == 0) {
719                 mask_ident = VMCS_CR0_MASK;
720                 mask_value = cr0_ones_mask | cr0_zeros_mask;
721                 shadow_ident = VMCS_CR0_SHADOW;
722         } else {
723                 mask_ident = VMCS_CR4_MASK;
724                 mask_value = cr4_ones_mask | cr4_zeros_mask;
725                 shadow_ident = VMCS_CR4_SHADOW;
726         }
727
728         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
729         if (error)
730                 return (error);
731
732         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
733         if (error)
734                 return (error);
735
736         return (0);
737 }
738 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init))
739 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
740
741 static void *
742 vmx_vminit(struct vm *vm, pmap_t pmap)
743 {
744         uint16_t vpid[VM_MAXCPU];
745         int i, error, guest_msr_count;
746         struct vmx *vmx;
747
748         vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
749         if ((uintptr_t)vmx & PAGE_MASK) {
750                 panic("malloc of struct vmx not aligned on %d byte boundary",
751                       PAGE_SIZE);
752         }
753         vmx->vm = vm;
754
755         vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
756
757         /*
758          * Clean up EPTP-tagged guest physical and combined mappings
759          *
760          * VMX transitions are not required to invalidate any guest physical
761          * mappings. So, it may be possible for stale guest physical mappings
762          * to be present in the processor TLBs.
763          *
764          * Combined mappings for this EP4TA are also invalidated for all VPIDs.
765          */
766         ept_invalidate_mappings(vmx->eptp);
767
768         msr_bitmap_initialize(vmx->msr_bitmap);
769
770         /*
771          * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
772          * The guest FSBASE and GSBASE are saved and restored during
773          * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
774          * always restored from the vmcs host state area on vm-exit.
775          *
776          * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
777          * how they are saved/restored, so they can be directly accessed by
778          * the guest.
779          *
780          * Guest KGSBASE is saved and restored in the guest MSR save area.
781          * Host KGSBASE is restored before returning to userland from the pcb.
782          * There will be a window of time when we are executing in the host
783          * kernel context with a value of KGSBASE from the guest. This is ok
784          * because the value of KGSBASE is inconsequential in kernel context.
785          *
786          * MSR_EFER is saved and restored in the guest VMCS area on a
787          * VM exit and entry respectively. It is also restored from the
788          * host VMCS area on a VM exit.
789          */
790         if (guest_msr_rw(vmx, MSR_GSBASE) ||
791             guest_msr_rw(vmx, MSR_FSBASE) ||
792             guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
793             guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
794             guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
795             guest_msr_rw(vmx, MSR_KGSBASE) ||
796             guest_msr_rw(vmx, MSR_EFER))
797                 panic("vmx_vminit: error setting guest msr access");
798
799         /*
800          * MSR_PAT is saved and restored in the guest VMCS area on a VM exit
801          * and entry respectively. It is also restored from the host VMCS
802          * area on a VM exit. However, if running on a system with no
803          * MSR_PAT save/restore support, leave access disabled so accesses
804          * will be trapped.
805          */
806         if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
807                 panic("vmx_vminit: error setting guest pat msr access");
808
809         vpid_alloc(vpid, VM_MAXCPU);
810
811         for (i = 0; i < VM_MAXCPU; i++) {
812                 vmx->vmcs[i].identifier = vmx_revision();
813                 error = vmclear(&vmx->vmcs[i]);
814                 if (error != 0) {
815                         panic("vmx_vminit: vmclear error %d on vcpu %d\n",
816                               error, i);
817                 }
818
819                 error = vmcs_set_defaults(&vmx->vmcs[i],
820                                           (u_long)vmx_longjmp,
821                                           (u_long)&vmx->ctx[i],
822                                           vmx->eptp,
823                                           pinbased_ctls,
824                                           procbased_ctls,
825                                           procbased_ctls2,
826                                           exit_ctls, entry_ctls,
827                                           vtophys(vmx->msr_bitmap),
828                                           vpid[i]);
829
830                 if (error != 0)
831                         panic("vmx_vminit: vmcs_set_defaults error %d", error);
832
833                 vmx->cap[i].set = 0;
834                 vmx->cap[i].proc_ctls = procbased_ctls;
835                 vmx->cap[i].proc_ctls2 = procbased_ctls2;
836
837                 vmx->state[i].lastcpu = -1;
838                 vmx->state[i].vpid = vpid[i];
839
840                 msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);
841
842                 error = vmcs_set_msr_save(&vmx->vmcs[i],
843                                           vtophys(vmx->guest_msrs[i]),
844                                           guest_msr_count);
845                 if (error != 0)
846                         panic("vmcs_set_msr_save error %d", error);
847
848                 /*
849                  * Set up the CR0/4 shadows, and init the read shadow
850                  * to the power-on register value from the Intel Sys Arch.
851                  *  CR0 - 0x60000010
852                  *  CR4 - 0
853                  */
854                 error = vmx_setup_cr0_shadow(&vmx->vmcs[i], 0x60000010);
855                 if (error != 0)
856                         panic("vmx_setup_cr0_shadow %d", error);
857
858                 error = vmx_setup_cr4_shadow(&vmx->vmcs[i], 0);
859                 if (error != 0)
860                         panic("vmx_setup_cr4_shadow %d", error);
861
862                 vmx->ctx[i].pmap = pmap;
863                 vmx->ctx[i].eptp = vmx->eptp;
864         }
865
866         return (vmx);
867 }
868
869 static int
870 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
871 {
872         int handled, func;
873         
874         func = vmxctx->guest_rax;
875
876         handled = x86_emulate_cpuid(vm, vcpu,
877                                     (uint32_t*)(&vmxctx->guest_rax),
878                                     (uint32_t*)(&vmxctx->guest_rbx),
879                                     (uint32_t*)(&vmxctx->guest_rcx),
880                                     (uint32_t*)(&vmxctx->guest_rdx));
881         return (handled);
882 }
883
884 static __inline void
885 vmx_run_trace(struct vmx *vmx, int vcpu)
886 {
887 #ifdef KTR
888         VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
889 #endif
890 }
891
892 static __inline void
893 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
894                int handled)
895 {
896 #ifdef KTR
897         VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
898                  handled ? "handled" : "unhandled",
899                  exit_reason_to_str(exit_reason), rip);
900 #endif
901 }
902
903 static __inline void
904 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
905 {
906 #ifdef KTR
907         VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
908 #endif
909 }
910
911 static void
912 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
913 {
914         int lastcpu;
915         struct vmxstate *vmxstate;
916         struct invvpid_desc invvpid_desc = { 0 };
917
918         vmxstate = &vmx->state[vcpu];
919         lastcpu = vmxstate->lastcpu;
920         vmxstate->lastcpu = curcpu;
921
922         if (lastcpu == curcpu)
923                 return;
924
925         vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
926
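        /*
         * The host TR, GDTR and GS bases are per-cpu values, so refresh
         * them in the VMCS now that this vcpu is running on 'curcpu'.
         */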
927         vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
928         vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
929         vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
930
931         /*
932          * If we are using VPIDs then invalidate all mappings tagged with 'vpid'
933          *
934          * We do this because this vcpu was executing on a different host
935          * cpu when it last ran. We do not track whether it invalidated
936          * mappings associated with its 'vpid' during that run. So we must
937          * assume that the mappings associated with 'vpid' on 'curcpu' are
938          * stale and invalidate them.
939          *
940          * Note that we incur this penalty only when the scheduler chooses to
941          * move the thread associated with this vcpu between host cpus.
942          *
943          * Note also that this will invalidate mappings tagged with 'vpid'
944          * for "all" EP4TAs.
945          */
946         if (vmxstate->vpid != 0) {
947                 invvpid_desc.vpid = vmxstate->vpid;
948                 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
949         }
950 }
951
952 /*
953  * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
954  */
955 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
956
957 static void __inline
958 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
959 {
960
961         vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
962         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
963 }
964
965 static void __inline
966 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
967 {
968
969         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
970         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
971 }
972
973 static void __inline
974 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
975 {
976
977         vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
978         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
979 }
980
981 static void __inline
982 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
983 {
984
985         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
986         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
987 }
988
989 static int
990 vmx_inject_nmi(struct vmx *vmx, int vcpu)
991 {
992         uint64_t info, interruptibility;
993
994         /* Bail out if no NMI requested */
995         if (!vm_nmi_pending(vmx->vm, vcpu))
996                 return (0);
997
998         interruptibility = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
999         if (interruptibility & nmi_blocking_bits)
1000                 goto nmiblocked;
1001
1002         /*
1003          * Inject the virtual NMI. The vector must be the NMI IDT entry
1004          * or the VMCS entry check will fail.
1005          */
1006         info = VMCS_INTERRUPTION_INFO_NMI | VMCS_INTERRUPTION_INFO_VALID;
1007         info |= IDT_NMI;
1008         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1009
1010         VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1011
1012         /* Clear the request */
1013         vm_nmi_clear(vmx->vm, vcpu);
1014         return (1);
1015
1016 nmiblocked:
1017         /*
1018          * Set the NMI Window Exiting execution control so we can inject
1019          * the virtual NMI as soon as the blocking condition goes away.
1020          */
1021         vmx_set_nmi_window_exiting(vmx, vcpu);
1022
1023         VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1024         return (1);
1025 }
1026
1027 static void
1028 vmx_inject_interrupts(struct vmx *vmx, int vcpu)
1029 {
1030         int vector;
1031         uint64_t info, rflags, interruptibility;
1032
1033         const int HWINTR_BLOCKED = VMCS_INTERRUPTIBILITY_STI_BLOCKING |
1034                                    VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING;
1035
1036         /*
1037          * If there is already an interrupt pending then just return.
1038          *
1039          * This could happen if an interrupt was injected on a prior
1040          * VM entry but the actual entry into guest mode was aborted
1041          * because of a pending AST.
1042          */
1043         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1044         if (info & VMCS_INTERRUPTION_INFO_VALID)
1045                 return;
1046
1047         /*
1048          * NMI injection has priority so deal with those first
1049          */
1050         if (vmx_inject_nmi(vmx, vcpu))
1051                 return;
1052
1053         /* Ask the local apic for a vector to inject */
1054         vector = lapic_pending_intr(vmx->vm, vcpu);
1055         if (vector < 0)
1056                 return;
1057
1058         if (vector < 32 || vector > 255)
1059                 panic("vmx_inject_interrupts: invalid vector %d\n", vector);
1060
1061         /* Check RFLAGS.IF and the interruptibility state of the guest */
1062         rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1063         if ((rflags & PSL_I) == 0)
1064                 goto cantinject;
1065
1066         interruptibility = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1067         if (interruptibility & HWINTR_BLOCKED)
1068                 goto cantinject;
1069
1070         /* Inject the interrupt */
1071         info = VMCS_INTERRUPTION_INFO_HW_INTR | VMCS_INTERRUPTION_INFO_VALID;
1072         info |= vector;
1073         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1074
1075         /* Update the Local APIC ISR */
1076         lapic_intr_accepted(vmx->vm, vcpu, vector);
1077
1078         VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1079
1080         return;
1081
1082 cantinject:
1083         /*
1084          * Set the Interrupt Window Exiting execution control so we can inject
1085          * the interrupt as soon as the blocking condition goes away.
1086          */
1087         vmx_set_int_window_exiting(vmx, vcpu);
1088
1089         VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1090 }
1091
1092 static int
1093 vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1094 {
1095         int cr, vmcs_guest_cr, vmcs_shadow_cr;
1096         uint64_t crval, regval, ones_mask, zeros_mask;
1097         const struct vmxctx *vmxctx;
1098
1099         /* We only handle mov to %cr0 or %cr4 at this time */
1100         if ((exitqual & 0xf0) != 0x00)
1101                 return (UNHANDLED);
1102
1103         cr = exitqual & 0xf;
1104         if (cr != 0 && cr != 4)
1105                 return (UNHANDLED);
1106
1107         vmxctx = &vmx->ctx[vcpu];
1108
1109         /*
1110          * We must use vmcs_write() directly here because vmcs_setreg() will
1111          * call vmclear(vmcs) as a side-effect which we certainly don't want.
1112          */
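        /*
         * Bits 11:8 of the exit qualification identify the general purpose
         * register that is the source operand of the MOV to %cr (see the
         * Intel SDM description of control-register access exits).
         */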
1113         switch ((exitqual >> 8) & 0xf) {
1114         case 0:
1115                 regval = vmxctx->guest_rax;
1116                 break;
1117         case 1:
1118                 regval = vmxctx->guest_rcx;
1119                 break;
1120         case 2:
1121                 regval = vmxctx->guest_rdx;
1122                 break;
1123         case 3:
1124                 regval = vmxctx->guest_rbx;
1125                 break;
1126         case 4:
1127                 regval = vmcs_read(VMCS_GUEST_RSP);
1128                 break;
1129         case 5:
1130                 regval = vmxctx->guest_rbp;
1131                 break;
1132         case 6:
1133                 regval = vmxctx->guest_rsi;
1134                 break;
1135         case 7:
1136                 regval = vmxctx->guest_rdi;
1137                 break;
1138         case 8:
1139                 regval = vmxctx->guest_r8;
1140                 break;
1141         case 9:
1142                 regval = vmxctx->guest_r9;
1143                 break;
1144         case 10:
1145                 regval = vmxctx->guest_r10;
1146                 break;
1147         case 11:
1148                 regval = vmxctx->guest_r11;
1149                 break;
1150         case 12:
1151                 regval = vmxctx->guest_r12;
1152                 break;
1153         case 13:
1154                 regval = vmxctx->guest_r13;
1155                 break;
1156         case 14:
1157                 regval = vmxctx->guest_r14;
1158                 break;
1159         case 15:
1160                 regval = vmxctx->guest_r15;
1161                 break;
1162         }
1163
1164         if (cr == 0) {
1165                 ones_mask = cr0_ones_mask;
1166                 zeros_mask = cr0_zeros_mask;
1167                 vmcs_guest_cr = VMCS_GUEST_CR0;
1168                 vmcs_shadow_cr = VMCS_CR0_SHADOW;
1169         } else {
1170                 ones_mask = cr4_ones_mask;
1171                 zeros_mask = cr4_zeros_mask;
1172                 vmcs_guest_cr = VMCS_GUEST_CR4;
1173                 vmcs_shadow_cr = VMCS_CR4_SHADOW;
1174         }
1175         vmcs_write(vmcs_shadow_cr, regval);
1176
1177         crval = regval | ones_mask;
1178         crval &= ~zeros_mask;
1179         vmcs_write(vmcs_guest_cr, crval);
1180
1181         if (cr == 0 && regval & CR0_PG) {
1182                 uint64_t efer, entry_ctls;
1183
1184                 /*
1185                  * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1186                  * the "IA-32e mode guest" bit in VM-entry control must be
1187                  * equal.
1188                  */
1189                 efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1190                 if (efer & EFER_LME) {
1191                         efer |= EFER_LMA;
1192                         vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1193                         entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1194                         entry_ctls |= VM_ENTRY_GUEST_LMA;
1195                         vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1196                 }
1197         }
1198
1199         return (HANDLED);
1200 }
1201
1202 static int
1203 ept_fault_type(uint64_t ept_qual)
1204 {
1205         int fault_type;
1206
1207         if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1208                 fault_type = VM_PROT_WRITE;
1209         else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1210                 fault_type = VM_PROT_EXECUTE;
1211         else
1212                 fault_type = VM_PROT_READ;
1213
1214         return (fault_type);
1215 }
1216
1217 static boolean_t
1218 ept_emulation_fault(uint64_t ept_qual)
1219 {
1220         int read, write;
1221
1222         /* EPT fault on an instruction fetch doesn't make sense here */
1223         if (ept_qual & EPT_VIOLATION_INST_FETCH)
1224                 return (FALSE);
1225
1226         /* EPT fault must be a read fault or a write fault */
1227         read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1228         write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1229         if ((read | write) == 0)
1230                 return (FALSE);
1231
1232         /*
1233          * The EPT violation must have been caused by accessing a
1234          * guest-physical address that is a translation of a guest-linear
1235          * address.
1236          */
1237         if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1238             (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1239                 return (FALSE);
1240         }
1241
1242         return (TRUE);
1243 }
1244
1245 static int
1246 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1247 {
1248         int error, handled;
1249         struct vmcs *vmcs;
1250         struct vmxctx *vmxctx;
1251         uint32_t eax, ecx, edx, idtvec_info, idtvec_err, reason;
1252         uint64_t qual, gpa;
1253         bool retu;
1254
1255         handled = 0;
1256         vmcs = &vmx->vmcs[vcpu];
1257         vmxctx = &vmx->ctx[vcpu];
1258         qual = vmexit->u.vmx.exit_qualification;
1259         reason = vmexit->u.vmx.exit_reason;
1260         vmexit->exitcode = VM_EXITCODE_BOGUS;
1261
1262         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
1263
1264         /*
1265          * VM exits that could be triggered during event injection on the
1266          * previous VM entry need to be handled specially by re-injecting
1267          * the event.
1268          *
1269          * See "Information for VM Exits During Event Delivery" in Intel SDM
1270          * for details.
1271          */
1272         switch (reason) {
1273         case EXIT_REASON_EPT_FAULT:
1274         case EXIT_REASON_EPT_MISCONFIG:
1275         case EXIT_REASON_APIC:
1276         case EXIT_REASON_TASK_SWITCH:
1277         case EXIT_REASON_EXCEPTION:
1278                 idtvec_info = vmcs_idt_vectoring_info();
1279                 if (idtvec_info & VMCS_IDT_VEC_VALID) {
1280                         idtvec_info &= ~(1 << 12); /* clear undefined bit */
1281                         vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info);
1282                         if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
1283                                 idtvec_err = vmcs_idt_vectoring_err();
1284                                 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
1285                                     idtvec_err);
1286                         }
1287                         vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
1288                 }
1289         default:
1290                 break;
1291         }
1292
1293         switch (reason) {
1294         case EXIT_REASON_CR_ACCESS:
1295                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
1296                 handled = vmx_emulate_cr_access(vmx, vcpu, qual);
1297                 break;
1298         case EXIT_REASON_RDMSR:
1299                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
1300                 retu = false;
1301                 ecx = vmxctx->guest_rcx;
1302                 error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
1303                 if (error) {
1304                         vmexit->exitcode = VM_EXITCODE_RDMSR;
1305                         vmexit->u.msr.code = ecx;
1306                 } else if (!retu) {
1307                         handled = 1;
1308                 } else {
1309                         /* Return to userspace with a valid exitcode */
1310                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1311                             ("emulate_rdmsr retu with bogus exitcode"));
1312                 }
1313                 break;
1314         case EXIT_REASON_WRMSR:
1315                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
1316                 retu = false;
1317                 eax = vmxctx->guest_rax;
1318                 ecx = vmxctx->guest_rcx;
1319                 edx = vmxctx->guest_rdx;
1320                 error = emulate_wrmsr(vmx->vm, vcpu, ecx,
1321                     (uint64_t)edx << 32 | eax, &retu);
1322                 if (error) {
1323                         vmexit->exitcode = VM_EXITCODE_WRMSR;
1324                         vmexit->u.msr.code = ecx;
1325                         vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
1326                 } else if (!retu) {
1327                         handled = 1;
1328                 } else {
1329                         /* Return to userspace with a valid exitcode */
1330                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1331                             ("emulate_wrmsr retu with bogus exitcode"));
1332                 }
1333                 break;
1334         case EXIT_REASON_HLT:
1335                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
1336                 vmexit->exitcode = VM_EXITCODE_HLT;
1337                 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1338                 break;
1339         case EXIT_REASON_MTF:
1340                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
1341                 vmexit->exitcode = VM_EXITCODE_MTRAP;
1342                 break;
1343         case EXIT_REASON_PAUSE:
1344                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
1345                 vmexit->exitcode = VM_EXITCODE_PAUSE;
1346                 break;
1347         case EXIT_REASON_INTR_WINDOW:
1348                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
1349                 vmx_clear_int_window_exiting(vmx, vcpu);
1350                 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1351                 return (1);
1352         case EXIT_REASON_EXT_INTR:
1353                 /*
1354                  * External interrupts serve only to cause VM exits and allow
1355                  * the host interrupt handler to run.
1356                  *
1357                  * If this external interrupt triggers a virtual interrupt
1358                  * to a VM, then that state will be recorded by the
1359                  * host interrupt handler in the VM's softc. We will inject
1360                  * this virtual interrupt during the subsequent VM enter.
1361                  */
1362
1363                 /*
1364                  * This is special. We want to treat this as a 'handled'
1365                  * VM-exit but not increment the instruction pointer.
1366                  */
1367                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
1368                 return (1);
1369         case EXIT_REASON_NMI_WINDOW:
1370                 /* Exit to allow the pending virtual NMI to be injected */
1371                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
1372                 vmx_clear_nmi_window_exiting(vmx, vcpu);
1373                 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1374                 return (1);
1375         case EXIT_REASON_INOUT:
1376                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
1377                 vmexit->exitcode = VM_EXITCODE_INOUT;
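                /*
                 * Decode the I/O instruction exit qualification: bits 2:0 are
                 * the access size minus one, bit 3 is set for IN, bit 4 for a
                 * string instruction, bit 5 for a REP prefix, and bits 31:16
                 * hold the port number.
                 */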
1378                 vmexit->u.inout.bytes = (qual & 0x7) + 1;
1379                 vmexit->u.inout.in = (qual & 0x8) ? 1 : 0;
1380                 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
1381                 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
1382                 vmexit->u.inout.port = (uint16_t)(qual >> 16);
1383                 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
1384                 break;
1385         case EXIT_REASON_CPUID:
1386                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
1387                 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
1388                 break;
1389         case EXIT_REASON_EPT_FAULT:
1390                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EPT_FAULT, 1);
1391                 /*
1392                  * If 'gpa' lies within the address space allocated to
1393                  * memory then this must be a nested page fault otherwise
1394                  * this must be an instruction that accesses MMIO space.
1395                  */
1396                 gpa = vmcs_gpa();
1397                 if (vm_mem_allocated(vmx->vm, gpa)) {
1398                         vmexit->exitcode = VM_EXITCODE_PAGING;
1399                         vmexit->u.paging.gpa = gpa;
1400                         vmexit->u.paging.fault_type = ept_fault_type(qual);
1401                 } else if (ept_emulation_fault(qual)) {
1402                         vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1403                         vmexit->u.inst_emul.gpa = gpa;
1404                         vmexit->u.inst_emul.gla = vmcs_gla();
1405                         vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
1406                 }
1407                 break;
1408         default:
1409                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
1410                 break;
1411         }
1412
1413         if (handled) {
1414                 /*
1415                  * It is possible that control is returned to userland
1416                  * even though we were able to handle the VM exit in the
1417                  * kernel.
1418                  *
1419                  * In such a case we want to make sure that the userland
1420                  * restarts guest execution at the instruction *after*
1421                  * the one we just processed. Therefore we update the
1422                  * guest rip in the VMCS and in 'vmexit'.
1423                  */
1424                 vmexit->rip += vmexit->inst_length;
1425                 vmexit->inst_length = 0;
1426                 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
1427         } else {
1428                 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
1429                         /*
1430                          * If this VM exit was not claimed by anybody then
1431                          * treat it as a generic VMX exit.
1432                          */
1433                         vmexit->exitcode = VM_EXITCODE_VMX;
1434                         vmexit->u.vmx.error = 0;
1435                 } else {
1436                         /*
1437                          * The exitcode and collateral have been populated.
1438                          * The VM exit will be processed further in userland.
1439                          */
1440                 }
1441         }
1442         return (handled);
1443 }
1444
1445 static int
1446 vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap)
1447 {
1448         int vie, rc, handled, astpending;
1449         uint32_t exit_reason;
1450         struct vmx *vmx;
1451         struct vmxctx *vmxctx;
1452         struct vmcs *vmcs;
1453         struct vm_exit *vmexit;
1454
1455         vmx = arg;
1456         vmcs = &vmx->vmcs[vcpu];
1457         vmxctx = &vmx->ctx[vcpu];
1458         vmxctx->launched = 0;
1459
1460         astpending = 0;
1461         vmexit = vm_exitinfo(vmx->vm, vcpu);
1462
1463         KASSERT(vmxctx->pmap == pmap,
1464             ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
1465         KASSERT(vmxctx->eptp == vmx->eptp,
1466             ("eptp %#lx different than ctx eptp %#lx", vmx->eptp, vmxctx->eptp));
1467
1468         /*
1469          * XXX Can we avoid doing this every time we do a vm run?
1470          */
1471         VMPTRLD(vmcs);
1472
1473         /*
1474          * XXX
1475          * We do this every time because we may set up the virtual machine
1476          * from a different process than the one that actually runs it.
1477          *
1478          * If the life of a virtual machine was spent entirely in the context
1479          * of a single process we could do this once in vmcs_set_defaults().
1480          */
1481         vmcs_write(VMCS_HOST_CR3, rcr3());
1482         vmcs_write(VMCS_GUEST_RIP, rip);
1483         vmx_set_pcpu_defaults(vmx, vcpu);
1484
1485         do {
1486                 vmx_inject_interrupts(vmx, vcpu);
1487                 vmx_run_trace(vmx, vcpu);
1488                 rc = vmx_setjmp(vmxctx);
1489 #ifdef SETJMP_TRACE
1490                 vmx_setjmp_trace(vmx, vcpu, vmxctx, rc);
1491 #endif
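                /*
                 * vmx_setjmp() returns VMX_RETURN_DIRECT on the initial call;
                 * the remaining values arrive via a longjmp from the guest
                 * entry/exit path: VMX_RETURN_LONGJMP for a normal VM exit,
                 * VMX_RETURN_AST when an AST was pending on the host thread,
                 * and VMX_RETURN_VMLAUNCH, VMX_RETURN_VMRESUME or
                 * VMX_RETURN_INVEPT when the corresponding instruction failed.
                 */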
1492                 switch (rc) {
1493                 case VMX_RETURN_DIRECT:
1494                         if (vmxctx->launched == 0) {
1495                                 vmxctx->launched = 1;
1496                                 vmx_launch(vmxctx);
1497                         } else
1498                                 vmx_resume(vmxctx);
1499                         panic("vmx_launch/resume should not return");
1500                         break;
1501                 case VMX_RETURN_LONGJMP:
1502                         break;                  /* vm exit */
1503                 case VMX_RETURN_AST:
1504                         astpending = 1;
1505                         break;
1506                 case VMX_RETURN_VMRESUME:
1507                         vie = vmcs_instruction_error();
1508                         if (vmxctx->launch_error == VM_FAIL_INVALID ||
1509                             vie != VMRESUME_WITH_NON_LAUNCHED_VMCS) {
1510                                 printf("vmresume error %d vmcs inst error %d\n",
1511                                         vmxctx->launch_error, vie);
1512                                 goto err_exit;
1513                         }
1514                         vmx_launch(vmxctx);     /* try to launch the guest */
1515                         panic("vmx_launch should not return");
1516                         break;
1517                 case VMX_RETURN_VMLAUNCH:
1518                         vie = vmcs_instruction_error();
1519 #if 1
1520                         printf("vmlaunch error %d vmcs inst error %d\n",
1521                                 vmxctx->launch_error, vie);
1522 #endif
1523                         goto err_exit;
1524                 case VMX_RETURN_INVEPT:
1525                         panic("vm %s:%d invept error %d",
1526                               vm_name(vmx->vm), vcpu, vmxctx->launch_error);
1527                 default:
1528                         panic("vmx_setjmp returned %d", rc);
1529                 }
1530
1531                 /* enable interrupts */
1532                 enable_intr();
1533
1534                 /* collect some basic information for VM exit processing */
1535                 vmexit->rip = rip = vmcs_guest_rip();
1536                 vmexit->inst_length = vmexit_instruction_length();
1537                 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
1538                 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
1539
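                /*
                 * A pending AST means the guest was never entered.  Return to
                 * userland with a BOGUS exitcode and a zero instruction
                 * length so the guest is restarted at the same rip once the
                 * AST has been serviced.
                 */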
1540                 if (astpending) {
1541                         handled = 1;
1542                         vmexit->inst_length = 0;
1543                         vmexit->exitcode = VM_EXITCODE_BOGUS;
1544                         vmx_astpending_trace(vmx, vcpu, rip);
1545                         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);
1546                         break;
1547                 }
1548
1549                 handled = vmx_exit_process(vmx, vcpu, vmexit);
1550                 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
1551
1552         } while (handled);
1553
1554         /*
1555          * If a VM exit was handled then the exitcode must be BOGUS.
1556          * If a VM exit was not handled then the exitcode must not be BOGUS.
1557          */
1558         if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
1559             (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
1560                 panic("Mismatch between handled (%d) and exitcode (%d)",
1561                       handled, vmexit->exitcode);
1562         }
1563
1564         if (!handled)
1565                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_USERSPACE, 1);
1566
1567         VCPU_CTR1(vmx->vm, vcpu, "goto userland: exitcode %d", vmexit->exitcode);
1568
1569         /*
1570          * XXX
1571          * We need to do this to ensure that any VMCS state cached by the
1572          * processor is flushed to memory, in case the VM moves to a
1573          * different cpu the next time it runs.
1574          *
1575          * Can we avoid doing this?
1576          */
1577         VMCLEAR(vmcs);
1578         return (0);
1579
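/*
 * A vmlaunch or vmresume failure is reported to userland as a VMX exitcode
 * with the VM-instruction error number returned in 'u.vmx.error'.
 */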
1580 err_exit:
1581         vmexit->exitcode = VM_EXITCODE_VMX;
1582         vmexit->u.vmx.exit_reason = (uint32_t)-1;
1583         vmexit->u.vmx.exit_qualification = (uint32_t)-1;
1584         vmexit->u.vmx.error = vie;
1585         VMCLEAR(vmcs);
1586         return (ENOEXEC);
1587 }
1588
1589 static void
1590 vmx_vmcleanup(void *arg)
1591 {
1592         int i, error;
1593         struct vmx *vmx = arg;
1594
1595         for (i = 0; i < VM_MAXCPU; i++)
1596                 vpid_free(vmx->state[i].vpid);
1597
1598         /*
1599          * XXXSMP: we also need to VMCLEAR the VMCSs belonging to the other vcpus.
1600          */
1601         error = vmclear(&vmx->vmcs[0]);
1602         if (error != 0)
1603                 panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error);
1604
1605         free(vmx, M_VMX);
1606
1607         return;
1608 }
1609
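/*
 * Return a pointer to the saved copy of guest register 'reg' in the vmxctx,
 * or NULL if the register is not maintained there (i.e. it lives in the
 * VMCS instead).
 */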
1610 static register_t *
1611 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
1612 {
1613
1614         switch (reg) {
1615         case VM_REG_GUEST_RAX:
1616                 return (&vmxctx->guest_rax);
1617         case VM_REG_GUEST_RBX:
1618                 return (&vmxctx->guest_rbx);
1619         case VM_REG_GUEST_RCX:
1620                 return (&vmxctx->guest_rcx);
1621         case VM_REG_GUEST_RDX:
1622                 return (&vmxctx->guest_rdx);
1623         case VM_REG_GUEST_RSI:
1624                 return (&vmxctx->guest_rsi);
1625         case VM_REG_GUEST_RDI:
1626                 return (&vmxctx->guest_rdi);
1627         case VM_REG_GUEST_RBP:
1628                 return (&vmxctx->guest_rbp);
1629         case VM_REG_GUEST_R8:
1630                 return (&vmxctx->guest_r8);
1631         case VM_REG_GUEST_R9:
1632                 return (&vmxctx->guest_r9);
1633         case VM_REG_GUEST_R10:
1634                 return (&vmxctx->guest_r10);
1635         case VM_REG_GUEST_R11:
1636                 return (&vmxctx->guest_r11);
1637         case VM_REG_GUEST_R12:
1638                 return (&vmxctx->guest_r12);
1639         case VM_REG_GUEST_R13:
1640                 return (&vmxctx->guest_r13);
1641         case VM_REG_GUEST_R14:
1642                 return (&vmxctx->guest_r14);
1643         case VM_REG_GUEST_R15:
1644                 return (&vmxctx->guest_r15);
1645         default:
1646                 break;
1647         }
1648         return (NULL);
1649 }
1650
1651 static int
1652 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
1653 {
1654         register_t *regp;
1655
1656         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
1657                 *retval = *regp;
1658                 return (0);
1659         } else
1660                 return (EINVAL);
1661 }
1662
1663 static int
1664 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
1665 {
1666         register_t *regp;
1667
1668         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
1669                 *regp = val;
1670                 return (0);
1671         } else
1672                 return (EINVAL);
1673 }
1674
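/*
 * Map a guest control register to its read-shadow field in the VMCS, or
 * return -1 if the register has no shadow.
 */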
1675 static int
1676 vmx_shadow_reg(int reg)
1677 {
1678         int shreg;
1679
1680         shreg = -1;
1681
1682         switch (reg) {
1683         case VM_REG_GUEST_CR0:
1684                 shreg = VMCS_CR0_SHADOW;
1685                 break;
1686         case VM_REG_GUEST_CR4:
1687                 shreg = VMCS_CR4_SHADOW;
1688                 break;
1689         default:
1690                 break;
1691         }
1692
1693         return (shreg);
1694 }
1695
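/*
 * General purpose registers are saved in the vmxctx on every VM exit and can
 * be accessed there directly; all other guest state is read from, or written
 * to, the vcpu's VMCS.
 */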
1696 static int
1697 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
1698 {
1699         int running, hostcpu;
1700         struct vmx *vmx = arg;
1701
1702         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
1703         if (running && hostcpu != curcpu)
1704                 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
1705
1706         if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
1707                 return (0);
1708
1709         return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
1710 }
1711
1712 static int
1713 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
1714 {
1715         int error, hostcpu, running, shadow;
1716         uint64_t ctls;
1717         struct vmx *vmx = arg;
1718
1719         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
1720         if (running && hostcpu != curcpu)
1721                 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
1722
1723         if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
1724                 return (0);
1725
1726         error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
1727
1728         if (error == 0) {
1729                 /*
1730                  * If the "load EFER" VM-entry control is 1 then the
1731                  * value of EFER.LMA must be identical to the "IA-32e mode
1732                  * guest" bit in the VM-entry controls.
1733                  */
1734                 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
1735                     (reg == VM_REG_GUEST_EFER)) {
1736                         vmcs_getreg(&vmx->vmcs[vcpu], running,
1737                                     VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
1738                         if (val & EFER_LMA)
1739                                 ctls |= VM_ENTRY_GUEST_LMA;
1740                         else
1741                                 ctls &= ~VM_ENTRY_GUEST_LMA;
1742                         vmcs_setreg(&vmx->vmcs[vcpu], running,
1743                                     VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
1744                 }
1745
1746                 shadow = vmx_shadow_reg(reg);
1747                 if (shadow > 0) {
1748                         /*
1749                          * Store the unmodified value in the shadow register.
1750                          */
1751                         error = vmcs_setreg(&vmx->vmcs[vcpu], running,
1752                                     VMCS_IDENT(shadow), val);
1753                 }
1754         }
1755
1756         return (error);
1757 }
1758
1759 static int
1760 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
1761 {
1762         struct vmx *vmx = arg;
1763
1764         return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc));
1765 }
1766
1767 static int
1768 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
1769 {
1770         struct vmx *vmx = arg;
1771
1772         return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc));
1773 }
1774
1775 static int
1776 vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code,
1777            int code_valid)
1778 {
1779         int error;
1780         uint64_t info;
1781         struct vmx *vmx = arg;
1782         struct vmcs *vmcs = &vmx->vmcs[vcpu];
1783
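        /*
         * type_map translates VM_EVENT_* values into the interruption types
         * encoded in bits 10:8 of the VM-entry interruption-information
         * field.
         */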
1784         static uint32_t type_map[VM_EVENT_MAX] = {
1785                 0x1,            /* VM_EVENT_NONE */
1786                 0x0,            /* VM_HW_INTR */
1787                 0x2,            /* VM_NMI */
1788                 0x3,            /* VM_HW_EXCEPTION */
1789                 0x4,            /* VM_SW_INTR */
1790                 0x5,            /* VM_PRIV_SW_EXCEPTION */
1791                 0x6,            /* VM_SW_EXCEPTION */
1792         };
1793
1794         /*
1795          * If there is already an exception pending to be delivered to the
1796          * vcpu then just return.
1797          */
1798         error = vmcs_getreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);
1799         if (error)
1800                 return (error);
1801
1802         if (info & VMCS_INTERRUPTION_INFO_VALID)
1803                 return (EAGAIN);
1804
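        /*
         * Construct the VM-entry interruption-information field: bits 7:0
         * hold the vector, bits 10:8 the event type, bit 11 the
         * deliver-error-code flag and bit 31 the valid bit.  For example, a
         * page fault (#PF, vector 14) with an error code is encoded as
         * 0x80000b0e.
         */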
1805         info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0);
1806         info |= VMCS_INTERRUPTION_INFO_VALID;
1807         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
1808         if (error != 0)
1809                 return (error);
1810
1811         if (code_valid) {
1812                 error = vmcs_setreg(vmcs, 0,
1813                                     VMCS_IDENT(VMCS_ENTRY_EXCEPTION_ERROR),
1814                                     code);
1815         }
1816         return (error);
1817 }
1818
1819 static int
1820 vmx_getcap(void *arg, int vcpu, int type, int *retval)
1821 {
1822         struct vmx *vmx = arg;
1823         int vcap;
1824         int ret;
1825
1826         ret = ENOENT;
1827
1828         vcap = vmx->cap[vcpu].set;
1829
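        /*
         * The cap_* globals record whether the host CPU supports a given
         * capability; the per-vcpu 'set' bitmap records whether it is
         * currently enabled for this vcpu.
         */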
1830         switch (type) {
1831         case VM_CAP_HALT_EXIT:
1832                 if (cap_halt_exit)
1833                         ret = 0;
1834                 break;
1835         case VM_CAP_PAUSE_EXIT:
1836                 if (cap_pause_exit)
1837                         ret = 0;
1838                 break;
1839         case VM_CAP_MTRAP_EXIT:
1840                 if (cap_monitor_trap)
1841                         ret = 0;
1842                 break;
1843         case VM_CAP_UNRESTRICTED_GUEST:
1844                 if (cap_unrestricted_guest)
1845                         ret = 0;
1846                 break;
1847         case VM_CAP_ENABLE_INVPCID:
1848                 if (cap_invpcid)
1849                         ret = 0;
1850                 break;
1851         default:
1852                 break;
1853         }
1854
1855         if (ret == 0)
1856                 *retval = (vcap & (1 << type)) ? 1 : 0;
1857
1858         return (ret);
1859 }
1860
1861 static int
1862 vmx_setcap(void *arg, int vcpu, int type, int val)
1863 {
1864         struct vmx *vmx = arg;
1865         struct vmcs *vmcs = &vmx->vmcs[vcpu];
1866         uint32_t baseval;
1867         uint32_t *pptr;
1868         int error;
1869         int flag;
1870         int reg;
1871         int retval;
1872
1873         retval = ENOENT;
1874         pptr = NULL;
1875
1876         switch (type) {
1877         case VM_CAP_HALT_EXIT:
1878                 if (cap_halt_exit) {
1879                         retval = 0;
1880                         pptr = &vmx->cap[vcpu].proc_ctls;
1881                         baseval = *pptr;
1882                         flag = PROCBASED_HLT_EXITING;
1883                         reg = VMCS_PRI_PROC_BASED_CTLS;
1884                 }
1885                 break;
1886         case VM_CAP_MTRAP_EXIT:
1887                 if (cap_monitor_trap) {
1888                         retval = 0;
1889                         pptr = &vmx->cap[vcpu].proc_ctls;
1890                         baseval = *pptr;
1891                         flag = PROCBASED_MTF;
1892                         reg = VMCS_PRI_PROC_BASED_CTLS;
1893                 }
1894                 break;
1895         case VM_CAP_PAUSE_EXIT:
1896                 if (cap_pause_exit) {
1897                         retval = 0;
1898                         pptr = &vmx->cap[vcpu].proc_ctls;
1899                         baseval = *pptr;
1900                         flag = PROCBASED_PAUSE_EXITING;
1901                         reg = VMCS_PRI_PROC_BASED_CTLS;
1902                 }
1903                 break;
1904         case VM_CAP_UNRESTRICTED_GUEST:
1905                 if (cap_unrestricted_guest) {
1906                         retval = 0;
1907                         pptr = &vmx->cap[vcpu].proc_ctls2;
1908                         baseval = *pptr;
1909                         flag = PROCBASED2_UNRESTRICTED_GUEST;
1910                         reg = VMCS_SEC_PROC_BASED_CTLS;
1911                 }
1912                 break;
1913         case VM_CAP_ENABLE_INVPCID:
1914                 if (cap_invpcid) {
1915                         retval = 0;
1916                         pptr = &vmx->cap[vcpu].proc_ctls2;
1917                         baseval = *pptr;
1918                         flag = PROCBASED2_ENABLE_INVPCID;
1919                         reg = VMCS_SEC_PROC_BASED_CTLS;
1920                 }
1921                 break;
1922         default:
1923                 break;
1924         }
1925
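        /*
         * Toggle the control bit in the vcpu's VMCS: make the VMCS current
         * with VMPTRLD, rewrite the selected VM-execution control word and
         * VMCLEAR it again so the update is flushed back to memory.
         */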
1926         if (retval == 0) {
1927                 if (val) {
1928                         baseval |= flag;
1929                 } else {
1930                         baseval &= ~flag;
1931                 }
1932                 VMPTRLD(vmcs);
1933                 error = vmwrite(reg, baseval);
1934                 VMCLEAR(vmcs);
1935
1936                 if (error) {
1937                         retval = error;
1938                 } else {
1939                         /*
1940                          * Update the cached control value (if any) and
1941                          * record the new capability setting.
1942                          */
1943                         if (pptr != NULL) {
1944                                 *pptr = baseval;
1945                         }
1946
1947                         if (val) {
1948                                 vmx->cap[vcpu].set |= (1 << type);
1949                         } else {
1950                                 vmx->cap[vcpu].set &= ~(1 << type);
1951                         }
1952                 }
1953         }
1954
1955         return (retval);
1956 }
1957
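/*
 * Operations vector through which the machine-independent vmm code drives
 * the Intel VT-x implementation.
 */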
1958 struct vmm_ops vmm_ops_intel = {
1959         vmx_init,
1960         vmx_cleanup,
1961         vmx_vminit,
1962         vmx_run,
1963         vmx_vmcleanup,
1964         vmx_getreg,
1965         vmx_setreg,
1966         vmx_getdesc,
1967         vmx_setdesc,
1968         vmx_inject,
1969         vmx_getcap,
1970         vmx_setcap,
1971         ept_vmspace_alloc,
1972         ept_vmspace_free,
1973 };