]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/amd64/vmm/intel/vmx.c
vlapic code restructuring to make it easy to support hardware-assist for APIC
[FreeBSD/FreeBSD.git] / sys / amd64 / vmm / intel / vmx.c
1 /*-
2  * Copyright (c) 2011 NetApp, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/smp.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/pcpu.h>
38 #include <sys/proc.h>
39 #include <sys/sysctl.h>
40
41 #include <vm/vm.h>
42 #include <vm/pmap.h>
43
44 #include <machine/psl.h>
45 #include <machine/cpufunc.h>
46 #include <machine/md_var.h>
47 #include <machine/segments.h>
48 #include <machine/specialreg.h>
49 #include <machine/vmparam.h>
50
51 #include <machine/vmm.h>
52 #include "vmm_host.h"
53 #include "vmm_msr.h"
54 #include "vmm_ktr.h"
55 #include "vmm_stat.h"
56 #include "vlapic.h"
57 #include "vlapic_priv.h"
58
59 #include "vmx_msr.h"
60 #include "ept.h"
61 #include "vmx_cpufunc.h"
62 #include "vmx.h"
63 #include "x86.h"
64 #include "vmx_controls.h"
65
66 #define PINBASED_CTLS_ONE_SETTING                                       \
67         (PINBASED_EXTINT_EXITING        |                               \
68          PINBASED_NMI_EXITING           |                               \
69          PINBASED_VIRTUAL_NMI)
70 #define PINBASED_CTLS_ZERO_SETTING      0
71
72 #define PROCBASED_CTLS_WINDOW_SETTING                                   \
73         (PROCBASED_INT_WINDOW_EXITING   |                               \
74          PROCBASED_NMI_WINDOW_EXITING)
75
76 #define PROCBASED_CTLS_ONE_SETTING                                      \
77         (PROCBASED_SECONDARY_CONTROLS   |                               \
78          PROCBASED_IO_EXITING           |                               \
79          PROCBASED_MSR_BITMAPS          |                               \
80          PROCBASED_CTLS_WINDOW_SETTING)
81 #define PROCBASED_CTLS_ZERO_SETTING     \
82         (PROCBASED_CR3_LOAD_EXITING |   \
83         PROCBASED_CR3_STORE_EXITING |   \
84         PROCBASED_IO_BITMAPS)
85
86 #define PROCBASED_CTLS2_ONE_SETTING     PROCBASED2_ENABLE_EPT
87 #define PROCBASED_CTLS2_ZERO_SETTING    0
88
89 #define VM_EXIT_CTLS_ONE_SETTING_NO_PAT                                 \
90         (VM_EXIT_HOST_LMA                       |                       \
91         VM_EXIT_SAVE_EFER                       |                       \
92         VM_EXIT_LOAD_EFER)
93
94 #define VM_EXIT_CTLS_ONE_SETTING                                        \
95         (VM_EXIT_CTLS_ONE_SETTING_NO_PAT        |                       \
96         VM_EXIT_SAVE_PAT                        |                       \
97         VM_EXIT_LOAD_PAT)
98 #define VM_EXIT_CTLS_ZERO_SETTING       VM_EXIT_SAVE_DEBUG_CONTROLS
99
100 #define VM_ENTRY_CTLS_ONE_SETTING_NO_PAT        VM_ENTRY_LOAD_EFER
101
102 #define VM_ENTRY_CTLS_ONE_SETTING                                       \
103         (VM_ENTRY_CTLS_ONE_SETTING_NO_PAT       |                       \
104         VM_ENTRY_LOAD_PAT)
105 #define VM_ENTRY_CTLS_ZERO_SETTING                                      \
106         (VM_ENTRY_LOAD_DEBUG_CONTROLS           |                       \
107         VM_ENTRY_INTO_SMM                       |                       \
108         VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
109
110 #define guest_msr_rw(vmx, msr) \
111         msr_bitmap_change_access((vmx)->msr_bitmap, (msr), MSR_BITMAP_ACCESS_RW)
112
113 #define HANDLED         1
114 #define UNHANDLED       0
115
116 static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
117 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
118
119 SYSCTL_DECL(_hw_vmm);
120 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
121
122 int vmxon_enabled[MAXCPU];
123 static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
124
125 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
126 static uint32_t exit_ctls, entry_ctls;
127
128 static uint64_t cr0_ones_mask, cr0_zeros_mask;
129 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
130              &cr0_ones_mask, 0, NULL);
131 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
132              &cr0_zeros_mask, 0, NULL);
133
134 static uint64_t cr4_ones_mask, cr4_zeros_mask;
135 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
136              &cr4_ones_mask, 0, NULL);
137 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
138              &cr4_zeros_mask, 0, NULL);
139
140 static int vmx_no_patmsr;
141
142 static int vmx_initialized;
143 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
144            &vmx_initialized, 0, "Intel VMX initialized");
145
146 /*
147  * Virtual NMI blocking conditions.
148  *
149  * Some processor implementations also require NMI to be blocked if
150  * the STI_BLOCKING bit is set. It is possible to detect this at runtime
151  * based on the (exit_reason,exit_qual) tuple being set to 
152  * (EXIT_REASON_INVAL_VMCS, EXIT_QUAL_NMI_WHILE_STI_BLOCKING).
153  *
154  * We take the easy way out and also include STI_BLOCKING as one of the
155  * gating items for vNMI injection.
156  */
157 static uint64_t nmi_blocking_bits = VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING |
158                                     VMCS_INTERRUPTIBILITY_NMI_BLOCKING |
159                                     VMCS_INTERRUPTIBILITY_STI_BLOCKING;
160
161 /*
162  * Optional capabilities
163  */
164 static int cap_halt_exit;
165 static int cap_pause_exit;
166 static int cap_unrestricted_guest;
167 static int cap_monitor_trap;
168 static int cap_invpcid;
169  
170 static struct unrhdr *vpid_unr;
171 static u_int vpid_alloc_failed;
172 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
173             &vpid_alloc_failed, 0, NULL);
174
175 #ifdef KTR
176 static const char *
177 exit_reason_to_str(int reason)
178 {
179         static char reasonbuf[32];
180
181         switch (reason) {
182         case EXIT_REASON_EXCEPTION:
183                 return "exception";
184         case EXIT_REASON_EXT_INTR:
185                 return "extint";
186         case EXIT_REASON_TRIPLE_FAULT:
187                 return "triplefault";
188         case EXIT_REASON_INIT:
189                 return "init";
190         case EXIT_REASON_SIPI:
191                 return "sipi";
192         case EXIT_REASON_IO_SMI:
193                 return "iosmi";
194         case EXIT_REASON_SMI:
195                 return "smi";
196         case EXIT_REASON_INTR_WINDOW:
197                 return "intrwindow";
198         case EXIT_REASON_NMI_WINDOW:
199                 return "nmiwindow";
200         case EXIT_REASON_TASK_SWITCH:
201                 return "taskswitch";
202         case EXIT_REASON_CPUID:
203                 return "cpuid";
204         case EXIT_REASON_GETSEC:
205                 return "getsec";
206         case EXIT_REASON_HLT:
207                 return "hlt";
208         case EXIT_REASON_INVD:
209                 return "invd";
210         case EXIT_REASON_INVLPG:
211                 return "invlpg";
212         case EXIT_REASON_RDPMC:
213                 return "rdpmc";
214         case EXIT_REASON_RDTSC:
215                 return "rdtsc";
216         case EXIT_REASON_RSM:
217                 return "rsm";
218         case EXIT_REASON_VMCALL:
219                 return "vmcall";
220         case EXIT_REASON_VMCLEAR:
221                 return "vmclear";
222         case EXIT_REASON_VMLAUNCH:
223                 return "vmlaunch";
224         case EXIT_REASON_VMPTRLD:
225                 return "vmptrld";
226         case EXIT_REASON_VMPTRST:
227                 return "vmptrst";
228         case EXIT_REASON_VMREAD:
229                 return "vmread";
230         case EXIT_REASON_VMRESUME:
231                 return "vmresume";
232         case EXIT_REASON_VMWRITE:
233                 return "vmwrite";
234         case EXIT_REASON_VMXOFF:
235                 return "vmxoff";
236         case EXIT_REASON_VMXON:
237                 return "vmxon";
238         case EXIT_REASON_CR_ACCESS:
239                 return "craccess";
240         case EXIT_REASON_DR_ACCESS:
241                 return "draccess";
242         case EXIT_REASON_INOUT:
243                 return "inout";
244         case EXIT_REASON_RDMSR:
245                 return "rdmsr";
246         case EXIT_REASON_WRMSR:
247                 return "wrmsr";
248         case EXIT_REASON_INVAL_VMCS:
249                 return "invalvmcs";
250         case EXIT_REASON_INVAL_MSR:
251                 return "invalmsr";
252         case EXIT_REASON_MWAIT:
253                 return "mwait";
254         case EXIT_REASON_MTF:
255                 return "mtf";
256         case EXIT_REASON_MONITOR:
257                 return "monitor";
258         case EXIT_REASON_PAUSE:
259                 return "pause";
260         case EXIT_REASON_MCE:
261                 return "mce";
262         case EXIT_REASON_TPR:
263                 return "tpr";
264         case EXIT_REASON_APIC:
265                 return "apic";
266         case EXIT_REASON_GDTR_IDTR:
267                 return "gdtridtr";
268         case EXIT_REASON_LDTR_TR:
269                 return "ldtrtr";
270         case EXIT_REASON_EPT_FAULT:
271                 return "eptfault";
272         case EXIT_REASON_EPT_MISCONFIG:
273                 return "eptmisconfig";
274         case EXIT_REASON_INVEPT:
275                 return "invept";
276         case EXIT_REASON_RDTSCP:
277                 return "rdtscp";
278         case EXIT_REASON_VMX_PREEMPT:
279                 return "vmxpreempt";
280         case EXIT_REASON_INVVPID:
281                 return "invvpid";
282         case EXIT_REASON_WBINVD:
283                 return "wbinvd";
284         case EXIT_REASON_XSETBV:
285                 return "xsetbv";
286         default:
287                 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
288                 return (reasonbuf);
289         }
290 }
291
292 #ifdef SETJMP_TRACE
293 static const char *
294 vmx_setjmp_rc2str(int rc)
295 {
296         switch (rc) {
297         case VMX_RETURN_DIRECT:
298                 return "direct";
299         case VMX_RETURN_LONGJMP:
300                 return "longjmp";
301         case VMX_RETURN_VMRESUME:
302                 return "vmresume";
303         case VMX_RETURN_VMLAUNCH:
304                 return "vmlaunch";
305         case VMX_RETURN_AST:
306                 return "ast";
307         default:
308                 return "unknown";
309         }
310 }
311
312 #define SETJMP_TRACE(vmx, vcpu, vmxctx, regname)                            \
313         VCPU_CTR1((vmx)->vm, (vcpu), "setjmp trace " #regname " 0x%016lx",  \
314                  (vmxctx)->regname)
315
316 static void
317 vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)
318 {
319         uint64_t host_rip, host_rsp;
320
321         if (vmxctx != &vmx->ctx[vcpu])
322                 panic("vmx_setjmp_trace: invalid vmxctx %p; should be %p",
323                         vmxctx, &vmx->ctx[vcpu]);
324
325         VCPU_CTR1((vmx)->vm, (vcpu), "vmxctx = %p", vmxctx);
326         VCPU_CTR2((vmx)->vm, (vcpu), "setjmp return code %s(%d)",
327                  vmx_setjmp_rc2str(rc), rc);
328
329         host_rip = vmcs_read(VMCS_HOST_RIP);
330         host_rsp = vmcs_read(VMCS_HOST_RSP);
331         VCPU_CTR2((vmx)->vm, (vcpu), "vmcs host_rip 0x%016lx, host_rsp %#lx",
332                  host_rip, host_rsp);
333
334         SETJMP_TRACE(vmx, vcpu, vmxctx, host_r15);
335         SETJMP_TRACE(vmx, vcpu, vmxctx, host_r14);
336         SETJMP_TRACE(vmx, vcpu, vmxctx, host_r13);
337         SETJMP_TRACE(vmx, vcpu, vmxctx, host_r12);
338         SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbp);
339         SETJMP_TRACE(vmx, vcpu, vmxctx, host_rsp);
340         SETJMP_TRACE(vmx, vcpu, vmxctx, host_rbx);
341         SETJMP_TRACE(vmx, vcpu, vmxctx, host_rip);
342
343         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdi);
344         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rsi);
345         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rdx);
346         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rcx);
347         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r8);
348         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r9);
349         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rax);
350         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbx);
351         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_rbp);
352         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r10);
353         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r11);
354         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r12);
355         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r13);
356         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r14);
357         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_r15);
358         SETJMP_TRACE(vmx, vcpu, vmxctx, guest_cr2);
359 }
360 #endif
361 #else
362 static void __inline
363 vmx_setjmp_trace(struct vmx *vmx, int vcpu, struct vmxctx *vmxctx, int rc)
364 {
365         return;
366 }
367 #endif  /* KTR */
368
369 u_long
370 vmx_fix_cr0(u_long cr0)
371 {
372
373         return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
374 }
375
376 u_long
377 vmx_fix_cr4(u_long cr4)
378 {
379
380         return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
381 }
382
383 static void
384 vpid_free(int vpid)
385 {
386         if (vpid < 0 || vpid > 0xffff)
387                 panic("vpid_free: invalid vpid %d", vpid);
388
389         /*
390          * VPIDs [0,VM_MAXCPU] are special and are not allocated from
391          * the unit number allocator.
392          */
393
394         if (vpid > VM_MAXCPU)
395                 free_unr(vpid_unr, vpid);
396 }
397
398 static void
399 vpid_alloc(uint16_t *vpid, int num)
400 {
401         int i, x;
402
403         if (num <= 0 || num > VM_MAXCPU)
404                 panic("invalid number of vpids requested: %d", num);
405
406         /*
407          * If the "enable vpid" execution control is not enabled then the
408          * VPID is required to be 0 for all vcpus.
409          */
410         if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
411                 for (i = 0; i < num; i++)
412                         vpid[i] = 0;
413                 return;
414         }
415
416         /*
417          * Allocate a unique VPID for each vcpu from the unit number allocator.
418          */
419         for (i = 0; i < num; i++) {
420                 x = alloc_unr(vpid_unr);
421                 if (x == -1)
422                         break;
423                 else
424                         vpid[i] = x;
425         }
426
427         if (i < num) {
428                 atomic_add_int(&vpid_alloc_failed, 1);
429
430                 /*
431                  * If the unit number allocator does not have enough unique
432                  * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
433                  *
434                  * These VPIDs are not be unique across VMs but this does not
435                  * affect correctness because the combined mappings are also
436                  * tagged with the EP4TA which is unique for each VM.
437                  *
438                  * It is still sub-optimal because the invvpid will invalidate
439                  * combined mappings for a particular VPID across all EP4TAs.
440                  */
441                 while (i-- > 0)
442                         vpid_free(vpid[i]);
443
444                 for (i = 0; i < num; i++)
445                         vpid[i] = i + 1;
446         }
447 }
448
449 static void
450 vpid_init(void)
451 {
452         /*
453          * VPID 0 is required when the "enable VPID" execution control is
454          * disabled.
455          *
456          * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
457          * unit number allocator does not have sufficient unique VPIDs to
458          * satisfy the allocation.
459          *
460          * The remaining VPIDs are managed by the unit number allocator.
461          */
462         vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
463 }
464
465 static void
466 msr_save_area_init(struct msr_entry *g_area, int *g_count)
467 {
468         int cnt;
469
470         static struct msr_entry guest_msrs[] = {
471                 { MSR_KGSBASE, 0, 0 },
472         };
473
474         cnt = sizeof(guest_msrs) / sizeof(guest_msrs[0]);
475         if (cnt > GUEST_MSR_MAX_ENTRIES)
476                 panic("guest msr save area overrun");
477         bcopy(guest_msrs, g_area, sizeof(guest_msrs));
478         *g_count = cnt;
479 }
480
481 static void
482 vmx_disable(void *arg __unused)
483 {
484         struct invvpid_desc invvpid_desc = { 0 };
485         struct invept_desc invept_desc = { 0 };
486
487         if (vmxon_enabled[curcpu]) {
488                 /*
489                  * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
490                  *
491                  * VMXON or VMXOFF are not required to invalidate any TLB
492                  * caching structures. This prevents potential retention of
493                  * cached information in the TLB between distinct VMX episodes.
494                  */
495                 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
496                 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
497                 vmxoff();
498         }
499         load_cr4(rcr4() & ~CR4_VMXE);
500 }
501
502 static int
503 vmx_cleanup(void)
504 {
505
506         if (vpid_unr != NULL) {
507                 delete_unrhdr(vpid_unr);
508                 vpid_unr = NULL;
509         }
510
511         smp_rendezvous(NULL, vmx_disable, NULL, NULL);
512
513         return (0);
514 }
515
516 static void
517 vmx_enable(void *arg __unused)
518 {
519         int error;
520
521         load_cr4(rcr4() | CR4_VMXE);
522
523         *(uint32_t *)vmxon_region[curcpu] = vmx_revision();
524         error = vmxon(vmxon_region[curcpu]);
525         if (error == 0)
526                 vmxon_enabled[curcpu] = 1;
527 }
528
529 static void
530 vmx_restore(void)
531 {
532
533         if (vmxon_enabled[curcpu])
534                 vmxon(vmxon_region[curcpu]);
535 }
536
537 static int
538 vmx_init(void)
539 {
540         int error;
541         uint64_t fixed0, fixed1, feature_control;
542         uint32_t tmp;
543
544         /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
545         if (!(cpu_feature2 & CPUID2_VMX)) {
546                 printf("vmx_init: processor does not support VMX operation\n");
547                 return (ENXIO);
548         }
549
550         /*
551          * Verify that MSR_IA32_FEATURE_CONTROL lock and VMXON enable bits
552          * are set (bits 0 and 2 respectively).
553          */
554         feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
555         if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
556             (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
557                 printf("vmx_init: VMX operation disabled by BIOS\n");
558                 return (ENXIO);
559         }
560
561         /* Check support for primary processor-based VM-execution controls */
562         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
563                                MSR_VMX_TRUE_PROCBASED_CTLS,
564                                PROCBASED_CTLS_ONE_SETTING,
565                                PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
566         if (error) {
567                 printf("vmx_init: processor does not support desired primary "
568                        "processor-based controls\n");
569                 return (error);
570         }
571
572         /* Clear the processor-based ctl bits that are set on demand */
573         procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
574
575         /* Check support for secondary processor-based VM-execution controls */
576         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
577                                MSR_VMX_PROCBASED_CTLS2,
578                                PROCBASED_CTLS2_ONE_SETTING,
579                                PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
580         if (error) {
581                 printf("vmx_init: processor does not support desired secondary "
582                        "processor-based controls\n");
583                 return (error);
584         }
585
586         /* Check support for VPID */
587         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
588                                PROCBASED2_ENABLE_VPID, 0, &tmp);
589         if (error == 0)
590                 procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
591
592         /* Check support for pin-based VM-execution controls */
593         error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
594                                MSR_VMX_TRUE_PINBASED_CTLS,
595                                PINBASED_CTLS_ONE_SETTING,
596                                PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
597         if (error) {
598                 printf("vmx_init: processor does not support desired "
599                        "pin-based controls\n");
600                 return (error);
601         }
602
603         /* Check support for VM-exit controls */
604         error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
605                                VM_EXIT_CTLS_ONE_SETTING,
606                                VM_EXIT_CTLS_ZERO_SETTING,
607                                &exit_ctls);
608         if (error) {
609                 /* Try again without the PAT MSR bits */
610                 error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS,
611                                        MSR_VMX_TRUE_EXIT_CTLS,
612                                        VM_EXIT_CTLS_ONE_SETTING_NO_PAT,
613                                        VM_EXIT_CTLS_ZERO_SETTING,
614                                        &exit_ctls);
615                 if (error) {
616                         printf("vmx_init: processor does not support desired "
617                                "exit controls\n");
618                         return (error);
619                 } else {
620                         if (bootverbose)
621                                 printf("vmm: PAT MSR access not supported\n");
622                         guest_msr_valid(MSR_PAT);
623                         vmx_no_patmsr = 1;
624                 }
625         }
626
627         /* Check support for VM-entry controls */
628         if (!vmx_no_patmsr) {
629                 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
630                                        MSR_VMX_TRUE_ENTRY_CTLS,
631                                        VM_ENTRY_CTLS_ONE_SETTING,
632                                        VM_ENTRY_CTLS_ZERO_SETTING,
633                                        &entry_ctls);
634         } else {
635                 error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS,
636                                        MSR_VMX_TRUE_ENTRY_CTLS,
637                                        VM_ENTRY_CTLS_ONE_SETTING_NO_PAT,
638                                        VM_ENTRY_CTLS_ZERO_SETTING,
639                                        &entry_ctls);
640         }
641
642         if (error) {
643                 printf("vmx_init: processor does not support desired "
644                        "entry controls\n");
645                        return (error);
646         }
647
648         /*
649          * Check support for optional features by testing them
650          * as individual bits
651          */
652         cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
653                                         MSR_VMX_TRUE_PROCBASED_CTLS,
654                                         PROCBASED_HLT_EXITING, 0,
655                                         &tmp) == 0);
656
657         cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
658                                         MSR_VMX_PROCBASED_CTLS,
659                                         PROCBASED_MTF, 0,
660                                         &tmp) == 0);
661
662         cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
663                                          MSR_VMX_TRUE_PROCBASED_CTLS,
664                                          PROCBASED_PAUSE_EXITING, 0,
665                                          &tmp) == 0);
666
667         cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
668                                         MSR_VMX_PROCBASED_CTLS2,
669                                         PROCBASED2_UNRESTRICTED_GUEST, 0,
670                                         &tmp) == 0);
671
672         cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
673             MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
674             &tmp) == 0);
675
676
677         /* Initialize EPT */
678         error = ept_init();
679         if (error) {
680                 printf("vmx_init: ept initialization failed (%d)\n", error);
681                 return (error);
682         }
683
684         /*
685          * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
686          */
687         fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
688         fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
689         cr0_ones_mask = fixed0 & fixed1;
690         cr0_zeros_mask = ~fixed0 & ~fixed1;
691
692         /*
693          * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
694          * if unrestricted guest execution is allowed.
695          */
696         if (cap_unrestricted_guest)
697                 cr0_ones_mask &= ~(CR0_PG | CR0_PE);
698
699         /*
700          * Do not allow the guest to set CR0_NW or CR0_CD.
701          */
702         cr0_zeros_mask |= (CR0_NW | CR0_CD);
703
704         fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
705         fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
706         cr4_ones_mask = fixed0 & fixed1;
707         cr4_zeros_mask = ~fixed0 & ~fixed1;
708
709         vpid_init();
710
711         /* enable VMX operation */
712         smp_rendezvous(NULL, vmx_enable, NULL, NULL);
713
714         vmx_initialized = 1;
715
716         return (0);
717 }
718
719 static int
720 vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
721 {
722         int error, mask_ident, shadow_ident;
723         uint64_t mask_value;
724
725         if (which != 0 && which != 4)
726                 panic("vmx_setup_cr_shadow: unknown cr%d", which);
727
728         if (which == 0) {
729                 mask_ident = VMCS_CR0_MASK;
730                 mask_value = cr0_ones_mask | cr0_zeros_mask;
731                 shadow_ident = VMCS_CR0_SHADOW;
732         } else {
733                 mask_ident = VMCS_CR4_MASK;
734                 mask_value = cr4_ones_mask | cr4_zeros_mask;
735                 shadow_ident = VMCS_CR4_SHADOW;
736         }
737
738         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
739         if (error)
740                 return (error);
741
742         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
743         if (error)
744                 return (error);
745
746         return (0);
747 }
748 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init))
749 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
750
751 static void *
752 vmx_vminit(struct vm *vm, pmap_t pmap)
753 {
754         uint16_t vpid[VM_MAXCPU];
755         int i, error, guest_msr_count;
756         struct vmx *vmx;
757
758         vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
759         if ((uintptr_t)vmx & PAGE_MASK) {
760                 panic("malloc of struct vmx not aligned on %d byte boundary",
761                       PAGE_SIZE);
762         }
763         vmx->vm = vm;
764
765         vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
766
767         /*
768          * Clean up EPTP-tagged guest physical and combined mappings
769          *
770          * VMX transitions are not required to invalidate any guest physical
771          * mappings. So, it may be possible for stale guest physical mappings
772          * to be present in the processor TLBs.
773          *
774          * Combined mappings for this EP4TA are also invalidated for all VPIDs.
775          */
776         ept_invalidate_mappings(vmx->eptp);
777
778         msr_bitmap_initialize(vmx->msr_bitmap);
779
780         /*
781          * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
782          * The guest FSBASE and GSBASE are saved and restored during
783          * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
784          * always restored from the vmcs host state area on vm-exit.
785          *
786          * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
787          * how they are saved/restored so can be directly accessed by the
788          * guest.
789          *
790          * Guest KGSBASE is saved and restored in the guest MSR save area.
791          * Host KGSBASE is restored before returning to userland from the pcb.
792          * There will be a window of time when we are executing in the host
793          * kernel context with a value of KGSBASE from the guest. This is ok
794          * because the value of KGSBASE is inconsequential in kernel context.
795          *
796          * MSR_EFER is saved and restored in the guest VMCS area on a
797          * VM exit and entry respectively. It is also restored from the
798          * host VMCS area on a VM exit.
799          */
800         if (guest_msr_rw(vmx, MSR_GSBASE) ||
801             guest_msr_rw(vmx, MSR_FSBASE) ||
802             guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
803             guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
804             guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
805             guest_msr_rw(vmx, MSR_KGSBASE) ||
806             guest_msr_rw(vmx, MSR_EFER))
807                 panic("vmx_vminit: error setting guest msr access");
808
809         /*
810          * MSR_PAT is saved and restored in the guest VMCS are on a VM exit
811          * and entry respectively. It is also restored from the host VMCS
812          * area on a VM exit. However, if running on a system with no
813          * MSR_PAT save/restore support, leave access disabled so accesses
814          * will be trapped.
815          */
816         if (!vmx_no_patmsr && guest_msr_rw(vmx, MSR_PAT))
817                 panic("vmx_vminit: error setting guest pat msr access");
818
819         vpid_alloc(vpid, VM_MAXCPU);
820
821         for (i = 0; i < VM_MAXCPU; i++) {
822                 vmx->vmcs[i].identifier = vmx_revision();
823                 error = vmclear(&vmx->vmcs[i]);
824                 if (error != 0) {
825                         panic("vmx_vminit: vmclear error %d on vcpu %d\n",
826                               error, i);
827                 }
828
829                 error = vmcs_set_defaults(&vmx->vmcs[i],
830                                           (u_long)vmx_longjmp,
831                                           (u_long)&vmx->ctx[i],
832                                           vmx->eptp,
833                                           pinbased_ctls,
834                                           procbased_ctls,
835                                           procbased_ctls2,
836                                           exit_ctls, entry_ctls,
837                                           vtophys(vmx->msr_bitmap),
838                                           vpid[i]);
839
840                 if (error != 0)
841                         panic("vmx_vminit: vmcs_set_defaults error %d", error);
842
843                 vmx->cap[i].set = 0;
844                 vmx->cap[i].proc_ctls = procbased_ctls;
845                 vmx->cap[i].proc_ctls2 = procbased_ctls2;
846
847                 vmx->state[i].lastcpu = -1;
848                 vmx->state[i].vpid = vpid[i];
849
850                 msr_save_area_init(vmx->guest_msrs[i], &guest_msr_count);
851
852                 error = vmcs_set_msr_save(&vmx->vmcs[i],
853                                           vtophys(vmx->guest_msrs[i]),
854                                           guest_msr_count);
855                 if (error != 0)
856                         panic("vmcs_set_msr_save error %d", error);
857
858                 /*
859                  * Set up the CR0/4 shadows, and init the read shadow
860                  * to the power-on register value from the Intel Sys Arch.
861                  *  CR0 - 0x60000010
862                  *  CR4 - 0
863                  */
864                 error = vmx_setup_cr0_shadow(&vmx->vmcs[i], 0x60000010);
865                 if (error != 0)
866                         panic("vmx_setup_cr0_shadow %d", error);
867
868                 error = vmx_setup_cr4_shadow(&vmx->vmcs[i], 0);
869                 if (error != 0)
870                         panic("vmx_setup_cr4_shadow %d", error);
871
872                 vmx->ctx[i].pmap = pmap;
873                 vmx->ctx[i].eptp = vmx->eptp;
874         }
875
876         return (vmx);
877 }
878
879 static int
880 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
881 {
882         int handled, func;
883         
884         func = vmxctx->guest_rax;
885
886         handled = x86_emulate_cpuid(vm, vcpu,
887                                     (uint32_t*)(&vmxctx->guest_rax),
888                                     (uint32_t*)(&vmxctx->guest_rbx),
889                                     (uint32_t*)(&vmxctx->guest_rcx),
890                                     (uint32_t*)(&vmxctx->guest_rdx));
891         return (handled);
892 }
893
894 static __inline void
895 vmx_run_trace(struct vmx *vmx, int vcpu)
896 {
897 #ifdef KTR
898         VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
899 #endif
900 }
901
902 static __inline void
903 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
904                int handled)
905 {
906 #ifdef KTR
907         VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
908                  handled ? "handled" : "unhandled",
909                  exit_reason_to_str(exit_reason), rip);
910 #endif
911 }
912
913 static __inline void
914 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
915 {
916 #ifdef KTR
917         VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
918 #endif
919 }
920
921 static void
922 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu)
923 {
924         int lastcpu;
925         struct vmxstate *vmxstate;
926         struct invvpid_desc invvpid_desc = { 0 };
927
928         vmxstate = &vmx->state[vcpu];
929         lastcpu = vmxstate->lastcpu;
930         vmxstate->lastcpu = curcpu;
931
932         if (lastcpu == curcpu)
933                 return;
934
935         vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
936
937         vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
938         vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
939         vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
940
941         /*
942          * If we are using VPIDs then invalidate all mappings tagged with 'vpid'
943          *
944          * We do this because this vcpu was executing on a different host
945          * cpu when it last ran. We do not track whether it invalidated
946          * mappings associated with its 'vpid' during that run. So we must
947          * assume that the mappings associated with 'vpid' on 'curcpu' are
948          * stale and invalidate them.
949          *
950          * Note that we incur this penalty only when the scheduler chooses to
951          * move the thread associated with this vcpu between host cpus.
952          *
953          * Note also that this will invalidate mappings tagged with 'vpid'
954          * for "all" EP4TAs.
955          */
956         if (vmxstate->vpid != 0) {
957                 invvpid_desc.vpid = vmxstate->vpid;
958                 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
959         }
960 }
961
962 /*
963  * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
964  */
965 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
966
967 static void __inline
968 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
969 {
970
971         vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
972         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
973 }
974
975 static void __inline
976 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
977 {
978
979         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
980         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
981 }
982
983 static void __inline
984 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
985 {
986
987         vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
988         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
989 }
990
991 static void __inline
992 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
993 {
994
995         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
996         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
997 }
998
999 static int
1000 vmx_inject_nmi(struct vmx *vmx, int vcpu)
1001 {
1002         uint64_t info, interruptibility;
1003
1004         /* Bail out if no NMI requested */
1005         if (!vm_nmi_pending(vmx->vm, vcpu))
1006                 return (0);
1007
1008         interruptibility = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1009         if (interruptibility & nmi_blocking_bits)
1010                 goto nmiblocked;
1011
1012         /*
1013          * Inject the virtual NMI. The vector must be the NMI IDT entry
1014          * or the VMCS entry check will fail.
1015          */
1016         info = VMCS_INTERRUPTION_INFO_NMI | VMCS_INTERRUPTION_INFO_VALID;
1017         info |= IDT_NMI;
1018         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1019
1020         VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1021
1022         /* Clear the request */
1023         vm_nmi_clear(vmx->vm, vcpu);
1024         return (1);
1025
1026 nmiblocked:
1027         /*
1028          * Set the NMI Window Exiting execution control so we can inject
1029          * the virtual NMI as soon as blocking condition goes away.
1030          */
1031         vmx_set_nmi_window_exiting(vmx, vcpu);
1032
1033         VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1034         return (1);
1035 }
1036
1037 static void
1038 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic)
1039 {
1040         int vector;
1041         uint64_t info, rflags, interruptibility;
1042
1043         const int HWINTR_BLOCKED = VMCS_INTERRUPTIBILITY_STI_BLOCKING |
1044                                    VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING;
1045
1046         /*
1047          * If there is already an interrupt pending then just return.
1048          *
1049          * This could happen if an interrupt was injected on a prior
1050          * VM entry but the actual entry into guest mode was aborted
1051          * because of a pending AST.
1052          */
1053         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1054         if (info & VMCS_INTERRUPTION_INFO_VALID)
1055                 return;
1056
1057         /*
1058          * NMI injection has priority so deal with those first
1059          */
1060         if (vmx_inject_nmi(vmx, vcpu))
1061                 return;
1062
1063         /* Ask the local apic for a vector to inject */
1064         vector = vlapic_pending_intr(vlapic);
1065         if (vector < 0)
1066                 return;
1067
1068         if (vector < 32 || vector > 255)
1069                 panic("vmx_inject_interrupts: invalid vector %d\n", vector);
1070
1071         /* Check RFLAGS.IF and the interruptibility state of the guest */
1072         rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1073         if ((rflags & PSL_I) == 0)
1074                 goto cantinject;
1075
1076         interruptibility = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1077         if (interruptibility & HWINTR_BLOCKED)
1078                 goto cantinject;
1079
1080         /* Inject the interrupt */
1081         info = VMCS_INTERRUPTION_INFO_HW_INTR | VMCS_INTERRUPTION_INFO_VALID;
1082         info |= vector;
1083         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1084
1085         /* Update the Local APIC ISR */
1086         vlapic_intr_accepted(vlapic, vector);
1087
1088         VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1089
1090         return;
1091
1092 cantinject:
1093         /*
1094          * Set the Interrupt Window Exiting execution control so we can inject
1095          * the interrupt as soon as blocking condition goes away.
1096          */
1097         vmx_set_int_window_exiting(vmx, vcpu);
1098
1099         VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1100 }
1101
1102 static int
1103 vmx_emulate_cr_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1104 {
1105         int cr, vmcs_guest_cr, vmcs_shadow_cr;
1106         uint64_t crval, regval, ones_mask, zeros_mask;
1107         const struct vmxctx *vmxctx;
1108
1109         /* We only handle mov to %cr0 or %cr4 at this time */
1110         if ((exitqual & 0xf0) != 0x00)
1111                 return (UNHANDLED);
1112
1113         cr = exitqual & 0xf;
1114         if (cr != 0 && cr != 4)
1115                 return (UNHANDLED);
1116
1117         vmxctx = &vmx->ctx[vcpu];
1118
1119         /*
1120          * We must use vmcs_write() directly here because vmcs_setreg() will
1121          * call vmclear(vmcs) as a side-effect which we certainly don't want.
1122          */
1123         switch ((exitqual >> 8) & 0xf) {
1124         case 0:
1125                 regval = vmxctx->guest_rax;
1126                 break;
1127         case 1:
1128                 regval = vmxctx->guest_rcx;
1129                 break;
1130         case 2:
1131                 regval = vmxctx->guest_rdx;
1132                 break;
1133         case 3:
1134                 regval = vmxctx->guest_rbx;
1135                 break;
1136         case 4:
1137                 regval = vmcs_read(VMCS_GUEST_RSP);
1138                 break;
1139         case 5:
1140                 regval = vmxctx->guest_rbp;
1141                 break;
1142         case 6:
1143                 regval = vmxctx->guest_rsi;
1144                 break;
1145         case 7:
1146                 regval = vmxctx->guest_rdi;
1147                 break;
1148         case 8:
1149                 regval = vmxctx->guest_r8;
1150                 break;
1151         case 9:
1152                 regval = vmxctx->guest_r9;
1153                 break;
1154         case 10:
1155                 regval = vmxctx->guest_r10;
1156                 break;
1157         case 11:
1158                 regval = vmxctx->guest_r11;
1159                 break;
1160         case 12:
1161                 regval = vmxctx->guest_r12;
1162                 break;
1163         case 13:
1164                 regval = vmxctx->guest_r13;
1165                 break;
1166         case 14:
1167                 regval = vmxctx->guest_r14;
1168                 break;
1169         case 15:
1170                 regval = vmxctx->guest_r15;
1171                 break;
1172         }
1173
1174         if (cr == 0) {
1175                 ones_mask = cr0_ones_mask;
1176                 zeros_mask = cr0_zeros_mask;
1177                 vmcs_guest_cr = VMCS_GUEST_CR0;
1178                 vmcs_shadow_cr = VMCS_CR0_SHADOW;
1179         } else {
1180                 ones_mask = cr4_ones_mask;
1181                 zeros_mask = cr4_zeros_mask;
1182                 vmcs_guest_cr = VMCS_GUEST_CR4;
1183                 vmcs_shadow_cr = VMCS_CR4_SHADOW;
1184         }
1185         vmcs_write(vmcs_shadow_cr, regval);
1186
1187         crval = regval | ones_mask;
1188         crval &= ~zeros_mask;
1189         vmcs_write(vmcs_guest_cr, crval);
1190
1191         if (cr == 0 && regval & CR0_PG) {
1192                 uint64_t efer, entry_ctls;
1193
1194                 /*
1195                  * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1196                  * the "IA-32e mode guest" bit in VM-entry control must be
1197                  * equal.
1198                  */
1199                 efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1200                 if (efer & EFER_LME) {
1201                         efer |= EFER_LMA;
1202                         vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1203                         entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1204                         entry_ctls |= VM_ENTRY_GUEST_LMA;
1205                         vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1206                 }
1207         }
1208
1209         return (HANDLED);
1210 }
1211
1212 static int
1213 ept_fault_type(uint64_t ept_qual)
1214 {
1215         int fault_type;
1216
1217         if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1218                 fault_type = VM_PROT_WRITE;
1219         else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1220                 fault_type = VM_PROT_EXECUTE;
1221         else
1222                 fault_type= VM_PROT_READ;
1223
1224         return (fault_type);
1225 }
1226
1227 static boolean_t
1228 ept_emulation_fault(uint64_t ept_qual)
1229 {
1230         int read, write;
1231
1232         /* EPT fault on an instruction fetch doesn't make sense here */
1233         if (ept_qual & EPT_VIOLATION_INST_FETCH)
1234                 return (FALSE);
1235
1236         /* EPT fault must be a read fault or a write fault */
1237         read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1238         write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1239         if ((read | write) == 0)
1240                 return (FALSE);
1241
1242         /*
1243          * The EPT violation must have been caused by accessing a
1244          * guest-physical address that is a translation of a guest-linear
1245          * address.
1246          */
1247         if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1248             (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1249                 return (FALSE);
1250         }
1251
1252         return (TRUE);
1253 }
1254
1255 static int
1256 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1257 {
1258         int error, handled;
1259         struct vmcs *vmcs;
1260         struct vmxctx *vmxctx;
1261         uint32_t eax, ecx, edx, idtvec_info, idtvec_err, reason;
1262         uint64_t qual, gpa;
1263         bool retu;
1264
1265         handled = 0;
1266         vmcs = &vmx->vmcs[vcpu];
1267         vmxctx = &vmx->ctx[vcpu];
1268         qual = vmexit->u.vmx.exit_qualification;
1269         reason = vmexit->u.vmx.exit_reason;
1270         vmexit->exitcode = VM_EXITCODE_BOGUS;
1271
1272         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
1273
1274         /*
1275          * VM exits that could be triggered during event injection on the
1276          * previous VM entry need to be handled specially by re-injecting
1277          * the event.
1278          *
1279          * See "Information for VM Exits During Event Delivery" in Intel SDM
1280          * for details.
1281          */
1282         switch (reason) {
1283         case EXIT_REASON_EPT_FAULT:
1284         case EXIT_REASON_EPT_MISCONFIG:
1285         case EXIT_REASON_APIC:
1286         case EXIT_REASON_TASK_SWITCH:
1287         case EXIT_REASON_EXCEPTION:
1288                 idtvec_info = vmcs_idt_vectoring_info();
1289                 if (idtvec_info & VMCS_IDT_VEC_VALID) {
1290                         idtvec_info &= ~(1 << 12); /* clear undefined bit */
1291                         vmcs_write(VMCS_ENTRY_INTR_INFO, idtvec_info);
1292                         if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
1293                                 idtvec_err = vmcs_idt_vectoring_err();
1294                                 vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR,
1295                                     idtvec_err);
1296                         }
1297                         vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
1298                 }
1299         default:
1300                 break;
1301         }
1302
1303         switch (reason) {
1304         case EXIT_REASON_CR_ACCESS:
1305                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
1306                 handled = vmx_emulate_cr_access(vmx, vcpu, qual);
1307                 break;
1308         case EXIT_REASON_RDMSR:
1309                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
1310                 retu = false;
1311                 ecx = vmxctx->guest_rcx;
1312                 error = emulate_rdmsr(vmx->vm, vcpu, ecx, &retu);
1313                 if (error) {
1314                         vmexit->exitcode = VM_EXITCODE_RDMSR;
1315                         vmexit->u.msr.code = ecx;
1316                 } else if (!retu) {
1317                         handled = 1;
1318                 } else {
1319                         /* Return to userspace with a valid exitcode */
1320                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1321                             ("emulate_wrmsr retu with bogus exitcode"));
1322                 }
1323                 break;
1324         case EXIT_REASON_WRMSR:
1325                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
1326                 retu = false;
1327                 eax = vmxctx->guest_rax;
1328                 ecx = vmxctx->guest_rcx;
1329                 edx = vmxctx->guest_rdx;
1330                 error = emulate_wrmsr(vmx->vm, vcpu, ecx,
1331                     (uint64_t)edx << 32 | eax, &retu);
1332                 if (error) {
1333                         vmexit->exitcode = VM_EXITCODE_WRMSR;
1334                         vmexit->u.msr.code = ecx;
1335                         vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
1336                 } else if (!retu) {
1337                         handled = 1;
1338                 } else {
1339                         /* Return to userspace with a valid exitcode */
1340                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1341                             ("emulate_wrmsr retu with bogus exitcode"));
1342                 }
1343                 break;
1344         case EXIT_REASON_HLT:
1345                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
1346                 vmexit->exitcode = VM_EXITCODE_HLT;
1347                 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1348                 break;
1349         case EXIT_REASON_MTF:
1350                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
1351                 vmexit->exitcode = VM_EXITCODE_MTRAP;
1352                 break;
1353         case EXIT_REASON_PAUSE:
1354                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
1355                 vmexit->exitcode = VM_EXITCODE_PAUSE;
1356                 break;
1357         case EXIT_REASON_INTR_WINDOW:
1358                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
1359                 vmx_clear_int_window_exiting(vmx, vcpu);
1360                 VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1361                 return (1);
1362         case EXIT_REASON_EXT_INTR:
1363                 /*
1364                  * External interrupts serve only to cause VM exits and allow
1365                  * the host interrupt handler to run.
1366                  *
1367                  * If this external interrupt triggers a virtual interrupt
1368                  * to a VM, then that state will be recorded by the
1369                  * host interrupt handler in the VM's softc. We will inject
1370                  * this virtual interrupt during the subsequent VM enter.
1371                  */
1372
1373                 /*
1374                  * This is special. We want to treat this as an 'handled'
1375                  * VM-exit but not increment the instruction pointer.
1376                  */
1377                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
1378                 return (1);
1379         case EXIT_REASON_NMI_WINDOW:
1380                 /* Exit to allow the pending virtual NMI to be injected */
1381                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
1382                 vmx_clear_nmi_window_exiting(vmx, vcpu);
1383                 VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1384                 return (1);
1385         case EXIT_REASON_INOUT:
1386                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
1387                 vmexit->exitcode = VM_EXITCODE_INOUT;
1388                 vmexit->u.inout.bytes = (qual & 0x7) + 1;
1389                 vmexit->u.inout.in = (qual & 0x8) ? 1 : 0;
1390                 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
1391                 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
1392                 vmexit->u.inout.port = (uint16_t)(qual >> 16);
1393                 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
1394                 break;
1395         case EXIT_REASON_CPUID:
1396                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
1397                 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
1398                 break;
1399         case EXIT_REASON_EPT_FAULT:
1400                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EPT_FAULT, 1);
1401                 /*
1402                  * If 'gpa' lies within the address space allocated to
1403                  * memory then this must be a nested page fault otherwise
1404                  * this must be an instruction that accesses MMIO space.
1405                  */
1406                 gpa = vmcs_gpa();
1407                 if (vm_mem_allocated(vmx->vm, gpa)) {
1408                         vmexit->exitcode = VM_EXITCODE_PAGING;
1409                         vmexit->u.paging.gpa = gpa;
1410                         vmexit->u.paging.fault_type = ept_fault_type(qual);
1411                 } else if (ept_emulation_fault(qual)) {
1412                         vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1413                         vmexit->u.inst_emul.gpa = gpa;
1414                         vmexit->u.inst_emul.gla = vmcs_gla();
1415                         vmexit->u.inst_emul.cr3 = vmcs_guest_cr3();
1416                 }
1417                 break;
1418         default:
1419                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
1420                 break;
1421         }
1422
1423         if (handled) {
1424                 /*
1425                  * It is possible that control is returned to userland
1426                  * even though we were able to handle the VM exit in the
1427                  * kernel.
1428                  *
1429                  * In such a case we want to make sure that the userland
1430                  * restarts guest execution at the instruction *after*
1431                  * the one we just processed. Therefore we update the
1432                  * guest rip in the VMCS and in 'vmexit'.
1433                  */
1434                 vmexit->rip += vmexit->inst_length;
1435                 vmexit->inst_length = 0;
1436                 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
1437         } else {
1438                 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
1439                         /*
1440                          * If this VM exit was not claimed by anybody then
1441                          * treat it as a generic VMX exit.
1442                          */
1443                         vmexit->exitcode = VM_EXITCODE_VMX;
1444                         vmexit->u.vmx.error = 0;
1445                 } else {
1446                         /*
1447                          * The exitcode and collateral have been populated.
1448                          * The VM exit will be processed further in userland.
1449                          */
1450                 }
1451         }
1452         return (handled);
1453 }
1454
1455 static int
1456 vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap)
1457 {
1458         int vie, rc, handled, astpending;
1459         uint32_t exit_reason;
1460         struct vmx *vmx;
1461         struct vmxctx *vmxctx;
1462         struct vmcs *vmcs;
1463         struct vm_exit *vmexit;
1464         struct vlapic *vlapic;
1465
1466         vmx = arg;
1467         vmcs = &vmx->vmcs[vcpu];
1468         vmxctx = &vmx->ctx[vcpu];
1469         vmxctx->launched = 0;
1470         vlapic = vm_lapic(vmx->vm, vcpu);
1471
1472         astpending = 0;
1473         vmexit = vm_exitinfo(vmx->vm, vcpu);
1474
1475         KASSERT(vmxctx->pmap == pmap,
1476             ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
1477         KASSERT(vmxctx->eptp == vmx->eptp,
1478             ("eptp %p different than ctx eptp %#lx", eptp, vmxctx->eptp));
1479
1480         /*
1481          * XXX Can we avoid doing this every time we do a vm run?
1482          */
1483         VMPTRLD(vmcs);
1484
1485         /*
1486          * XXX
1487          * We do this every time because we may setup the virtual machine
1488          * from a different process than the one that actually runs it.
1489          *
1490          * If the life of a virtual machine was spent entirely in the context
1491          * of a single process we could do this once in vmcs_set_defaults().
1492          */
1493         vmcs_write(VMCS_HOST_CR3, rcr3());
1494         vmcs_write(VMCS_GUEST_RIP, rip);
1495         vmx_set_pcpu_defaults(vmx, vcpu);
1496
1497         do {
1498                 vmx_inject_interrupts(vmx, vcpu, vlapic);
1499                 vmx_run_trace(vmx, vcpu);
1500                 rc = vmx_setjmp(vmxctx);
1501 #ifdef SETJMP_TRACE
1502                 vmx_setjmp_trace(vmx, vcpu, vmxctx, rc);
1503 #endif
1504                 switch (rc) {
1505                 case VMX_RETURN_DIRECT:
1506                         if (vmxctx->launched == 0) {
1507                                 vmxctx->launched = 1;
1508                                 vmx_launch(vmxctx);
1509                         } else
1510                                 vmx_resume(vmxctx);
1511                         panic("vmx_launch/resume should not return");
1512                         break;
1513                 case VMX_RETURN_LONGJMP:
1514                         break;                  /* vm exit */
1515                 case VMX_RETURN_AST:
1516                         astpending = 1;
1517                         break;
1518                 case VMX_RETURN_VMRESUME:
1519                         vie = vmcs_instruction_error();
1520                         if (vmxctx->launch_error == VM_FAIL_INVALID ||
1521                             vie != VMRESUME_WITH_NON_LAUNCHED_VMCS) {
1522                                 printf("vmresume error %d vmcs inst error %d\n",
1523                                         vmxctx->launch_error, vie);
1524                                 goto err_exit;
1525                         }
1526                         vmx_launch(vmxctx);     /* try to launch the guest */
1527                         panic("vmx_launch should not return");
1528                         break;
1529                 case VMX_RETURN_VMLAUNCH:
1530                         vie = vmcs_instruction_error();
1531 #if 1
1532                         printf("vmlaunch error %d vmcs inst error %d\n",
1533                                 vmxctx->launch_error, vie);
1534 #endif
1535                         goto err_exit;
1536                 case VMX_RETURN_INVEPT:
1537                         panic("vm %s:%d invept error %d",
1538                               vm_name(vmx->vm), vcpu, vmxctx->launch_error);
1539                 default:
1540                         panic("vmx_setjmp returned %d", rc);
1541                 }
1542                 
1543                 /* enable interrupts */
1544                 enable_intr();
1545
1546                 /* collect some basic information for VM exit processing */
1547                 vmexit->rip = rip = vmcs_guest_rip();
1548                 vmexit->inst_length = vmexit_instruction_length();
1549                 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
1550                 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
1551
1552                 if (astpending) {
1553                         handled = 1;
1554                         vmexit->inst_length = 0;
1555                         vmexit->exitcode = VM_EXITCODE_BOGUS;
1556                         vmx_astpending_trace(vmx, vcpu, rip);
1557                         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_ASTPENDING, 1);
1558                         break;
1559                 }
1560
1561                 handled = vmx_exit_process(vmx, vcpu, vmexit);
1562                 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
1563
1564         } while (handled);
1565
1566         /*
1567          * If a VM exit has been handled then the exitcode must be BOGUS
1568          * If a VM exit is not handled then the exitcode must not be BOGUS
1569          */
1570         if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
1571             (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
1572                 panic("Mismatch between handled (%d) and exitcode (%d)",
1573                       handled, vmexit->exitcode);
1574         }
1575
1576         if (!handled)
1577                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_USERSPACE, 1);
1578
1579         VCPU_CTR1(vmx->vm, vcpu, "goto userland: exitcode %d",vmexit->exitcode);
1580
1581         /*
1582          * XXX
1583          * We need to do this to ensure that any VMCS state cached by the
1584          * processor is flushed to memory. We need to do this in case the
1585          * VM moves to a different cpu the next time it runs.
1586          *
1587          * Can we avoid doing this?
1588          */
1589         VMCLEAR(vmcs);
1590         return (0);
1591
1592 err_exit:
1593         vmexit->exitcode = VM_EXITCODE_VMX;
1594         vmexit->u.vmx.exit_reason = (uint32_t)-1;
1595         vmexit->u.vmx.exit_qualification = (uint32_t)-1;
1596         vmexit->u.vmx.error = vie;
1597         VMCLEAR(vmcs);
1598         return (ENOEXEC);
1599 }
1600
1601 static void
1602 vmx_vmcleanup(void *arg)
1603 {
1604         int i, error;
1605         struct vmx *vmx = arg;
1606
1607         for (i = 0; i < VM_MAXCPU; i++)
1608                 vpid_free(vmx->state[i].vpid);
1609
1610         /*
1611          * XXXSMP we also need to clear the VMCS active on the other vcpus.
1612          */
1613         error = vmclear(&vmx->vmcs[0]);
1614         if (error != 0)
1615                 panic("vmx_vmcleanup: vmclear error %d on vcpu 0", error);
1616
1617         free(vmx, M_VMX);
1618
1619         return;
1620 }
1621
1622 static register_t *
1623 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
1624 {
1625
1626         switch (reg) {
1627         case VM_REG_GUEST_RAX:
1628                 return (&vmxctx->guest_rax);
1629         case VM_REG_GUEST_RBX:
1630                 return (&vmxctx->guest_rbx);
1631         case VM_REG_GUEST_RCX:
1632                 return (&vmxctx->guest_rcx);
1633         case VM_REG_GUEST_RDX:
1634                 return (&vmxctx->guest_rdx);
1635         case VM_REG_GUEST_RSI:
1636                 return (&vmxctx->guest_rsi);
1637         case VM_REG_GUEST_RDI:
1638                 return (&vmxctx->guest_rdi);
1639         case VM_REG_GUEST_RBP:
1640                 return (&vmxctx->guest_rbp);
1641         case VM_REG_GUEST_R8:
1642                 return (&vmxctx->guest_r8);
1643         case VM_REG_GUEST_R9:
1644                 return (&vmxctx->guest_r9);
1645         case VM_REG_GUEST_R10:
1646                 return (&vmxctx->guest_r10);
1647         case VM_REG_GUEST_R11:
1648                 return (&vmxctx->guest_r11);
1649         case VM_REG_GUEST_R12:
1650                 return (&vmxctx->guest_r12);
1651         case VM_REG_GUEST_R13:
1652                 return (&vmxctx->guest_r13);
1653         case VM_REG_GUEST_R14:
1654                 return (&vmxctx->guest_r14);
1655         case VM_REG_GUEST_R15:
1656                 return (&vmxctx->guest_r15);
1657         default:
1658                 break;
1659         }
1660         return (NULL);
1661 }
1662
1663 static int
1664 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
1665 {
1666         register_t *regp;
1667
1668         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
1669                 *retval = *regp;
1670                 return (0);
1671         } else
1672                 return (EINVAL);
1673 }
1674
1675 static int
1676 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
1677 {
1678         register_t *regp;
1679
1680         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
1681                 *regp = val;
1682                 return (0);
1683         } else
1684                 return (EINVAL);
1685 }
1686
1687 static int
1688 vmx_shadow_reg(int reg)
1689 {
1690         int shreg;
1691
1692         shreg = -1;
1693
1694         switch (reg) {
1695         case VM_REG_GUEST_CR0:
1696                 shreg = VMCS_CR0_SHADOW;
1697                 break;
1698         case VM_REG_GUEST_CR4:
1699                 shreg = VMCS_CR4_SHADOW;
1700                 break;
1701         default:
1702                 break;
1703         }
1704
1705         return (shreg);
1706 }
1707
1708 static int
1709 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
1710 {
1711         int running, hostcpu;
1712         struct vmx *vmx = arg;
1713
1714         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
1715         if (running && hostcpu != curcpu)
1716                 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
1717
1718         if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
1719                 return (0);
1720
1721         return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
1722 }
1723
1724 static int
1725 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
1726 {
1727         int error, hostcpu, running, shadow;
1728         uint64_t ctls;
1729         struct vmx *vmx = arg;
1730
1731         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
1732         if (running && hostcpu != curcpu)
1733                 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
1734
1735         if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
1736                 return (0);
1737
1738         error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
1739
1740         if (error == 0) {
1741                 /*
1742                  * If the "load EFER" VM-entry control is 1 then the
1743                  * value of EFER.LMA must be identical to "IA-32e mode guest"
1744                  * bit in the VM-entry control.
1745                  */
1746                 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
1747                     (reg == VM_REG_GUEST_EFER)) {
1748                         vmcs_getreg(&vmx->vmcs[vcpu], running,
1749                                     VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
1750                         if (val & EFER_LMA)
1751                                 ctls |= VM_ENTRY_GUEST_LMA;
1752                         else
1753                                 ctls &= ~VM_ENTRY_GUEST_LMA;
1754                         vmcs_setreg(&vmx->vmcs[vcpu], running,
1755                                     VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
1756                 }
1757
1758                 shadow = vmx_shadow_reg(reg);
1759                 if (shadow > 0) {
1760                         /*
1761                          * Store the unmodified value in the shadow
1762                          */                     
1763                         error = vmcs_setreg(&vmx->vmcs[vcpu], running,
1764                                     VMCS_IDENT(shadow), val);
1765                 }
1766         }
1767
1768         return (error);
1769 }
1770
1771 static int
1772 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
1773 {
1774         struct vmx *vmx = arg;
1775
1776         return (vmcs_getdesc(&vmx->vmcs[vcpu], reg, desc));
1777 }
1778
1779 static int
1780 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
1781 {
1782         struct vmx *vmx = arg;
1783
1784         return (vmcs_setdesc(&vmx->vmcs[vcpu], reg, desc));
1785 }
1786
1787 static int
1788 vmx_inject(void *arg, int vcpu, int type, int vector, uint32_t code,
1789            int code_valid)
1790 {
1791         int error;
1792         uint64_t info;
1793         struct vmx *vmx = arg;
1794         struct vmcs *vmcs = &vmx->vmcs[vcpu];
1795
1796         static uint32_t type_map[VM_EVENT_MAX] = {
1797                 0x1,            /* VM_EVENT_NONE */
1798                 0x0,            /* VM_HW_INTR */
1799                 0x2,            /* VM_NMI */
1800                 0x3,            /* VM_HW_EXCEPTION */
1801                 0x4,            /* VM_SW_INTR */
1802                 0x5,            /* VM_PRIV_SW_EXCEPTION */
1803                 0x6,            /* VM_SW_EXCEPTION */
1804         };
1805
1806         /*
1807          * If there is already an exception pending to be delivered to the
1808          * vcpu then just return.
1809          */
1810         error = vmcs_getreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), &info);
1811         if (error)
1812                 return (error);
1813
1814         if (info & VMCS_INTERRUPTION_INFO_VALID)
1815                 return (EAGAIN);
1816
1817         info = vector | (type_map[type] << 8) | (code_valid ? 1 << 11 : 0);
1818         info |= VMCS_INTERRUPTION_INFO_VALID;
1819         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(VMCS_ENTRY_INTR_INFO), info);
1820         if (error != 0)
1821                 return (error);
1822
1823         if (code_valid) {
1824                 error = vmcs_setreg(vmcs, 0,
1825                                     VMCS_IDENT(VMCS_ENTRY_EXCEPTION_ERROR),
1826                                     code);
1827         }
1828         return (error);
1829 }
1830
1831 static int
1832 vmx_getcap(void *arg, int vcpu, int type, int *retval)
1833 {
1834         struct vmx *vmx = arg;
1835         int vcap;
1836         int ret;
1837
1838         ret = ENOENT;
1839
1840         vcap = vmx->cap[vcpu].set;
1841
1842         switch (type) {
1843         case VM_CAP_HALT_EXIT:
1844                 if (cap_halt_exit)
1845                         ret = 0;
1846                 break;
1847         case VM_CAP_PAUSE_EXIT:
1848                 if (cap_pause_exit)
1849                         ret = 0;
1850                 break;
1851         case VM_CAP_MTRAP_EXIT:
1852                 if (cap_monitor_trap)
1853                         ret = 0;
1854                 break;
1855         case VM_CAP_UNRESTRICTED_GUEST:
1856                 if (cap_unrestricted_guest)
1857                         ret = 0;
1858                 break;
1859         case VM_CAP_ENABLE_INVPCID:
1860                 if (cap_invpcid)
1861                         ret = 0;
1862                 break;
1863         default:
1864                 break;
1865         }
1866
1867         if (ret == 0)
1868                 *retval = (vcap & (1 << type)) ? 1 : 0;
1869
1870         return (ret);
1871 }
1872
1873 static int
1874 vmx_setcap(void *arg, int vcpu, int type, int val)
1875 {
1876         struct vmx *vmx = arg;
1877         struct vmcs *vmcs = &vmx->vmcs[vcpu];
1878         uint32_t baseval;
1879         uint32_t *pptr;
1880         int error;
1881         int flag;
1882         int reg;
1883         int retval;
1884
1885         retval = ENOENT;
1886         pptr = NULL;
1887
1888         switch (type) {
1889         case VM_CAP_HALT_EXIT:
1890                 if (cap_halt_exit) {
1891                         retval = 0;
1892                         pptr = &vmx->cap[vcpu].proc_ctls;
1893                         baseval = *pptr;
1894                         flag = PROCBASED_HLT_EXITING;
1895                         reg = VMCS_PRI_PROC_BASED_CTLS;
1896                 }
1897                 break;
1898         case VM_CAP_MTRAP_EXIT:
1899                 if (cap_monitor_trap) {
1900                         retval = 0;
1901                         pptr = &vmx->cap[vcpu].proc_ctls;
1902                         baseval = *pptr;
1903                         flag = PROCBASED_MTF;
1904                         reg = VMCS_PRI_PROC_BASED_CTLS;
1905                 }
1906                 break;
1907         case VM_CAP_PAUSE_EXIT:
1908                 if (cap_pause_exit) {
1909                         retval = 0;
1910                         pptr = &vmx->cap[vcpu].proc_ctls;
1911                         baseval = *pptr;
1912                         flag = PROCBASED_PAUSE_EXITING;
1913                         reg = VMCS_PRI_PROC_BASED_CTLS;
1914                 }
1915                 break;
1916         case VM_CAP_UNRESTRICTED_GUEST:
1917                 if (cap_unrestricted_guest) {
1918                         retval = 0;
1919                         pptr = &vmx->cap[vcpu].proc_ctls2;
1920                         baseval = *pptr;
1921                         flag = PROCBASED2_UNRESTRICTED_GUEST;
1922                         reg = VMCS_SEC_PROC_BASED_CTLS;
1923                 }
1924                 break;
1925         case VM_CAP_ENABLE_INVPCID:
1926                 if (cap_invpcid) {
1927                         retval = 0;
1928                         pptr = &vmx->cap[vcpu].proc_ctls2;
1929                         baseval = *pptr;
1930                         flag = PROCBASED2_ENABLE_INVPCID;
1931                         reg = VMCS_SEC_PROC_BASED_CTLS;
1932                 }
1933                 break;
1934         default:
1935                 break;
1936         }
1937
1938         if (retval == 0) {
1939                 if (val) {
1940                         baseval |= flag;
1941                 } else {
1942                         baseval &= ~flag;
1943                 }
1944                 VMPTRLD(vmcs);
1945                 error = vmwrite(reg, baseval);
1946                 VMCLEAR(vmcs);
1947
1948                 if (error) {
1949                         retval = error;
1950                 } else {
1951                         /*
1952                          * Update optional stored flags, and record
1953                          * setting
1954                          */
1955                         if (pptr != NULL) {
1956                                 *pptr = baseval;
1957                         }
1958
1959                         if (val) {
1960                                 vmx->cap[vcpu].set |= (1 << type);
1961                         } else {
1962                                 vmx->cap[vcpu].set &= ~(1 << type);
1963                         }
1964                 }
1965         }
1966
1967         return (retval);
1968 }
1969
1970 static struct vlapic *
1971 vmx_vlapic_init(void *arg, int vcpuid)
1972 {
1973         struct vmx *vmx;
1974         struct vlapic *vlapic;
1975         
1976         vmx = arg;
1977
1978         vlapic = malloc(sizeof(struct vlapic), M_VLAPIC, M_WAITOK | M_ZERO);
1979         vlapic->vm = vmx->vm;
1980         vlapic->vcpuid = vcpuid;
1981         vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
1982
1983         vlapic_init(vlapic);
1984
1985         return (vlapic);
1986 }
1987
1988 static void
1989 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
1990 {
1991
1992         vlapic_cleanup(vlapic);
1993         free(vlapic, M_VLAPIC);
1994 }
1995
1996 struct vmm_ops vmm_ops_intel = {
1997         vmx_init,
1998         vmx_cleanup,
1999         vmx_restore,
2000         vmx_vminit,
2001         vmx_run,
2002         vmx_vmcleanup,
2003         vmx_getreg,
2004         vmx_setreg,
2005         vmx_getdesc,
2006         vmx_setdesc,
2007         vmx_inject,
2008         vmx_getcap,
2009         vmx_setcap,
2010         ept_vmspace_alloc,
2011         ept_vmspace_free,
2012         vmx_vlapic_init,
2013         vmx_vlapic_cleanup,
2014 };