1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2011 NetApp, Inc.
5  * All rights reserved.
6  * Copyright (c) 2018 Joyent, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/smp.h>
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/pcpu.h>
41 #include <sys/proc.h>
42 #include <sys/sysctl.h>
43
44 #include <vm/vm.h>
45 #include <vm/pmap.h>
46
47 #include <machine/psl.h>
48 #include <machine/cpufunc.h>
49 #include <machine/md_var.h>
50 #include <machine/reg.h>
51 #include <machine/segments.h>
52 #include <machine/smp.h>
53 #include <machine/specialreg.h>
54 #include <machine/vmparam.h>
55
56 #include <machine/vmm.h>
57 #include <machine/vmm_dev.h>
58 #include <machine/vmm_instruction_emul.h>
59 #include "vmm_lapic.h"
60 #include "vmm_host.h"
61 #include "vmm_ioport.h"
62 #include "vmm_ktr.h"
63 #include "vmm_stat.h"
64 #include "vatpic.h"
65 #include "vlapic.h"
66 #include "vlapic_priv.h"
67
68 #include "ept.h"
69 #include "vmx_cpufunc.h"
70 #include "vmx.h"
71 #include "vmx_msr.h"
72 #include "x86.h"
73 #include "vmx_controls.h"
74
75 #define PINBASED_CTLS_ONE_SETTING                                       \
76         (PINBASED_EXTINT_EXITING        |                               \
77          PINBASED_NMI_EXITING           |                               \
78          PINBASED_VIRTUAL_NMI)
79 #define PINBASED_CTLS_ZERO_SETTING      0
80
81 #define PROCBASED_CTLS_WINDOW_SETTING                                   \
82         (PROCBASED_INT_WINDOW_EXITING   |                               \
83          PROCBASED_NMI_WINDOW_EXITING)
84
85 #define PROCBASED_CTLS_ONE_SETTING                                      \
86         (PROCBASED_SECONDARY_CONTROLS   |                               \
87          PROCBASED_MWAIT_EXITING        |                               \
88          PROCBASED_MONITOR_EXITING      |                               \
89          PROCBASED_IO_EXITING           |                               \
90          PROCBASED_MSR_BITMAPS          |                               \
91          PROCBASED_CTLS_WINDOW_SETTING  |                               \
92          PROCBASED_CR8_LOAD_EXITING     |                               \
93          PROCBASED_CR8_STORE_EXITING)
94 #define PROCBASED_CTLS_ZERO_SETTING     \
95         (PROCBASED_CR3_LOAD_EXITING |   \
96         PROCBASED_CR3_STORE_EXITING |   \
97         PROCBASED_IO_BITMAPS)
98
99 #define PROCBASED_CTLS2_ONE_SETTING     PROCBASED2_ENABLE_EPT
100 #define PROCBASED_CTLS2_ZERO_SETTING    0
101
102 #define VM_EXIT_CTLS_ONE_SETTING                                        \
103         (VM_EXIT_SAVE_DEBUG_CONTROLS            |                       \
104         VM_EXIT_HOST_LMA                        |                       \
105         VM_EXIT_SAVE_EFER                       |                       \
106         VM_EXIT_LOAD_EFER                       |                       \
107         VM_EXIT_ACKNOWLEDGE_INTERRUPT)
108
109 #define VM_EXIT_CTLS_ZERO_SETTING       0
110
111 #define VM_ENTRY_CTLS_ONE_SETTING                                       \
112         (VM_ENTRY_LOAD_DEBUG_CONTROLS           |                       \
113         VM_ENTRY_LOAD_EFER)
114
115 #define VM_ENTRY_CTLS_ZERO_SETTING                                      \
116         (VM_ENTRY_INTO_SMM                      |                       \
117         VM_ENTRY_DEACTIVATE_DUAL_MONITOR)
118
119 #define HANDLED         1
120 #define UNHANDLED       0
121
122 static MALLOC_DEFINE(M_VMX, "vmx", "vmx");
123 static MALLOC_DEFINE(M_VLAPIC, "vlapic", "vlapic");
124
125 SYSCTL_DECL(_hw_vmm);
126 SYSCTL_NODE(_hw_vmm, OID_AUTO, vmx, CTLFLAG_RW, NULL, NULL);
127
128 int vmxon_enabled[MAXCPU];
129 static char vmxon_region[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
130
131 static uint32_t pinbased_ctls, procbased_ctls, procbased_ctls2;
132 static uint32_t exit_ctls, entry_ctls;
133
134 static uint64_t cr0_ones_mask, cr0_zeros_mask;
135 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_ones_mask, CTLFLAG_RD,
136              &cr0_ones_mask, 0, NULL);
137 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr0_zeros_mask, CTLFLAG_RD,
138              &cr0_zeros_mask, 0, NULL);
139
140 static uint64_t cr4_ones_mask, cr4_zeros_mask;
141 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_ones_mask, CTLFLAG_RD,
142              &cr4_ones_mask, 0, NULL);
143 SYSCTL_ULONG(_hw_vmm_vmx, OID_AUTO, cr4_zeros_mask, CTLFLAG_RD,
144              &cr4_zeros_mask, 0, NULL);
145
146 static int vmx_initialized;
147 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, initialized, CTLFLAG_RD,
148            &vmx_initialized, 0, "Intel VMX initialized");
149
150 /*
151  * Optional capabilities
152  */
153 static SYSCTL_NODE(_hw_vmm_vmx, OID_AUTO, cap, CTLFLAG_RW, NULL, NULL);
154
155 static int cap_halt_exit;
156 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, halt_exit, CTLFLAG_RD, &cap_halt_exit, 0,
157     "HLT triggers a VM-exit");
158
159 static int cap_pause_exit;
160 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, pause_exit, CTLFLAG_RD, &cap_pause_exit,
161     0, "PAUSE triggers a VM-exit");
162
163 static int cap_unrestricted_guest;
164 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, unrestricted_guest, CTLFLAG_RD,
165     &cap_unrestricted_guest, 0, "Unrestricted guests");
166
167 static int cap_monitor_trap;
168 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, monitor_trap, CTLFLAG_RD,
169     &cap_monitor_trap, 0, "Monitor trap flag");
170
171 static int cap_invpcid;
172 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
173     0, "Guests are allowed to use INVPCID");
174
175 static int virtual_interrupt_delivery;
176 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
177     &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
178
179 static int posted_interrupts;
180 SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
181     &posted_interrupts, 0, "APICv posted interrupt support");
182
183 static int pirvec = -1;
184 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, posted_interrupt_vector, CTLFLAG_RD,
185     &pirvec, 0, "APICv posted interrupt vector");
186
187 static struct unrhdr *vpid_unr;
188 static u_int vpid_alloc_failed;
189 SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
190             &vpid_alloc_failed, 0, NULL);
191
192 static int guest_l1d_flush;
193 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD,
194     &guest_l1d_flush, 0, NULL);
195 static int guest_l1d_flush_sw;
196 SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RD,
197     &guest_l1d_flush_sw, 0, NULL);
198
199 static struct msr_entry msr_load_list[1] __aligned(16);
200
201 /*
202  * The definitions of SDT probes for VMX.
203  */
204
205 SDT_PROBE_DEFINE3(vmm, vmx, exit, entry,
206     "struct vmx *", "int", "struct vm_exit *");
207
208 SDT_PROBE_DEFINE4(vmm, vmx, exit, taskswitch,
209     "struct vmx *", "int", "struct vm_exit *", "struct vm_task_switch *");
210
211 SDT_PROBE_DEFINE4(vmm, vmx, exit, craccess,
212     "struct vmx *", "int", "struct vm_exit *", "uint64_t");
213
214 SDT_PROBE_DEFINE4(vmm, vmx, exit, rdmsr,
215     "struct vmx *", "int", "struct vm_exit *", "uint32_t");
216
217 SDT_PROBE_DEFINE5(vmm, vmx, exit, wrmsr,
218     "struct vmx *", "int", "struct vm_exit *", "uint32_t", "uint64_t");
219
220 SDT_PROBE_DEFINE3(vmm, vmx, exit, halt,
221     "struct vmx *", "int", "struct vm_exit *");
222
223 SDT_PROBE_DEFINE3(vmm, vmx, exit, mtrap,
224     "struct vmx *", "int", "struct vm_exit *");
225
226 SDT_PROBE_DEFINE3(vmm, vmx, exit, pause,
227     "struct vmx *", "int", "struct vm_exit *");
228
229 SDT_PROBE_DEFINE3(vmm, vmx, exit, intrwindow,
230     "struct vmx *", "int", "struct vm_exit *");
231
232 SDT_PROBE_DEFINE4(vmm, vmx, exit, interrupt,
233     "struct vmx *", "int", "struct vm_exit *", "uint32_t");
234
235 SDT_PROBE_DEFINE3(vmm, vmx, exit, nmiwindow,
236     "struct vmx *", "int", "struct vm_exit *");
237
238 SDT_PROBE_DEFINE3(vmm, vmx, exit, inout,
239     "struct vmx *", "int", "struct vm_exit *");
240
241 SDT_PROBE_DEFINE3(vmm, vmx, exit, cpuid,
242     "struct vmx *", "int", "struct vm_exit *");
243
244 SDT_PROBE_DEFINE5(vmm, vmx, exit, exception,
245     "struct vmx *", "int", "struct vm_exit *", "uint32_t", "int");
246
247 SDT_PROBE_DEFINE5(vmm, vmx, exit, nestedfault,
248     "struct vmx *", "int", "struct vm_exit *", "uint64_t", "uint64_t");
249
250 SDT_PROBE_DEFINE4(vmm, vmx, exit, mmiofault,
251     "struct vmx *", "int", "struct vm_exit *", "uint64_t");
252
253 SDT_PROBE_DEFINE3(vmm, vmx, exit, eoi,
254     "struct vmx *", "int", "struct vm_exit *");
255
256 SDT_PROBE_DEFINE3(vmm, vmx, exit, apicaccess,
257     "struct vmx *", "int", "struct vm_exit *");
258
259 SDT_PROBE_DEFINE4(vmm, vmx, exit, apicwrite,
260     "struct vmx *", "int", "struct vm_exit *", "struct vlapic *");
261
262 SDT_PROBE_DEFINE3(vmm, vmx, exit, xsetbv,
263     "struct vmx *", "int", "struct vm_exit *");
264
265 SDT_PROBE_DEFINE3(vmm, vmx, exit, monitor,
266     "struct vmx *", "int", "struct vm_exit *");
267
268 SDT_PROBE_DEFINE3(vmm, vmx, exit, mwait,
269     "struct vmx *", "int", "struct vm_exit *");
270
271 SDT_PROBE_DEFINE3(vmm, vmx, exit, vminsn,
272     "struct vmx *", "int", "struct vm_exit *");
273
274 SDT_PROBE_DEFINE4(vmm, vmx, exit, unknown,
275     "struct vmx *", "int", "struct vm_exit *", "uint32_t");
276
277 SDT_PROBE_DEFINE4(vmm, vmx, exit, return,
278     "struct vmx *", "int", "struct vm_exit *", "int");
279
280 /*
281  * Use the last page below 4GB as the APIC access address. This address is
282  * occupied by the boot firmware so it is guaranteed that it will not conflict
283  * with a page in system memory.
284  */
285 #define APIC_ACCESS_ADDRESS     0xFFFFF000
286
287 static int vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc);
288 static int vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval);
289 static int vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val);
290 static void vmx_inject_pir(struct vlapic *vlapic);
291
292 #ifdef KTR
293 static const char *
294 exit_reason_to_str(int reason)
295 {
296         static char reasonbuf[32];
297
298         switch (reason) {
299         case EXIT_REASON_EXCEPTION:
300                 return "exception";
301         case EXIT_REASON_EXT_INTR:
302                 return "extint";
303         case EXIT_REASON_TRIPLE_FAULT:
304                 return "triplefault";
305         case EXIT_REASON_INIT:
306                 return "init";
307         case EXIT_REASON_SIPI:
308                 return "sipi";
309         case EXIT_REASON_IO_SMI:
310                 return "iosmi";
311         case EXIT_REASON_SMI:
312                 return "smi";
313         case EXIT_REASON_INTR_WINDOW:
314                 return "intrwindow";
315         case EXIT_REASON_NMI_WINDOW:
316                 return "nmiwindow";
317         case EXIT_REASON_TASK_SWITCH:
318                 return "taskswitch";
319         case EXIT_REASON_CPUID:
320                 return "cpuid";
321         case EXIT_REASON_GETSEC:
322                 return "getsec";
323         case EXIT_REASON_HLT:
324                 return "hlt";
325         case EXIT_REASON_INVD:
326                 return "invd";
327         case EXIT_REASON_INVLPG:
328                 return "invlpg";
329         case EXIT_REASON_RDPMC:
330                 return "rdpmc";
331         case EXIT_REASON_RDTSC:
332                 return "rdtsc";
333         case EXIT_REASON_RSM:
334                 return "rsm";
335         case EXIT_REASON_VMCALL:
336                 return "vmcall";
337         case EXIT_REASON_VMCLEAR:
338                 return "vmclear";
339         case EXIT_REASON_VMLAUNCH:
340                 return "vmlaunch";
341         case EXIT_REASON_VMPTRLD:
342                 return "vmptrld";
343         case EXIT_REASON_VMPTRST:
344                 return "vmptrst";
345         case EXIT_REASON_VMREAD:
346                 return "vmread";
347         case EXIT_REASON_VMRESUME:
348                 return "vmresume";
349         case EXIT_REASON_VMWRITE:
350                 return "vmwrite";
351         case EXIT_REASON_VMXOFF:
352                 return "vmxoff";
353         case EXIT_REASON_VMXON:
354                 return "vmxon";
355         case EXIT_REASON_CR_ACCESS:
356                 return "craccess";
357         case EXIT_REASON_DR_ACCESS:
358                 return "draccess";
359         case EXIT_REASON_INOUT:
360                 return "inout";
361         case EXIT_REASON_RDMSR:
362                 return "rdmsr";
363         case EXIT_REASON_WRMSR:
364                 return "wrmsr";
365         case EXIT_REASON_INVAL_VMCS:
366                 return "invalvmcs";
367         case EXIT_REASON_INVAL_MSR:
368                 return "invalmsr";
369         case EXIT_REASON_MWAIT:
370                 return "mwait";
371         case EXIT_REASON_MTF:
372                 return "mtf";
373         case EXIT_REASON_MONITOR:
374                 return "monitor";
375         case EXIT_REASON_PAUSE:
376                 return "pause";
377         case EXIT_REASON_MCE_DURING_ENTRY:
378                 return "mce-during-entry";
379         case EXIT_REASON_TPR:
380                 return "tpr";
381         case EXIT_REASON_APIC_ACCESS:
382                 return "apic-access";
383         case EXIT_REASON_GDTR_IDTR:
384                 return "gdtridtr";
385         case EXIT_REASON_LDTR_TR:
386                 return "ldtrtr";
387         case EXIT_REASON_EPT_FAULT:
388                 return "eptfault";
389         case EXIT_REASON_EPT_MISCONFIG:
390                 return "eptmisconfig";
391         case EXIT_REASON_INVEPT:
392                 return "invept";
393         case EXIT_REASON_RDTSCP:
394                 return "rdtscp";
395         case EXIT_REASON_VMX_PREEMPT:
396                 return "vmxpreempt";
397         case EXIT_REASON_INVVPID:
398                 return "invvpid";
399         case EXIT_REASON_WBINVD:
400                 return "wbinvd";
401         case EXIT_REASON_XSETBV:
402                 return "xsetbv";
403         case EXIT_REASON_APIC_WRITE:
404                 return "apic-write";
405         default:
406                 snprintf(reasonbuf, sizeof(reasonbuf), "%d", reason);
407                 return (reasonbuf);
408         }
409 }
410 #endif  /* KTR */
411
412 static int
413 vmx_allow_x2apic_msrs(struct vmx *vmx)
414 {
415         int i, error;
416
417         error = 0;
418
419         /*
420          * Allow readonly access to the following x2APIC MSRs from the guest.
421          */
422         error += guest_msr_ro(vmx, MSR_APIC_ID);
423         error += guest_msr_ro(vmx, MSR_APIC_VERSION);
424         error += guest_msr_ro(vmx, MSR_APIC_LDR);
425         error += guest_msr_ro(vmx, MSR_APIC_SVR);
426
427         for (i = 0; i < 8; i++)
428                 error += guest_msr_ro(vmx, MSR_APIC_ISR0 + i);
429
430         for (i = 0; i < 8; i++)
431                 error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
432
433         for (i = 0; i < 8; i++)
434                 error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
435
436         error += guest_msr_ro(vmx, MSR_APIC_ESR);
437         error += guest_msr_ro(vmx, MSR_APIC_LVT_TIMER);
438         error += guest_msr_ro(vmx, MSR_APIC_LVT_THERMAL);
439         error += guest_msr_ro(vmx, MSR_APIC_LVT_PCINT);
440         error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT0);
441         error += guest_msr_ro(vmx, MSR_APIC_LVT_LINT1);
442         error += guest_msr_ro(vmx, MSR_APIC_LVT_ERROR);
443         error += guest_msr_ro(vmx, MSR_APIC_ICR_TIMER);
444         error += guest_msr_ro(vmx, MSR_APIC_DCR_TIMER);
445         error += guest_msr_ro(vmx, MSR_APIC_ICR);
446
447         /*
448          * Allow TPR, EOI and SELF_IPI MSRs to be read and written by the guest.
449          *
450          * These registers get special treatment described in the section
451          * "Virtualizing MSR-Based APIC Accesses".
452          */
453         error += guest_msr_rw(vmx, MSR_APIC_TPR);
454         error += guest_msr_rw(vmx, MSR_APIC_EOI);
455         error += guest_msr_rw(vmx, MSR_APIC_SELF_IPI);
456
457         return (error);
458 }
459
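/*
 * vmx_fix_cr0() and vmx_fix_cr4() force the bits that VMX requires to be 1
 * (cr*_ones_mask) and clear the bits required to be 0 (cr*_zeros_mask), as
 * derived from the MSR_VMX_CR{0,4}_FIXED{0,1} capability MSRs in vmx_init().
 */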
460 u_long
461 vmx_fix_cr0(u_long cr0)
462 {
463
464         return ((cr0 | cr0_ones_mask) & ~cr0_zeros_mask);
465 }
466
467 u_long
468 vmx_fix_cr4(u_long cr4)
469 {
470
471         return ((cr4 | cr4_ones_mask) & ~cr4_zeros_mask);
472 }
473
474 static void
475 vpid_free(int vpid)
476 {
477         if (vpid < 0 || vpid > 0xffff)
478                 panic("vpid_free: invalid vpid %d", vpid);
479
480         /*
481          * VPIDs [0,VM_MAXCPU] are special and are not allocated from
482          * the unit number allocator.
483          */
484
485         if (vpid > VM_MAXCPU)
486                 free_unr(vpid_unr, vpid);
487 }
488
489 static void
490 vpid_alloc(uint16_t *vpid, int num)
491 {
492         int i, x;
493
494         if (num <= 0 || num > VM_MAXCPU)
495                 panic("invalid number of vpids requested: %d", num);
496
497         /*
498          * If the "enable vpid" execution control is not enabled then the
499          * VPID is required to be 0 for all vcpus.
500          */
501         if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
502                 for (i = 0; i < num; i++)
503                         vpid[i] = 0;
504                 return;
505         }
506
507         /*
508          * Allocate a unique VPID for each vcpu from the unit number allocator.
509          */
510         for (i = 0; i < num; i++) {
511                 x = alloc_unr(vpid_unr);
512                 if (x == -1)
513                         break;
514                 else
515                         vpid[i] = x;
516         }
517
518         if (i < num) {
519                 atomic_add_int(&vpid_alloc_failed, 1);
520
521                 /*
522                  * If the unit number allocator does not have enough unique
523                  * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
524                  *
525          * These VPIDs are not unique across VMs but this does not
526                  * affect correctness because the combined mappings are also
527                  * tagged with the EP4TA which is unique for each VM.
528                  *
529                  * It is still sub-optimal because the invvpid will invalidate
530                  * combined mappings for a particular VPID across all EP4TAs.
531                  */
532                 while (i-- > 0)
533                         vpid_free(vpid[i]);
534
535                 for (i = 0; i < num; i++)
536                         vpid[i] = i + 1;
537         }
538 }
539
540 static void
541 vpid_init(void)
542 {
543         /*
544          * VPID 0 is required when the "enable VPID" execution control is
545          * disabled.
546          *
547          * VPIDs [1,VM_MAXCPU] are used as the "overflow namespace" when the
548          * unit number allocator does not have sufficient unique VPIDs to
549          * satisfy the allocation.
550          *
551          * The remaining VPIDs are managed by the unit number allocator.
552          */
553         vpid_unr = new_unrhdr(VM_MAXCPU + 1, 0xffff, NULL);
554 }
555
556 static void
557 vmx_disable(void *arg __unused)
558 {
559         struct invvpid_desc invvpid_desc = { 0 };
560         struct invept_desc invept_desc = { 0 };
561
562         if (vmxon_enabled[curcpu]) {
563                 /*
564                  * See sections 25.3.3.3 and 25.3.3.4 in Intel Vol 3b.
565                  *
566                  * VMXON or VMXOFF are not required to invalidate any TLB caching
567                  * structures, so do it explicitly here.  This prevents retention of
568                  * cached information in the TLB between distinct VMX episodes.
569                  */
570                 invvpid(INVVPID_TYPE_ALL_CONTEXTS, invvpid_desc);
571                 invept(INVEPT_TYPE_ALL_CONTEXTS, invept_desc);
572                 vmxoff();
573         }
574         load_cr4(rcr4() & ~CR4_VMXE);
575 }
576
577 static int
578 vmx_cleanup(void)
579 {
580
581         if (pirvec >= 0)
582                 lapic_ipi_free(pirvec);
583
584         if (vpid_unr != NULL) {
585                 delete_unrhdr(vpid_unr);
586                 vpid_unr = NULL;
587         }
588
589         if (nmi_flush_l1d_sw == 1)
590                 nmi_flush_l1d_sw = 0;
591
592         smp_rendezvous(NULL, vmx_disable, NULL, NULL);
593
594         return (0);
595 }
596
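/*
 * Per-cpu rendezvous callback: enable VMX in MSR_IA32_FEATURE_CONTROL if the
 * MSR is not already locked with VMX enabled, set CR4.VMXE, write the VMCS
 * revision identifier into this cpu's VMXON region and execute VMXON.
 */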
597 static void
598 vmx_enable(void *arg __unused)
599 {
600         int error;
601         uint64_t feature_control;
602
603         feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
604         if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 0 ||
605             (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
606                 wrmsr(MSR_IA32_FEATURE_CONTROL,
607                     feature_control | IA32_FEATURE_CONTROL_VMX_EN |
608                     IA32_FEATURE_CONTROL_LOCK);
609         }
610
611         load_cr4(rcr4() | CR4_VMXE);
612
613         *(uint32_t *)vmxon_region[curcpu] = vmx_revision();
614         error = vmxon(vmxon_region[curcpu]);
615         if (error == 0)
616                 vmxon_enabled[curcpu] = 1;
617 }
618
619 static void
620 vmx_restore(void)
621 {
622
623         if (vmxon_enabled[curcpu])
624                 vmxon(vmxon_region[curcpu]);
625 }
626
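/*
 * Module initialization: verify VMX support, compute the pin-based,
 * processor-based, VM-entry and VM-exit control settings, detect optional
 * capabilities, set up EPT and the L1D flush mitigation, and finally enable
 * VMX operation on all host cpus.
 */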
627 static int
628 vmx_init(int ipinum)
629 {
630         int error, use_tpr_shadow;
631         uint64_t basic, fixed0, fixed1, feature_control;
632         uint32_t tmp, procbased2_vid_bits;
633
634         /* CPUID.1:ECX[bit 5] must be 1 for processor to support VMX */
635         if (!(cpu_feature2 & CPUID2_VMX)) {
636                 printf("vmx_init: processor does not support VMX operation\n");
637                 return (ENXIO);
638         }
639
640         /*
641          * Verify that MSR_IA32_FEATURE_CONTROL permits VMX: if the lock bit
642          * (bit 0) is set then the VMXON enable bit (bit 2) must also be set.
643          */
644         feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
645         if ((feature_control & IA32_FEATURE_CONTROL_LOCK) == 1 &&
646             (feature_control & IA32_FEATURE_CONTROL_VMX_EN) == 0) {
647                 printf("vmx_init: VMX operation disabled by BIOS\n");
648                 return (ENXIO);
649         }
650
651         /*
652          * Verify capabilities MSR_VMX_BASIC:
653          * - bit 54 indicates support for INS/OUTS decoding
654          */
655         basic = rdmsr(MSR_VMX_BASIC);
656         if ((basic & (1UL << 54)) == 0) {
657                 printf("vmx_init: processor does not support desired basic "
658                     "capabilities\n");
659                 return (EINVAL);
660         }
661
662         /* Check support for primary processor-based VM-execution controls */
663         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
664                                MSR_VMX_TRUE_PROCBASED_CTLS,
665                                PROCBASED_CTLS_ONE_SETTING,
666                                PROCBASED_CTLS_ZERO_SETTING, &procbased_ctls);
667         if (error) {
668                 printf("vmx_init: processor does not support desired primary "
669                        "processor-based controls\n");
670                 return (error);
671         }
672
673         /* Clear the processor-based ctl bits that are set on demand */
674         procbased_ctls &= ~PROCBASED_CTLS_WINDOW_SETTING;
675
676         /* Check support for secondary processor-based VM-execution controls */
677         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
678                                MSR_VMX_PROCBASED_CTLS2,
679                                PROCBASED_CTLS2_ONE_SETTING,
680                                PROCBASED_CTLS2_ZERO_SETTING, &procbased_ctls2);
681         if (error) {
682                 printf("vmx_init: processor does not support desired secondary "
683                        "processor-based controls\n");
684                 return (error);
685         }
686
687         /* Check support for VPID */
688         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
689                                PROCBASED2_ENABLE_VPID, 0, &tmp);
690         if (error == 0)
691                 procbased_ctls2 |= PROCBASED2_ENABLE_VPID;
692
693         /* Check support for pin-based VM-execution controls */
694         error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
695                                MSR_VMX_TRUE_PINBASED_CTLS,
696                                PINBASED_CTLS_ONE_SETTING,
697                                PINBASED_CTLS_ZERO_SETTING, &pinbased_ctls);
698         if (error) {
699                 printf("vmx_init: processor does not support desired "
700                        "pin-based controls\n");
701                 return (error);
702         }
703
704         /* Check support for VM-exit controls */
705         error = vmx_set_ctlreg(MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS,
706                                VM_EXIT_CTLS_ONE_SETTING,
707                                VM_EXIT_CTLS_ZERO_SETTING,
708                                &exit_ctls);
709         if (error) {
710                 printf("vmx_init: processor does not support desired "
711                     "exit controls\n");
712                 return (error);
713         }
714
715         /* Check support for VM-entry controls */
716         error = vmx_set_ctlreg(MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS,
717             VM_ENTRY_CTLS_ONE_SETTING, VM_ENTRY_CTLS_ZERO_SETTING,
718             &entry_ctls);
719         if (error) {
720                 printf("vmx_init: processor does not support desired "
721                     "entry controls\n");
722                 return (error);
723         }
724
725         /*
726          * Check support for optional features by testing them
727          * as individual bits
728          */
729         cap_halt_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
730                                         MSR_VMX_TRUE_PROCBASED_CTLS,
731                                         PROCBASED_HLT_EXITING, 0,
732                                         &tmp) == 0);
733
734         cap_monitor_trap = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
735                                         MSR_VMX_PROCBASED_CTLS,
736                                         PROCBASED_MTF, 0,
737                                         &tmp) == 0);
738
739         cap_pause_exit = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
740                                          MSR_VMX_TRUE_PROCBASED_CTLS,
741                                          PROCBASED_PAUSE_EXITING, 0,
742                                          &tmp) == 0);
743
744         cap_unrestricted_guest = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
745                                         MSR_VMX_PROCBASED_CTLS2,
746                                         PROCBASED2_UNRESTRICTED_GUEST, 0,
747                                         &tmp) == 0);
748
749         cap_invpcid = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2,
750             MSR_VMX_PROCBASED_CTLS2, PROCBASED2_ENABLE_INVPCID, 0,
751             &tmp) == 0);
752
753         /*
754          * Check support for virtual interrupt delivery.
755          */
756         procbased2_vid_bits = (PROCBASED2_VIRTUALIZE_APIC_ACCESSES |
757             PROCBASED2_VIRTUALIZE_X2APIC_MODE |
758             PROCBASED2_APIC_REGISTER_VIRTUALIZATION |
759             PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY);
760
761         use_tpr_shadow = (vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS,
762             MSR_VMX_TRUE_PROCBASED_CTLS, PROCBASED_USE_TPR_SHADOW, 0,
763             &tmp) == 0);
764
765         error = vmx_set_ctlreg(MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2,
766             procbased2_vid_bits, 0, &tmp);
767         if (error == 0 && use_tpr_shadow) {
768                 virtual_interrupt_delivery = 1;
769                 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
770                     &virtual_interrupt_delivery);
771         }
772
773         if (virtual_interrupt_delivery) {
774                 procbased_ctls |= PROCBASED_USE_TPR_SHADOW;
775                 procbased_ctls2 |= procbased2_vid_bits;
776                 procbased_ctls2 &= ~PROCBASED2_VIRTUALIZE_X2APIC_MODE;
777
778                 /*
779                  * No need to emulate accesses to %CR8 if virtual
780                  * interrupt delivery is enabled.
781                  */
782                 procbased_ctls &= ~PROCBASED_CR8_LOAD_EXITING;
783                 procbased_ctls &= ~PROCBASED_CR8_STORE_EXITING;
784
785                 /*
786                  * Check for Posted Interrupts only if Virtual Interrupt
787                  * Delivery is enabled.
788                  */
789                 error = vmx_set_ctlreg(MSR_VMX_PINBASED_CTLS,
790                     MSR_VMX_TRUE_PINBASED_CTLS, PINBASED_POSTED_INTERRUPT, 0,
791                     &tmp);
792                 if (error == 0) {
793                         pirvec = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
794                             &IDTVEC(justreturn));
795                         if (pirvec < 0) {
796                                 if (bootverbose) {
797                                         printf("vmx_init: unable to allocate "
798                                             "posted interrupt vector\n");
799                                 }
800                         } else {
801                                 posted_interrupts = 1;
802                                 TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
803                                     &posted_interrupts);
804                         }
805                 }
806         }
807
808         if (posted_interrupts)
809                 pinbased_ctls |= PINBASED_POSTED_INTERRUPT;
810
811         /* Initialize EPT */
812         error = ept_init(ipinum);
813         if (error) {
814                 printf("vmx_init: ept initialization failed (%d)\n", error);
815                 return (error);
816         }
817
818         guest_l1d_flush = (cpu_ia32_arch_caps &
819             IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
820         TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);
821
822         /*
823          * If the L1D cache flush mitigation is enabled, use the
824          * IA32_FLUSH_CMD MSR when available.  Otherwise fall back to the
825          * software flush method which loads enough data from the kernel
826          * text to flush existing L1D content, both on VMX entry and on
827          * NMI return.
828          */
829         if (guest_l1d_flush) {
830                 if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
831                         guest_l1d_flush_sw = 1;
832                         TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
833                             &guest_l1d_flush_sw);
834                 }
835                 if (guest_l1d_flush_sw) {
836                         if (nmi_flush_l1d_sw <= 1)
837                                 nmi_flush_l1d_sw = 1;
838                 } else {
839                         msr_load_list[0].index = MSR_IA32_FLUSH_CMD;
840                         msr_load_list[0].val = IA32_FLUSH_CMD_L1D;
841                 }
842         }
843
844         /*
845          * Stash the cr0 and cr4 bits that must be fixed to 0 or 1
846          */
847         fixed0 = rdmsr(MSR_VMX_CR0_FIXED0);
848         fixed1 = rdmsr(MSR_VMX_CR0_FIXED1);
849         cr0_ones_mask = fixed0 & fixed1;
850         cr0_zeros_mask = ~fixed0 & ~fixed1;
851
852         /*
853          * CR0_PE and CR0_PG can be set to zero in VMX non-root operation
854          * if unrestricted guest execution is allowed.
855          */
856         if (cap_unrestricted_guest)
857                 cr0_ones_mask &= ~(CR0_PG | CR0_PE);
858
859         /*
860          * Do not allow the guest to set CR0_NW or CR0_CD.
861          */
862         cr0_zeros_mask |= (CR0_NW | CR0_CD);
863
864         fixed0 = rdmsr(MSR_VMX_CR4_FIXED0);
865         fixed1 = rdmsr(MSR_VMX_CR4_FIXED1);
866         cr4_ones_mask = fixed0 & fixed1;
867         cr4_zeros_mask = ~fixed0 & ~fixed1;
868
869         vpid_init();
870
871         vmx_msr_init();
872
873         /* enable VMX operation */
874         smp_rendezvous(NULL, vmx_enable, NULL, NULL);
875
876         vmx_initialized = 1;
877
878         return (0);
879 }
880
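/*
 * Invoke the host interrupt handler for 'vector' directly, after validating
 * the corresponding IDT gate descriptor.
 */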
881 static void
882 vmx_trigger_hostintr(int vector)
883 {
884         uintptr_t func;
885         struct gate_descriptor *gd;
886
887         gd = &idt[vector];
888
889         KASSERT(vector >= 32 && vector <= 255, ("vmx_trigger_hostintr: "
890             "invalid vector %d", vector));
891         KASSERT(gd->gd_p == 1, ("gate descriptor for vector %d not present",
892             vector));
893         KASSERT(gd->gd_type == SDT_SYSIGT, ("gate descriptor for vector %d "
894             "has invalid type %d", vector, gd->gd_type));
895         KASSERT(gd->gd_dpl == SEL_KPL, ("gate descriptor for vector %d "
896             "has invalid dpl %d", vector, gd->gd_dpl));
897         KASSERT(gd->gd_selector == GSEL(GCODE_SEL, SEL_KPL), ("gate descriptor "
898             "for vector %d has invalid selector %d", vector, gd->gd_selector));
899         KASSERT(gd->gd_ist == 0, ("gate descriptor for vector %d has invalid "
900             "IST %d", vector, gd->gd_ist));
901
902         func = ((long)gd->gd_hioffset << 16 | gd->gd_looffset);
903         vmx_call_isr(func);
904 }
905
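/*
 * Program the CR0/CR4 guest/host mask and read shadow in the VMCS.  The mask
 * covers the bits fixed by the ones/zeros masks above; guest reads of those
 * bits return the shadow value 'initial' and guest writes that differ from
 * it cause a CR-access VM-exit.
 */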
906 static int
907 vmx_setup_cr_shadow(int which, struct vmcs *vmcs, uint32_t initial)
908 {
909         int error, mask_ident, shadow_ident;
910         uint64_t mask_value;
911
912         if (which != 0 && which != 4)
913                 panic("vmx_setup_cr_shadow: unknown cr%d", which);
914
915         if (which == 0) {
916                 mask_ident = VMCS_CR0_MASK;
917                 mask_value = cr0_ones_mask | cr0_zeros_mask;
918                 shadow_ident = VMCS_CR0_SHADOW;
919         } else {
920                 mask_ident = VMCS_CR4_MASK;
921                 mask_value = cr4_ones_mask | cr4_zeros_mask;
922                 shadow_ident = VMCS_CR4_SHADOW;
923         }
924
925         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(mask_ident), mask_value);
926         if (error)
927                 return (error);
928
929         error = vmcs_setreg(vmcs, 0, VMCS_IDENT(shadow_ident), initial);
930         if (error)
931                 return (error);
932
933         return (0);
934 }
935 #define vmx_setup_cr0_shadow(vmcs,init) vmx_setup_cr_shadow(0, (vmcs), (init))
936 #define vmx_setup_cr4_shadow(vmcs,init) vmx_setup_cr_shadow(4, (vmcs), (init))
937
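/*
 * Per-VM initialization: allocate the 'struct vmx', derive the EPTP from the
 * pmap, set up the MSR bitmap and VPIDs, and initialize each vcpu's VMCS
 * with the global execution, entry and exit controls.
 */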
938 static void *
939 vmx_vminit(struct vm *vm, pmap_t pmap)
940 {
941         uint16_t vpid[VM_MAXCPU];
942         int i, error;
943         struct vmx *vmx;
944         struct vmcs *vmcs;
945         uint32_t exc_bitmap;
946         uint16_t maxcpus;
947
948         vmx = malloc(sizeof(struct vmx), M_VMX, M_WAITOK | M_ZERO);
949         if ((uintptr_t)vmx & PAGE_MASK) {
950                 panic("malloc of struct vmx not aligned on %d byte boundary",
951                       PAGE_SIZE);
952         }
953         vmx->vm = vm;
954
955         vmx->eptp = eptp(vtophys((vm_offset_t)pmap->pm_pml4));
956
957         /*
958          * Clean up EPTP-tagged guest physical and combined mappings
959          *
960          * VMX transitions are not required to invalidate any guest physical
961          * mappings. So, it may be possible for stale guest physical mappings
962          * to be present in the processor TLBs.
963          *
964          * Combined mappings for this EP4TA are also invalidated for all VPIDs.
965          */
966         ept_invalidate_mappings(vmx->eptp);
967
968         msr_bitmap_initialize(vmx->msr_bitmap);
969
970         /*
971          * It is safe to allow direct access to MSR_GSBASE and MSR_FSBASE.
972          * The guest FSBASE and GSBASE are saved and restored during
973          * vm-exit and vm-entry respectively. The host FSBASE and GSBASE are
974          * always restored from the vmcs host state area on vm-exit.
975          *
976          * The SYSENTER_CS/ESP/EIP MSRs are identical to FS/GSBASE in
977          * how they are saved/restored so can be directly accessed by the
978          * guest.
979          *
980          * MSR_EFER is saved and restored in the guest VMCS area on a
981          * VM exit and entry respectively. It is also restored from the
982          * host VMCS area on a VM exit.
983          *
984          * The TSC MSR is exposed read-only. Writes are disallowed as
985          * that would impact the host TSC.  If the guest does a write,
986          * the "use TSC offsetting" execution control is enabled and the
987          * difference between the host TSC and the guest TSC is written
988          * into the TSC offset in the VMCS.
989          */
990         if (guest_msr_rw(vmx, MSR_GSBASE) ||
991             guest_msr_rw(vmx, MSR_FSBASE) ||
992             guest_msr_rw(vmx, MSR_SYSENTER_CS_MSR) ||
993             guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
994             guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
995             guest_msr_rw(vmx, MSR_EFER) ||
996             guest_msr_ro(vmx, MSR_TSC))
997                 panic("vmx_vminit: error setting guest msr access");
998
999         vpid_alloc(vpid, VM_MAXCPU);
1000
1001         if (virtual_interrupt_delivery) {
1002                 error = vm_map_mmio(vm, DEFAULT_APIC_BASE, PAGE_SIZE,
1003                     APIC_ACCESS_ADDRESS);
1004                 /* XXX this should really return an error to the caller */
1005                 KASSERT(error == 0, ("vm_map_mmio(apicbase) error %d", error));
1006         }
1007
1008         maxcpus = vm_get_maxcpus(vm);
1009         for (i = 0; i < maxcpus; i++) {
1010                 vmcs = &vmx->vmcs[i];
1011                 vmcs->identifier = vmx_revision();
1012                 error = vmclear(vmcs);
1013                 if (error != 0) {
1014                         panic("vmx_vminit: vmclear error %d on vcpu %d\n",
1015                               error, i);
1016                 }
1017
1018                 vmx_msr_guest_init(vmx, i);
1019
1020                 error = vmcs_init(vmcs);
1021                 KASSERT(error == 0, ("vmcs_init error %d", error));
1022
1023                 VMPTRLD(vmcs);
1024                 error = 0;
1025                 error += vmwrite(VMCS_HOST_RSP, (u_long)&vmx->ctx[i]);
1026                 error += vmwrite(VMCS_EPTP, vmx->eptp);
1027                 error += vmwrite(VMCS_PIN_BASED_CTLS, pinbased_ctls);
1028                 error += vmwrite(VMCS_PRI_PROC_BASED_CTLS, procbased_ctls);
1029                 error += vmwrite(VMCS_SEC_PROC_BASED_CTLS, procbased_ctls2);
1030                 error += vmwrite(VMCS_EXIT_CTLS, exit_ctls);
1031                 error += vmwrite(VMCS_ENTRY_CTLS, entry_ctls);
1032                 error += vmwrite(VMCS_MSR_BITMAP, vtophys(vmx->msr_bitmap));
1033                 error += vmwrite(VMCS_VPID, vpid[i]);
1034
1035                 if (guest_l1d_flush && !guest_l1d_flush_sw) {
1036                         vmcs_write(VMCS_ENTRY_MSR_LOAD, pmap_kextract(
1037                             (vm_offset_t)&msr_load_list[0]));
1038                         vmcs_write(VMCS_ENTRY_MSR_LOAD_COUNT,
1039                             nitems(msr_load_list));
1040                         vmcs_write(VMCS_EXIT_MSR_STORE, 0);
1041                         vmcs_write(VMCS_EXIT_MSR_STORE_COUNT, 0);
1042                 }
1043
1044                 /* exception bitmap */
1045                 if (vcpu_trace_exceptions(vm, i))
1046                         exc_bitmap = 0xffffffff;
1047                 else
1048                         exc_bitmap = 1 << IDT_MC;
1049                 error += vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap);
1050
1051                 vmx->ctx[i].guest_dr6 = DBREG_DR6_RESERVED1;
1052                 error += vmwrite(VMCS_GUEST_DR7, DBREG_DR7_RESERVED1);
1053
1054                 if (virtual_interrupt_delivery) {
1055                         error += vmwrite(VMCS_APIC_ACCESS, APIC_ACCESS_ADDRESS);
1056                         error += vmwrite(VMCS_VIRTUAL_APIC,
1057                             vtophys(&vmx->apic_page[i]));
1058                         error += vmwrite(VMCS_EOI_EXIT0, 0);
1059                         error += vmwrite(VMCS_EOI_EXIT1, 0);
1060                         error += vmwrite(VMCS_EOI_EXIT2, 0);
1061                         error += vmwrite(VMCS_EOI_EXIT3, 0);
1062                 }
1063                 if (posted_interrupts) {
1064                         error += vmwrite(VMCS_PIR_VECTOR, pirvec);
1065                         error += vmwrite(VMCS_PIR_DESC,
1066                             vtophys(&vmx->pir_desc[i]));
1067                 }
1068                 VMCLEAR(vmcs);
1069                 KASSERT(error == 0, ("vmx_vminit: error customizing the vmcs"));
1070
1071                 vmx->cap[i].set = 0;
1072                 vmx->cap[i].proc_ctls = procbased_ctls;
1073                 vmx->cap[i].proc_ctls2 = procbased_ctls2;
1074
1075                 vmx->state[i].nextrip = ~0;
1076                 vmx->state[i].lastcpu = NOCPU;
1077                 vmx->state[i].vpid = vpid[i];
1078
1079                 /*
1080                  * Set up the CR0/4 shadows, and init the read shadow
1081                  * to the power-on register value from the Intel Sys Arch.
1082                  *  CR0 - 0x60000010
1083                  *  CR4 - 0
1084                  */
1085                 error = vmx_setup_cr0_shadow(vmcs, 0x60000010);
1086                 if (error != 0)
1087                         panic("vmx_setup_cr0_shadow %d", error);
1088
1089                 error = vmx_setup_cr4_shadow(vmcs, 0);
1090                 if (error != 0)
1091                         panic("vmx_setup_cr4_shadow %d", error);
1092
1093                 vmx->ctx[i].pmap = pmap;
1094         }
1095
1096         return (vmx);
1097 }
1098
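/*
 * Emulate a guest CPUID instruction using the register values saved in the
 * vmxctx; the emulated results are written back into %rax/%rbx/%rcx/%rdx.
 */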
1099 static int
1100 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
1101 {
1102         int handled, func;
1103
1104         func = vmxctx->guest_rax;
1105
1106         handled = x86_emulate_cpuid(vm, vcpu,
1107                                     (uint32_t*)(&vmxctx->guest_rax),
1108                                     (uint32_t*)(&vmxctx->guest_rbx),
1109                                     (uint32_t*)(&vmxctx->guest_rcx),
1110                                     (uint32_t*)(&vmxctx->guest_rdx));
1111         return (handled);
1112 }
1113
1114 static __inline void
1115 vmx_run_trace(struct vmx *vmx, int vcpu)
1116 {
1117 #ifdef KTR
1118         VCPU_CTR1(vmx->vm, vcpu, "Resume execution at %#lx", vmcs_guest_rip());
1119 #endif
1120 }
1121
1122 static __inline void
1123 vmx_exit_trace(struct vmx *vmx, int vcpu, uint64_t rip, uint32_t exit_reason,
1124                int handled)
1125 {
1126 #ifdef KTR
1127         VCPU_CTR3(vmx->vm, vcpu, "%s %s vmexit at 0x%0lx",
1128                  handled ? "handled" : "unhandled",
1129                  exit_reason_to_str(exit_reason), rip);
1130 #endif
1131 }
1132
1133 static __inline void
1134 vmx_astpending_trace(struct vmx *vmx, int vcpu, uint64_t rip)
1135 {
1136 #ifdef KTR
1137         VCPU_CTR1(vmx->vm, vcpu, "astpending vmexit at 0x%0lx", rip);
1138 #endif
1139 }
1140
1141 static VMM_STAT_INTEL(VCPU_INVVPID_SAVED, "Number of vpid invalidations saved");
1142 static VMM_STAT_INTEL(VCPU_INVVPID_DONE, "Number of vpid invalidations done");
1143
1144 /*
1145  * Invalidate the guest TLB mappings tagged with the vcpu's vpid.
1146  */
1147 static __inline void
1148 vmx_invvpid(struct vmx *vmx, int vcpu, pmap_t pmap, int running)
1149 {
1150         struct vmxstate *vmxstate;
1151         struct invvpid_desc invvpid_desc;
1152
1153         vmxstate = &vmx->state[vcpu];
1154         if (vmxstate->vpid == 0)
1155                 return;
1156
1157         if (!running) {
1158                 /*
1159                  * Set the 'lastcpu' to an invalid host cpu.
1160                  *
1161                  * This will invalidate TLB entries tagged with the vcpu's
1162                  * vpid the next time it runs via vmx_set_pcpu_defaults().
1163                  */
1164                 vmxstate->lastcpu = NOCPU;
1165                 return;
1166         }
1167
1168         KASSERT(curthread->td_critnest > 0, ("%s: vcpu %d running outside "
1169             "critical section", __func__, vcpu));
1170
1171         /*
1172          * Invalidate all mappings tagged with 'vpid'
1173          *
1174          * We do this because this vcpu was executing on a different host
1175          * cpu when it last ran. We do not track whether it invalidated
1176          * mappings associated with its 'vpid' during that run. So we must
1177          * assume that the mappings associated with 'vpid' on 'curcpu' are
1178          * stale and invalidate them.
1179          *
1180          * Note that we incur this penalty only when the scheduler chooses to
1181          * move the thread associated with this vcpu between host cpus.
1182          *
1183          * Note also that this will invalidate mappings tagged with 'vpid'
1184          * for "all" EP4TAs.
1185          */
1186         if (pmap->pm_eptgen == vmx->eptgen[curcpu]) {
1187                 invvpid_desc._res1 = 0;
1188                 invvpid_desc._res2 = 0;
1189                 invvpid_desc.vpid = vmxstate->vpid;
1190                 invvpid_desc.linear_addr = 0;
1191                 invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
1192                 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_DONE, 1);
1193         } else {
1194                 /*
1195                  * The invvpid can be skipped if an invept is going to
1196                  * be performed before entering the guest. The invept
1197                  * will invalidate combined mappings tagged with
1198                  * 'vmx->eptp' for all vpids.
1199                  */
1200                 vmm_stat_incr(vmx->vm, vcpu, VCPU_INVVPID_SAVED, 1);
1201         }
1202 }
1203
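/*
 * Refresh per-host-cpu VMCS state (TR, GDTR and GS bases) and invalidate any
 * stale vpid-tagged TLB entries when a vcpu runs on a different host cpu
 * than it last ran on.
 */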
1204 static void
1205 vmx_set_pcpu_defaults(struct vmx *vmx, int vcpu, pmap_t pmap)
1206 {
1207         struct vmxstate *vmxstate;
1208
1209         vmxstate = &vmx->state[vcpu];
1210         if (vmxstate->lastcpu == curcpu)
1211                 return;
1212
1213         vmxstate->lastcpu = curcpu;
1214
1215         vmm_stat_incr(vmx->vm, vcpu, VCPU_MIGRATIONS, 1);
1216
1217         vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
1218         vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
1219         vmcs_write(VMCS_HOST_GS_BASE, vmm_get_host_gsbase());
1220         vmx_invvpid(vmx, vcpu, pmap, 1);
1221 }
1222
1223 /*
1224  * We depend on 'procbased_ctls' to have the Interrupt Window Exiting bit set.
1225  */
1226 CTASSERT((PROCBASED_CTLS_ONE_SETTING & PROCBASED_INT_WINDOW_EXITING) != 0);
1227
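/*
 * The helpers below toggle "interrupt window exiting" and "NMI window
 * exiting" in the primary processor-based controls so that a VM-exit is
 * taken as soon as the guest can accept the pending event.
 */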
1228 static void __inline
1229 vmx_set_int_window_exiting(struct vmx *vmx, int vcpu)
1230 {
1231
1232         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) == 0) {
1233                 vmx->cap[vcpu].proc_ctls |= PROCBASED_INT_WINDOW_EXITING;
1234                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1235                 VCPU_CTR0(vmx->vm, vcpu, "Enabling interrupt window exiting");
1236         }
1237 }
1238
1239 static void __inline
1240 vmx_clear_int_window_exiting(struct vmx *vmx, int vcpu)
1241 {
1242
1243         KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0,
1244             ("intr_window_exiting not set: %#x", vmx->cap[vcpu].proc_ctls));
1245         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_INT_WINDOW_EXITING;
1246         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1247         VCPU_CTR0(vmx->vm, vcpu, "Disabling interrupt window exiting");
1248 }
1249
1250 static void __inline
1251 vmx_set_nmi_window_exiting(struct vmx *vmx, int vcpu)
1252 {
1253
1254         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) == 0) {
1255                 vmx->cap[vcpu].proc_ctls |= PROCBASED_NMI_WINDOW_EXITING;
1256                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1257                 VCPU_CTR0(vmx->vm, vcpu, "Enabling NMI window exiting");
1258         }
1259 }
1260
1261 static void __inline
1262 vmx_clear_nmi_window_exiting(struct vmx *vmx, int vcpu)
1263 {
1264
1265         KASSERT((vmx->cap[vcpu].proc_ctls & PROCBASED_NMI_WINDOW_EXITING) != 0,
1266             ("nmi_window_exiting not set %#x", vmx->cap[vcpu].proc_ctls));
1267         vmx->cap[vcpu].proc_ctls &= ~PROCBASED_NMI_WINDOW_EXITING;
1268         vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1269         VCPU_CTR0(vmx->vm, vcpu, "Disabling NMI window exiting");
1270 }
1271
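/*
 * Enable the "use TSC offsetting" execution control on first use and write
 * 'offset' into the VMCS TSC offset field.
 */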
1272 int
1273 vmx_set_tsc_offset(struct vmx *vmx, int vcpu, uint64_t offset)
1274 {
1275         int error;
1276
1277         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_TSC_OFFSET) == 0) {
1278                 vmx->cap[vcpu].proc_ctls |= PROCBASED_TSC_OFFSET;
1279                 vmcs_write(VMCS_PRI_PROC_BASED_CTLS, vmx->cap[vcpu].proc_ctls);
1280                 VCPU_CTR0(vmx->vm, vcpu, "Enabling TSC offsetting");
1281         }
1282
1283         error = vmwrite(VMCS_TSC_OFFSET, offset);
1284
1285         return (error);
1286 }
1287
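/*
 * Guest interruptibility-state bits that block injection of NMIs and
 * hardware interrupts respectively.
 */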
1288 #define NMI_BLOCKING    (VMCS_INTERRUPTIBILITY_NMI_BLOCKING |           \
1289                          VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1290 #define HWINTR_BLOCKING (VMCS_INTERRUPTIBILITY_STI_BLOCKING |           \
1291                          VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)
1292
1293 static void
1294 vmx_inject_nmi(struct vmx *vmx, int vcpu)
1295 {
1296         uint32_t gi, info;
1297
1298         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1299         KASSERT((gi & NMI_BLOCKING) == 0, ("vmx_inject_nmi: invalid guest "
1300             "interruptibility-state %#x", gi));
1301
1302         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1303         KASSERT((info & VMCS_INTR_VALID) == 0, ("vmx_inject_nmi: invalid "
1304             "VM-entry interruption information %#x", info));
1305
1306         /*
1307          * Inject the virtual NMI. The vector must be the NMI IDT entry
1308          * or the VMCS entry check will fail.
1309          */
1310         info = IDT_NMI | VMCS_INTR_T_NMI | VMCS_INTR_VALID;
1311         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1312
1313         VCPU_CTR0(vmx->vm, vcpu, "Injecting vNMI");
1314
1315         /* Clear the request */
1316         vm_nmi_clear(vmx->vm, vcpu);
1317 }
1318
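/*
 * Inject pending events into the guest before VM entry: previously recorded
 * exceptions/intinfo, NMIs, legacy PIC ExtINTs and local APIC interrupts,
 * falling back to window exiting when injection is currently blocked.
 */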
1319 static void
1320 vmx_inject_interrupts(struct vmx *vmx, int vcpu, struct vlapic *vlapic,
1321     uint64_t guestrip)
1322 {
1323         int vector, need_nmi_exiting, extint_pending;
1324         uint64_t rflags, entryinfo;
1325         uint32_t gi, info;
1326
1327         if (vmx->state[vcpu].nextrip != guestrip) {
1328                 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1329                 if (gi & HWINTR_BLOCKING) {
1330                         VCPU_CTR2(vmx->vm, vcpu, "Guest interrupt blocking "
1331                             "cleared due to rip change: %#lx/%#lx",
1332                             vmx->state[vcpu].nextrip, guestrip);
1333                         gi &= ~HWINTR_BLOCKING;
1334                         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1335                 }
1336         }
1337
1338         if (vm_entry_intinfo(vmx->vm, vcpu, &entryinfo)) {
1339                 KASSERT((entryinfo & VMCS_INTR_VALID) != 0, ("%s: entry "
1340                     "intinfo is not valid: %#lx", __func__, entryinfo));
1341
1342                 info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1343                 KASSERT((info & VMCS_INTR_VALID) == 0, ("%s: cannot inject "
1344                      "pending exception: %#lx/%#x", __func__, entryinfo, info));
1345
1346                 info = entryinfo;
1347                 vector = info & 0xff;
1348                 if (vector == IDT_BP || vector == IDT_OF) {
1349                         /*
1350                          * VT-x requires #BP and #OF to be injected as software
1351                          * exceptions.
1352                          */
1353                         info &= ~VMCS_INTR_T_MASK;
1354                         info |= VMCS_INTR_T_SWEXCEPTION;
1355                 }
1356
1357                 if (info & VMCS_INTR_DEL_ERRCODE)
1358                         vmcs_write(VMCS_ENTRY_EXCEPTION_ERROR, entryinfo >> 32);
1359
1360                 vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1361         }
1362
1363         if (vm_nmi_pending(vmx->vm, vcpu)) {
1364                 /*
1365                  * If there are no conditions blocking NMI injection then
1366                  * inject it directly here; otherwise enable "NMI window
1367                  * exiting" to inject it as soon as we can.
1368                  *
1369                  * We also check for STI_BLOCKING because some implementations
1370                  * don't allow NMI injection in this case. If we are running
1371                  * on a processor that doesn't have this restriction it will
1372                  * immediately exit and the NMI will be injected in the
1373                  * "NMI window exiting" handler.
1374                  */
1375                 need_nmi_exiting = 1;
1376                 gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1377                 if ((gi & (HWINTR_BLOCKING | NMI_BLOCKING)) == 0) {
1378                         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1379                         if ((info & VMCS_INTR_VALID) == 0) {
1380                                 vmx_inject_nmi(vmx, vcpu);
1381                                 need_nmi_exiting = 0;
1382                         } else {
1383                                 VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI "
1384                                     "due to VM-entry intr info %#x", info);
1385                         }
1386                 } else {
1387                         VCPU_CTR1(vmx->vm, vcpu, "Cannot inject NMI due to "
1388                             "Guest Interruptibility-state %#x", gi);
1389                 }
1390
1391                 if (need_nmi_exiting)
1392                         vmx_set_nmi_window_exiting(vmx, vcpu);
1393         }
1394
1395         extint_pending = vm_extint_pending(vmx->vm, vcpu);
1396
1397         if (!extint_pending && virtual_interrupt_delivery) {
1398                 vmx_inject_pir(vlapic);
1399                 return;
1400         }
1401
1402         /*
1403          * If interrupt-window exiting is already in effect then don't bother
1404          * checking for pending interrupts. This is just an optimization and
1405          * not needed for correctness.
1406          */
1407         if ((vmx->cap[vcpu].proc_ctls & PROCBASED_INT_WINDOW_EXITING) != 0) {
1408                 VCPU_CTR0(vmx->vm, vcpu, "Skip interrupt injection due to "
1409                     "pending int_window_exiting");
1410                 return;
1411         }
1412
1413         if (!extint_pending) {
1414                 /* Ask the local apic for a vector to inject */
1415                 if (!vlapic_pending_intr(vlapic, &vector))
1416                         return;
1417
1418                 /*
1419                  * From the Intel SDM, Volume 3, Section "Maskable
1420                  * Hardware Interrupts":
1421                  * - maskable interrupt vectors [16,255] can be delivered
1422                  *   through the local APIC.
1423                  */
1424                 KASSERT(vector >= 16 && vector <= 255,
1425                     ("invalid vector %d from local APIC", vector));
1426         } else {
1427                 /* Ask the legacy pic for a vector to inject */
1428                 vatpic_pending_intr(vmx->vm, &vector);
1429
1430                 /*
1431                  * From the Intel SDM, Volume 3, Section "Maskable
1432                  * Hardware Interrupts":
1433                  * - maskable interrupt vectors [0,255] can be delivered
1434                  *   through the INTR pin.
1435                  */
1436                 KASSERT(vector >= 0 && vector <= 255,
1437                     ("invalid vector %d from INTR", vector));
1438         }
1439
1440         /* Check RFLAGS.IF and the interruptibility state of the guest */
1441         rflags = vmcs_read(VMCS_GUEST_RFLAGS);
1442         if ((rflags & PSL_I) == 0) {
1443                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1444                     "rflags %#lx", vector, rflags);
1445                 goto cantinject;
1446         }
1447
1448         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1449         if (gi & HWINTR_BLOCKING) {
1450                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1451                     "Guest Interruptibility-state %#x", vector, gi);
1452                 goto cantinject;
1453         }
1454
1455         info = vmcs_read(VMCS_ENTRY_INTR_INFO);
1456         if (info & VMCS_INTR_VALID) {
1457                 /*
1458                  * This is expected and could happen for multiple reasons:
1459                  * - A vectoring VM-entry was aborted due to astpending
1460                  * - A VM-exit happened during event injection.
1461                  * - An exception was injected above.
1462                  * - An NMI was injected above or after "NMI window exiting"
1463                  */
1464                 VCPU_CTR2(vmx->vm, vcpu, "Cannot inject vector %d due to "
1465                     "VM-entry intr info %#x", vector, info);
1466                 goto cantinject;
1467         }
1468
1469         /* Inject the interrupt */
1470         info = VMCS_INTR_T_HWINTR | VMCS_INTR_VALID;
1471         info |= vector;
1472         vmcs_write(VMCS_ENTRY_INTR_INFO, info);
1473
1474         if (!extint_pending) {
1475                 /* Update the Local APIC ISR */
1476                 vlapic_intr_accepted(vlapic, vector);
1477         } else {
1478                 vm_extint_clear(vmx->vm, vcpu);
1479                 vatpic_intr_accepted(vmx->vm, vector);
1480
1481                 /*
1482                  * After we accepted the current ExtINT the PIC may
1483                  * have posted another one.  If that is the case, set
1484                  * the Interrupt Window Exiting execution control so
1485                  * we can inject that one too.
1486                  *
1487                  * Also, interrupt window exiting allows us to inject any
1488                  * pending APIC vector that was preempted by the ExtINT
1489                  * as soon as possible. This applies both for the software
1490                  * emulated vlapic and the hardware assisted virtual APIC.
1491                  */
1492                 vmx_set_int_window_exiting(vmx, vcpu);
1493         }
1494
1495         VCPU_CTR1(vmx->vm, vcpu, "Injecting hwintr at vector %d", vector);
1496
1497         return;
1498
1499 cantinject:
1500         /*
1501          * Set the Interrupt Window Exiting execution control so we can inject
1502          * the interrupt as soon as the blocking condition goes away.
1503          */
1504         vmx_set_int_window_exiting(vmx, vcpu);
1505 }
1506
1507 /*
1508  * If the Virtual NMIs execution control is '1' then the logical processor
1509  * tracks virtual-NMI blocking in the Guest Interruptibility-state field of
1510  * the VMCS. An IRET instruction in VMX non-root operation will remove any
1511  * virtual-NMI blocking.
1512  *
1513  * This unblocking occurs even if the IRET causes a fault. In this case the
1514  * hypervisor needs to restore virtual-NMI blocking before resuming the guest.
1515  */
1516 static void
1517 vmx_restore_nmi_blocking(struct vmx *vmx, int vcpuid)
1518 {
1519         uint32_t gi;
1520
1521         VCPU_CTR0(vmx->vm, vcpuid, "Restore Virtual-NMI blocking");
1522         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1523         gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1524         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1525 }
1526
1527 static void
1528 vmx_clear_nmi_blocking(struct vmx *vmx, int vcpuid)
1529 {
1530         uint32_t gi;
1531
1532         VCPU_CTR0(vmx->vm, vcpuid, "Clear Virtual-NMI blocking");
1533         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1534         gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
1535         vmcs_write(VMCS_GUEST_INTERRUPTIBILITY, gi);
1536 }
1537
1538 static void
1539 vmx_assert_nmi_blocking(struct vmx *vmx, int vcpuid)
1540 {
1541         uint32_t gi;
1542
1543         gi = vmcs_read(VMCS_GUEST_INTERRUPTIBILITY);
1544         KASSERT(gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING,
1545             ("NMI blocking is not in effect %#x", gi));
1546 }
1547
1548 static int
1549 vmx_emulate_xsetbv(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
1550 {
1551         struct vmxctx *vmxctx;
1552         uint64_t xcrval;
1553         const struct xsave_limits *limits;
1554
1555         vmxctx = &vmx->ctx[vcpu];
1556         limits = vmm_get_xsave_limits();
1557
1558         /*
1559          * Note that the processor raises a GP# fault on its own if
1560          * xsetbv is executed for CPL != 0, so we do not have to
1561          * emulate that fault here.
1562          */
1563
1564         /* Only xcr0 is supported. */
1565         if (vmxctx->guest_rcx != 0) {
1566                 vm_inject_gp(vmx->vm, vcpu);
1567                 return (HANDLED);
1568         }
1569
1570         /* We only handle xcr0 if both the host and guest have XSAVE enabled. */
1571         if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
1572                 vm_inject_ud(vmx->vm, vcpu);
1573                 return (HANDLED);
1574         }
1575
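             /* XSETBV supplies the new XCR value in %edx:%eax. */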
1576         xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
1577         if ((xcrval & ~limits->xcr0_allowed) != 0) {
1578                 vm_inject_gp(vmx->vm, vcpu);
1579                 return (HANDLED);
1580         }
1581
1582         if (!(xcrval & XFEATURE_ENABLED_X87)) {
1583                 vm_inject_gp(vmx->vm, vcpu);
1584                 return (HANDLED);
1585         }
1586
1587         /* AVX (YMM_Hi128) requires SSE. */
1588         if (xcrval & XFEATURE_ENABLED_AVX &&
1589             (xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
1590                 vm_inject_gp(vmx->vm, vcpu);
1591                 return (HANDLED);
1592         }
1593
1594         /*
1595          * AVX512 requires base AVX (YMM_Hi128) as well as OpMask,
1596          * ZMM_Hi256, and Hi16_ZMM.
1597          */
1598         if (xcrval & XFEATURE_AVX512 &&
1599             (xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
1600             (XFEATURE_AVX512 | XFEATURE_AVX)) {
1601                 vm_inject_gp(vmx->vm, vcpu);
1602                 return (HANDLED);
1603         }
1604
1605         /*
1606          * Intel MPX requires both bound register state flags to be
1607          * set.
1608          */
1609         if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
1610             ((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
1611                 vm_inject_gp(vmx->vm, vcpu);
1612                 return (HANDLED);
1613         }
1614
1615         /*
1616          * This runs "inside" vmrun() with the guest's FPU state, so
1617          * modifying xcr0 directly modifies the guest's xcr0, not the
1618          * host's.
1619          */
1620         load_xcr(0, xcrval);
1621         return (HANDLED);
1622 }
1623
1624 static uint64_t
1625 vmx_get_guest_reg(struct vmx *vmx, int vcpu, int ident)
1626 {
1627         const struct vmxctx *vmxctx;
1628
1629         vmxctx = &vmx->ctx[vcpu];
1630
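             /*
              * 'ident' follows the x86 GPR encoding (0 = %rax ... 15 = %r15);
              * %rsp is read from the VMCS rather than the register cache.
              */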
1631         switch (ident) {
1632         case 0:
1633                 return (vmxctx->guest_rax);
1634         case 1:
1635                 return (vmxctx->guest_rcx);
1636         case 2:
1637                 return (vmxctx->guest_rdx);
1638         case 3:
1639                 return (vmxctx->guest_rbx);
1640         case 4:
1641                 return (vmcs_read(VMCS_GUEST_RSP));
1642         case 5:
1643                 return (vmxctx->guest_rbp);
1644         case 6:
1645                 return (vmxctx->guest_rsi);
1646         case 7:
1647                 return (vmxctx->guest_rdi);
1648         case 8:
1649                 return (vmxctx->guest_r8);
1650         case 9:
1651                 return (vmxctx->guest_r9);
1652         case 10:
1653                 return (vmxctx->guest_r10);
1654         case 11:
1655                 return (vmxctx->guest_r11);
1656         case 12:
1657                 return (vmxctx->guest_r12);
1658         case 13:
1659                 return (vmxctx->guest_r13);
1660         case 14:
1661                 return (vmxctx->guest_r14);
1662         case 15:
1663                 return (vmxctx->guest_r15);
1664         default:
1665                 panic("invalid vmx register %d", ident);
1666         }
1667 }
1668
1669 static void
1670 vmx_set_guest_reg(struct vmx *vmx, int vcpu, int ident, uint64_t regval)
1671 {
1672         struct vmxctx *vmxctx;
1673
1674         vmxctx = &vmx->ctx[vcpu];
1675
1676         switch (ident) {
1677         case 0:
1678                 vmxctx->guest_rax = regval;
1679                 break;
1680         case 1:
1681                 vmxctx->guest_rcx = regval;
1682                 break;
1683         case 2:
1684                 vmxctx->guest_rdx = regval;
1685                 break;
1686         case 3:
1687                 vmxctx->guest_rbx = regval;
1688                 break;
1689         case 4:
1690                 vmcs_write(VMCS_GUEST_RSP, regval);
1691                 break;
1692         case 5:
1693                 vmxctx->guest_rbp = regval;
1694                 break;
1695         case 6:
1696                 vmxctx->guest_rsi = regval;
1697                 break;
1698         case 7:
1699                 vmxctx->guest_rdi = regval;
1700                 break;
1701         case 8:
1702                 vmxctx->guest_r8 = regval;
1703                 break;
1704         case 9:
1705                 vmxctx->guest_r9 = regval;
1706                 break;
1707         case 10:
1708                 vmxctx->guest_r10 = regval;
1709                 break;
1710         case 11:
1711                 vmxctx->guest_r11 = regval;
1712                 break;
1713         case 12:
1714                 vmxctx->guest_r12 = regval;
1715                 break;
1716         case 13:
1717                 vmxctx->guest_r13 = regval;
1718                 break;
1719         case 14:
1720                 vmxctx->guest_r14 = regval;
1721                 break;
1722         case 15:
1723                 vmxctx->guest_r15 = regval;
1724                 break;
1725         default:
1726                 panic("invalid vmx register %d", ident);
1727         }
1728 }
1729
1730 static int
1731 vmx_emulate_cr0_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1732 {
1733         uint64_t crval, regval;
1734
1735         /* We only handle mov to %cr0 at this time */
1736         if ((exitqual & 0xf0) != 0x00)
1737                 return (UNHANDLED);
1738
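             /* Bits 11:8 of the exit qualification name the source GPR. */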
1739         regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1740
1741         vmcs_write(VMCS_CR0_SHADOW, regval);
1742
1743         crval = regval | cr0_ones_mask;
1744         crval &= ~cr0_zeros_mask;
1745         vmcs_write(VMCS_GUEST_CR0, crval);
1746
1747         if (regval & CR0_PG) {
1748                 uint64_t efer, entry_ctls;
1749
1750                 /*
1751                  * If CR0.PG is 1 and EFER.LME is 1 then EFER.LMA and
1752                  * the "IA-32e mode guest" bit in VM-entry control must be
1753                  * equal.
1754                  */
1755                 efer = vmcs_read(VMCS_GUEST_IA32_EFER);
1756                 if (efer & EFER_LME) {
1757                         efer |= EFER_LMA;
1758                         vmcs_write(VMCS_GUEST_IA32_EFER, efer);
1759                         entry_ctls = vmcs_read(VMCS_ENTRY_CTLS);
1760                         entry_ctls |= VM_ENTRY_GUEST_LMA;
1761                         vmcs_write(VMCS_ENTRY_CTLS, entry_ctls);
1762                 }
1763         }
1764
1765         return (HANDLED);
1766 }
1767
1768 static int
1769 vmx_emulate_cr4_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1770 {
1771         uint64_t crval, regval;
1772
1773         /* We only handle mov to %cr4 at this time */
1774         if ((exitqual & 0xf0) != 0x00)
1775                 return (UNHANDLED);
1776
1777         regval = vmx_get_guest_reg(vmx, vcpu, (exitqual >> 8) & 0xf);
1778
1779         vmcs_write(VMCS_CR4_SHADOW, regval);
1780
1781         crval = regval | cr4_ones_mask;
1782         crval &= ~cr4_zeros_mask;
1783         vmcs_write(VMCS_GUEST_CR4, crval);
1784
1785         return (HANDLED);
1786 }
1787
1788 static int
1789 vmx_emulate_cr8_access(struct vmx *vmx, int vcpu, uint64_t exitqual)
1790 {
1791         struct vlapic *vlapic;
1792         uint64_t cr8;
1793         int regnum;
1794
1795         /* We only handle mov %cr8 to/from a register at this time. */
1796         if ((exitqual & 0xe0) != 0x00) {
1797                 return (UNHANDLED);
1798         }
1799
1800         vlapic = vm_lapic(vmx->vm, vcpu);
1801         regnum = (exitqual >> 8) & 0xf;
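             /*
              * Bit 4 of the exit qualification gives the access type:
              * set for 'mov from %cr8', clear for 'mov to %cr8'.
              */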
1802         if (exitqual & 0x10) {
1803                 cr8 = vlapic_get_cr8(vlapic);
1804                 vmx_set_guest_reg(vmx, vcpu, regnum, cr8);
1805         } else {
1806                 cr8 = vmx_get_guest_reg(vmx, vcpu, regnum);
1807                 vlapic_set_cr8(vlapic, cr8);
1808         }
1809
1810         return (HANDLED);
1811 }
1812
1813 /*
1814  * From section "Guest Register State" in the Intel SDM: CPL = SS.DPL
1815  */
1816 static int
1817 vmx_cpl(void)
1818 {
1819         uint32_t ssar;
1820
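             /* DPL occupies bits 6:5 of the segment access-rights field. */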
1821         ssar = vmcs_read(VMCS_GUEST_SS_ACCESS_RIGHTS);
1822         return ((ssar >> 5) & 0x3);
1823 }
1824
1825 static enum vm_cpu_mode
1826 vmx_cpu_mode(void)
1827 {
1828         uint32_t csar;
1829
1830         if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LMA) {
1831                 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1832                 if (csar & 0x2000)
1833                         return (CPU_MODE_64BIT);        /* CS.L = 1 */
1834                 else
1835                         return (CPU_MODE_COMPATIBILITY);
1836         } else if (vmcs_read(VMCS_GUEST_CR0) & CR0_PE) {
1837                 return (CPU_MODE_PROTECTED);
1838         } else {
1839                 return (CPU_MODE_REAL);
1840         }
1841 }
1842
1843 static enum vm_paging_mode
1844 vmx_paging_mode(void)
1845 {
1846
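             /* Derived from guest CR0.PG, CR4.PAE and EFER.LME. */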
1847         if (!(vmcs_read(VMCS_GUEST_CR0) & CR0_PG))
1848                 return (PAGING_MODE_FLAT);
1849         if (!(vmcs_read(VMCS_GUEST_CR4) & CR4_PAE))
1850                 return (PAGING_MODE_32);
1851         if (vmcs_read(VMCS_GUEST_IA32_EFER) & EFER_LME)
1852                 return (PAGING_MODE_64);
1853         else
1854                 return (PAGING_MODE_PAE);
1855 }
1856
1857 static uint64_t
1858 inout_str_index(struct vmx *vmx, int vcpuid, int in)
1859 {
1860         uint64_t val;
1861         int error;
1862         enum vm_reg_name reg;
1863
1864         reg = in ? VM_REG_GUEST_RDI : VM_REG_GUEST_RSI;
1865         error = vmx_getreg(vmx, vcpuid, reg, &val);
1866         KASSERT(error == 0, ("%s: vmx_getreg error %d", __func__, error));
1867         return (val);
1868 }
1869
1870 static uint64_t
1871 inout_str_count(struct vmx *vmx, int vcpuid, int rep)
1872 {
1873         uint64_t val;
1874         int error;
1875
1876         if (rep) {
1877                 error = vmx_getreg(vmx, vcpuid, VM_REG_GUEST_RCX, &val);
1878                 KASSERT(!error, ("%s: vmx_getreg error %d", __func__, error));
1879         } else {
1880                 val = 1;
1881         }
1882         return (val);
1883 }
1884
1885 static int
1886 inout_str_addrsize(uint32_t inst_info)
1887 {
1888         uint32_t size;
1889
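             /* Address size is encoded in bits 9:7 of the instruction info. */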
1890         size = (inst_info >> 7) & 0x7;
1891         switch (size) {
1892         case 0:
1893                 return (2);     /* 16 bit */
1894         case 1:
1895                 return (4);     /* 32 bit */
1896         case 2:
1897                 return (8);     /* 64 bit */
1898         default:
1899                 panic("%s: invalid size encoding %d", __func__, size);
1900         }
1901 }
1902
1903 static void
1904 inout_str_seginfo(struct vmx *vmx, int vcpuid, uint32_t inst_info, int in,
1905     struct vm_inout_str *vis)
1906 {
1907         int error, s;
1908
1909         if (in) {
1910                 vis->seg_name = VM_REG_GUEST_ES;
1911         } else {
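                     /* Bits 17:15 of the instruction info give the segment. */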
1912                 s = (inst_info >> 15) & 0x7;
1913                 vis->seg_name = vm_segment_name(s);
1914         }
1915
1916         error = vmx_getdesc(vmx, vcpuid, vis->seg_name, &vis->seg_desc);
1917         KASSERT(error == 0, ("%s: vmx_getdesc error %d", __func__, error));
1918 }
1919
1920 static void
1921 vmx_paging_info(struct vm_guest_paging *paging)
1922 {
1923         paging->cr3 = vmcs_guest_cr3();
1924         paging->cpl = vmx_cpl();
1925         paging->cpu_mode = vmx_cpu_mode();
1926         paging->paging_mode = vmx_paging_mode();
1927 }
1928
1929 static void
1930 vmexit_inst_emul(struct vm_exit *vmexit, uint64_t gpa, uint64_t gla)
1931 {
1932         struct vm_guest_paging *paging;
1933         uint32_t csar;
1934
1935         paging = &vmexit->u.inst_emul.paging;
1936
1937         vmexit->exitcode = VM_EXITCODE_INST_EMUL;
1938         vmexit->inst_length = 0;
1939         vmexit->u.inst_emul.gpa = gpa;
1940         vmexit->u.inst_emul.gla = gla;
1941         vmx_paging_info(paging);
1942         switch (paging->cpu_mode) {
1943         case CPU_MODE_REAL:
1944                 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1945                 vmexit->u.inst_emul.cs_d = 0;
1946                 break;
1947         case CPU_MODE_PROTECTED:
1948         case CPU_MODE_COMPATIBILITY:
1949                 vmexit->u.inst_emul.cs_base = vmcs_read(VMCS_GUEST_CS_BASE);
1950                 csar = vmcs_read(VMCS_GUEST_CS_ACCESS_RIGHTS);
1951                 vmexit->u.inst_emul.cs_d = SEG_DESC_DEF32(csar);
1952                 break;
1953         default:
1954                 vmexit->u.inst_emul.cs_base = 0;
1955                 vmexit->u.inst_emul.cs_d = 0;
1956                 break;
1957         }
1958         vie_init(&vmexit->u.inst_emul.vie, NULL, 0);
1959 }
1960
1961 static int
1962 ept_fault_type(uint64_t ept_qual)
1963 {
1964         int fault_type;
1965
1966         if (ept_qual & EPT_VIOLATION_DATA_WRITE)
1967                 fault_type = VM_PROT_WRITE;
1968         else if (ept_qual & EPT_VIOLATION_INST_FETCH)
1969                 fault_type = VM_PROT_EXECUTE;
1970         else
1971                 fault_type = VM_PROT_READ;
1972
1973         return (fault_type);
1974 }
1975
1976 static bool
1977 ept_emulation_fault(uint64_t ept_qual)
1978 {
1979         int read, write;
1980
1981         /* EPT fault on an instruction fetch doesn't make sense here */
1982         if (ept_qual & EPT_VIOLATION_INST_FETCH)
1983                 return (false);
1984
1985         /* EPT fault must be a read fault or a write fault */
1986         read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
1987         write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
1988         if ((read | write) == 0)
1989                 return (false);
1990
1991         /*
1992          * The EPT violation must have been caused by accessing a
1993          * guest-physical address that is a translation of a guest-linear
1994          * address.
1995          */
1996         if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
1997             (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
1998                 return (false);
1999         }
2000
2001         return (true);
2002 }
2003
2004 static __inline int
2005 apic_access_virtualization(struct vmx *vmx, int vcpuid)
2006 {
2007         uint32_t proc_ctls2;
2008
2009         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
2010         return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) ? 1 : 0);
2011 }
2012
2013 static __inline int
2014 x2apic_virtualization(struct vmx *vmx, int vcpuid)
2015 {
2016         uint32_t proc_ctls2;
2017
2018         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
2019         return ((proc_ctls2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE) ? 1 : 0);
2020 }
2021
2022 static int
2023 vmx_handle_apic_write(struct vmx *vmx, int vcpuid, struct vlapic *vlapic,
2024     uint64_t qual)
2025 {
2026         int error, handled, offset;
2027         uint32_t *apic_regs, vector;
2028         bool retu;
2029
2030         handled = HANDLED;
2031         offset = APIC_WRITE_OFFSET(qual);
2032
2033         if (!apic_access_virtualization(vmx, vcpuid)) {
2034                 /*
2035                  * In general there should not be any APIC write VM-exits
2036                  * unless APIC-access virtualization is enabled.
2037                  *
2038                  * However, self-IPI virtualization can legitimately trigger
2039                  * an APIC-write VM-exit, so treat it specially.
2040                  */
2041                 if (x2apic_virtualization(vmx, vcpuid) &&
2042                     offset == APIC_OFFSET_SELF_IPI) {
2043                         apic_regs = (uint32_t *)(vlapic->apic_page);
2044                         vector = apic_regs[APIC_OFFSET_SELF_IPI / 4];
2045                         vlapic_self_ipi_handler(vlapic, vector);
2046                         return (HANDLED);
2047                 } else
2048                         return (UNHANDLED);
2049         }
2050
2051         switch (offset) {
2052         case APIC_OFFSET_ID:
2053                 vlapic_id_write_handler(vlapic);
2054                 break;
2055         case APIC_OFFSET_LDR:
2056                 vlapic_ldr_write_handler(vlapic);
2057                 break;
2058         case APIC_OFFSET_DFR:
2059                 vlapic_dfr_write_handler(vlapic);
2060                 break;
2061         case APIC_OFFSET_SVR:
2062                 vlapic_svr_write_handler(vlapic);
2063                 break;
2064         case APIC_OFFSET_ESR:
2065                 vlapic_esr_write_handler(vlapic);
2066                 break;
2067         case APIC_OFFSET_ICR_LOW:
2068                 retu = false;
2069                 error = vlapic_icrlo_write_handler(vlapic, &retu);
2070                 if (error != 0 || retu)
2071                         handled = UNHANDLED;
2072                 break;
2073         case APIC_OFFSET_CMCI_LVT:
2074         case APIC_OFFSET_TIMER_LVT ... APIC_OFFSET_ERROR_LVT:
2075                 vlapic_lvt_write_handler(vlapic, offset);
2076                 break;
2077         case APIC_OFFSET_TIMER_ICR:
2078                 vlapic_icrtmr_write_handler(vlapic);
2079                 break;
2080         case APIC_OFFSET_TIMER_DCR:
2081                 vlapic_dcr_write_handler(vlapic);
2082                 break;
2083         default:
2084                 handled = UNHANDLED;
2085                 break;
2086         }
2087         return (handled);
2088 }
2089
2090 static bool
2091 apic_access_fault(struct vmx *vmx, int vcpuid, uint64_t gpa)
2092 {
2093
2094         if (apic_access_virtualization(vmx, vcpuid) &&
2095             (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE))
2096                 return (true);
2097         else
2098                 return (false);
2099 }
2100
2101 static int
2102 vmx_handle_apic_access(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2103 {
2104         uint64_t qual;
2105         int access_type, offset, allowed;
2106
2107         if (!apic_access_virtualization(vmx, vcpuid))
2108                 return (UNHANDLED);
2109
2110         qual = vmexit->u.vmx.exit_qualification;
2111         access_type = APIC_ACCESS_TYPE(qual);
2112         offset = APIC_ACCESS_OFFSET(qual);
2113
2114         allowed = 0;
2115         if (access_type == 0) {
2116                 /*
2117                  * Read data access to the following registers is expected.
2118                  */
2119                 switch (offset) {
2120                 case APIC_OFFSET_APR:
2121                 case APIC_OFFSET_PPR:
2122                 case APIC_OFFSET_RRR:
2123                 case APIC_OFFSET_CMCI_LVT:
2124                 case APIC_OFFSET_TIMER_CCR:
2125                         allowed = 1;
2126                         break;
2127                 default:
2128                         break;
2129                 }
2130         } else if (access_type == 1) {
2131                 /*
2132                  * Write data access to the following registers is expected.
2133                  */
2134                 switch (offset) {
2135                 case APIC_OFFSET_VER:
2136                 case APIC_OFFSET_APR:
2137                 case APIC_OFFSET_PPR:
2138                 case APIC_OFFSET_RRR:
2139                 case APIC_OFFSET_ISR0 ... APIC_OFFSET_ISR7:
2140                 case APIC_OFFSET_TMR0 ... APIC_OFFSET_TMR7:
2141                 case APIC_OFFSET_IRR0 ... APIC_OFFSET_IRR7:
2142                 case APIC_OFFSET_CMCI_LVT:
2143                 case APIC_OFFSET_TIMER_CCR:
2144                         allowed = 1;
2145                         break;
2146                 default:
2147                         break;
2148                 }
2149         }
2150
2151         if (allowed) {
2152                 vmexit_inst_emul(vmexit, DEFAULT_APIC_BASE + offset,
2153                     VIE_INVALID_GLA);
2154         }
2155
2156         /*
2157          * Regardless of whether the APIC-access is allowed this handler
2158          * always returns UNHANDLED:
2159          * - if the access is allowed then it is handled by emulating the
2160          *   instruction that caused the VM-exit (outside the critical section)
2161          * - if the access is not allowed then it will be converted to an
2162          *   exitcode of VM_EXITCODE_VMX and will be dealt with in userland.
2163          */
2164         return (UNHANDLED);
2165 }
2166
2167 static enum task_switch_reason
2168 vmx_task_switch_reason(uint64_t qual)
2169 {
2170         int reason;
2171
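             /* Bits 31:30 of the qualification encode the switch source. */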
2172         reason = (qual >> 30) & 0x3;
2173         switch (reason) {
2174         case 0:
2175                 return (TSR_CALL);
2176         case 1:
2177                 return (TSR_IRET);
2178         case 2:
2179                 return (TSR_JMP);
2180         case 3:
2181                 return (TSR_IDT_GATE);
2182         default:
2183                 panic("%s: invalid reason %d", __func__, reason);
2184         }
2185 }
2186
2187 static int
2188 emulate_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
2189 {
2190         int error;
2191
2192         if (lapic_msr(num))
2193                 error = lapic_wrmsr(vmx->vm, vcpuid, num, val, retu);
2194         else
2195                 error = vmx_wrmsr(vmx, vcpuid, num, val, retu);
2196
2197         return (error);
2198 }
2199
2200 static int
2201 emulate_rdmsr(struct vmx *vmx, int vcpuid, u_int num, bool *retu)
2202 {
2203         struct vmxctx *vmxctx;
2204         uint64_t result;
2205         uint32_t eax, edx;
2206         int error;
2207
2208         if (lapic_msr(num))
2209                 error = lapic_rdmsr(vmx->vm, vcpuid, num, &result, retu);
2210         else
2211                 error = vmx_rdmsr(vmx, vcpuid, num, &result, retu);
2212
2213         if (error == 0) {
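                     /* RDMSR returns the result split across %edx:%eax. */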
2214                 eax = result;
2215                 vmxctx = &vmx->ctx[vcpuid];
2216                 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RAX, eax);
2217                 KASSERT(error == 0, ("vmxctx_setreg(rax) error %d", error));
2218
2219                 edx = result >> 32;
2220                 error = vmxctx_setreg(vmxctx, VM_REG_GUEST_RDX, edx);
2221                 KASSERT(error == 0, ("vmxctx_setreg(rdx) error %d", error));
2222         }
2223
2224         return (error);
2225 }
2226
2227 static int
2228 vmx_exit_process(struct vmx *vmx, int vcpu, struct vm_exit *vmexit)
2229 {
2230         int error, errcode, errcode_valid, handled, in;
2231         struct vmxctx *vmxctx;
2232         struct vlapic *vlapic;
2233         struct vm_inout_str *vis;
2234         struct vm_task_switch *ts;
2235         uint32_t eax, ecx, edx, idtvec_info, idtvec_err, intr_info, inst_info;
2236         uint32_t intr_type, intr_vec, reason;
2237         uint64_t exitintinfo, qual, gpa;
2238         bool retu;
2239
2240         CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_VIRTUAL_NMI) != 0);
2241         CTASSERT((PINBASED_CTLS_ONE_SETTING & PINBASED_NMI_EXITING) != 0);
2242
2243         handled = UNHANDLED;
2244         vmxctx = &vmx->ctx[vcpu];
2245
2246         qual = vmexit->u.vmx.exit_qualification;
2247         reason = vmexit->u.vmx.exit_reason;
2248         vmexit->exitcode = VM_EXITCODE_BOGUS;
2249
2250         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_COUNT, 1);
2251         SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpu, vmexit);
2252
2253         /*
2254          * VM-entry failures during or after loading guest state.
2255          *
2256          * These VM-exits are uncommon but must be handled specially
2257          * as most VM-exit fields are not populated as usual.
2258          */
2259         if (__predict_false(reason == EXIT_REASON_MCE_DURING_ENTRY)) {
2260                 VCPU_CTR0(vmx->vm, vcpu, "Handling MCE during VM-entry");
2261                 __asm __volatile("int $18");
2262                 return (1);
2263         }
2264
2265         /*
2266          * VM exits that can be triggered during event delivery need to
2267          * be handled specially by re-injecting the event if the IDT
2268          * vectoring information field's valid bit is set.
2269          *
2270          * See "Information for VM Exits During Event Delivery" in Intel SDM
2271          * for details.
2272          */
2273         idtvec_info = vmcs_idt_vectoring_info();
2274         if (idtvec_info & VMCS_IDT_VEC_VALID) {
2275                 idtvec_info &= ~(1 << 12); /* clear undefined bit */
2276                 exitintinfo = idtvec_info;
2277                 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2278                         idtvec_err = vmcs_idt_vectoring_err();
2279                         exitintinfo |= (uint64_t)idtvec_err << 32;
2280                 }
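                     /* exitintinfo: vectoring info in bits 31:0, error code in bits 63:32. */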
2281                 error = vm_exit_intinfo(vmx->vm, vcpu, exitintinfo);
2282                 KASSERT(error == 0, ("%s: vm_set_intinfo error %d",
2283                     __func__, error));
2284
2285                 /*
2286                  * If 'virtual NMIs' are being used and the VM-exit
2287                  * happened while injecting an NMI during the previous
2288                  * VM-entry, then clear "blocking by NMI" in the
2289                  * Guest Interruptibility-State so the NMI can be
2290                  * reinjected on the subsequent VM-entry.
2291                  *
2292                  * However, if the NMI was being delivered through a task
2293                  * gate, then the new task must start execution with NMIs
2294                  * blocked, so don't clear NMI blocking in this case.
2295                  */
2296                 intr_type = idtvec_info & VMCS_INTR_T_MASK;
2297                 if (intr_type == VMCS_INTR_T_NMI) {
2298                         if (reason != EXIT_REASON_TASK_SWITCH)
2299                                 vmx_clear_nmi_blocking(vmx, vcpu);
2300                         else
2301                                 vmx_assert_nmi_blocking(vmx, vcpu);
2302                 }
2303
2304                 /*
2305                  * Update VM-entry instruction length if the event being
2306                  * delivered was a software interrupt or software exception.
2307                  */
2308                 if (intr_type == VMCS_INTR_T_SWINTR ||
2309                     intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION ||
2310                     intr_type == VMCS_INTR_T_SWEXCEPTION) {
2311                         vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2312                 }
2313         }
2314
2315         switch (reason) {
2316         case EXIT_REASON_TASK_SWITCH:
2317                 ts = &vmexit->u.task_switch;
2318                 ts->tsssel = qual & 0xffff;
2319                 ts->reason = vmx_task_switch_reason(qual);
2320                 ts->ext = 0;
2321                 ts->errcode_valid = 0;
2322                 vmx_paging_info(&ts->paging);
2323                 /*
2324                  * If the task switch was due to a CALL, JMP, IRET, software
2325                  * interrupt (INT n) or software exception (INT3, INTO),
2326                  * then the saved %rip references the instruction that caused
2327                  * the task switch. The instruction length field in the VMCS
2328                  * is valid in this case.
2329                  *
2330                  * In all other cases (e.g., NMI, hardware exception) the
2331                  * saved %rip is one that would have been saved in the old TSS
2332                  * had the task switch completed normally so the instruction
2333                  * length field is not needed in this case and is explicitly
2334                  * set to 0.
2335                  */
2336                 if (ts->reason == TSR_IDT_GATE) {
2337                         KASSERT(idtvec_info & VMCS_IDT_VEC_VALID,
2338                             ("invalid idtvec_info %#x for IDT task switch",
2339                             idtvec_info));
2340                         intr_type = idtvec_info & VMCS_INTR_T_MASK;
2341                         if (intr_type != VMCS_INTR_T_SWINTR &&
2342                             intr_type != VMCS_INTR_T_SWEXCEPTION &&
2343                             intr_type != VMCS_INTR_T_PRIV_SWEXCEPTION) {
2344                                 /* Task switch triggered by external event */
2345                                 ts->ext = 1;
2346                                 vmexit->inst_length = 0;
2347                                 if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
2348                                         ts->errcode_valid = 1;
2349                                         ts->errcode = vmcs_idt_vectoring_err();
2350                                 }
2351                         }
2352                 }
2353                 vmexit->exitcode = VM_EXITCODE_TASK_SWITCH;
2354                 SDT_PROBE4(vmm, vmx, exit, taskswitch, vmx, vcpu, vmexit, ts);
2355                 VCPU_CTR4(vmx->vm, vcpu, "task switch reason %d, tss 0x%04x, "
2356                     "%s errcode 0x%016lx", ts->reason, ts->tsssel,
2357                     ts->ext ? "external" : "internal",
2358                     ((uint64_t)ts->errcode << 32) | ts->errcode_valid);
2359                 break;
2360         case EXIT_REASON_CR_ACCESS:
2361                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CR_ACCESS, 1);
2362                 SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpu, vmexit, qual);
2363                 switch (qual & 0xf) {
2364                 case 0:
2365                         handled = vmx_emulate_cr0_access(vmx, vcpu, qual);
2366                         break;
2367                 case 4:
2368                         handled = vmx_emulate_cr4_access(vmx, vcpu, qual);
2369                         break;
2370                 case 8:
2371                         handled = vmx_emulate_cr8_access(vmx, vcpu, qual);
2372                         break;
2373                 }
2374                 break;
2375         case EXIT_REASON_RDMSR:
2376                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_RDMSR, 1);
2377                 retu = false;
2378                 ecx = vmxctx->guest_rcx;
2379                 VCPU_CTR1(vmx->vm, vcpu, "rdmsr 0x%08x", ecx);
2380                 SDT_PROBE4(vmm, vmx, exit, rdmsr, vmx, vcpu, vmexit, ecx);
2381                 error = emulate_rdmsr(vmx, vcpu, ecx, &retu);
2382                 if (error) {
2383                         vmexit->exitcode = VM_EXITCODE_RDMSR;
2384                         vmexit->u.msr.code = ecx;
2385                 } else if (!retu) {
2386                         handled = HANDLED;
2387                 } else {
2388                         /* Return to userspace with a valid exitcode */
2389                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2390                             ("emulate_rdmsr retu with bogus exitcode"));
2391                 }
2392                 break;
2393         case EXIT_REASON_WRMSR:
2394                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_WRMSR, 1);
2395                 retu = false;
2396                 eax = vmxctx->guest_rax;
2397                 ecx = vmxctx->guest_rcx;
2398                 edx = vmxctx->guest_rdx;
2399                 VCPU_CTR2(vmx->vm, vcpu, "wrmsr 0x%08x value 0x%016lx",
2400                     ecx, (uint64_t)edx << 32 | eax);
2401                 SDT_PROBE5(vmm, vmx, exit, wrmsr, vmx, vmexit, vcpu, ecx,
2402                     (uint64_t)edx << 32 | eax);
2403                 error = emulate_wrmsr(vmx, vcpu, ecx,
2404                     (uint64_t)edx << 32 | eax, &retu);
2405                 if (error) {
2406                         vmexit->exitcode = VM_EXITCODE_WRMSR;
2407                         vmexit->u.msr.code = ecx;
2408                         vmexit->u.msr.wval = (uint64_t)edx << 32 | eax;
2409                 } else if (!retu) {
2410                         handled = HANDLED;
2411                 } else {
2412                         /* Return to userspace with a valid exitcode */
2413                         KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
2414                             ("emulate_wrmsr retu with bogus exitcode"));
2415                 }
2416                 break;
2417         case EXIT_REASON_HLT:
2418                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_HLT, 1);
2419                 SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpu, vmexit);
2420                 vmexit->exitcode = VM_EXITCODE_HLT;
2421                 vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2422                 if (virtual_interrupt_delivery)
2423                         vmexit->u.hlt.intr_status =
2424                             vmcs_read(VMCS_GUEST_INTR_STATUS);
2425                 else
2426                         vmexit->u.hlt.intr_status = 0;
2427                 break;
2428         case EXIT_REASON_MTF:
2429                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_MTRAP, 1);
2430                 SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpu, vmexit);
2431                 vmexit->exitcode = VM_EXITCODE_MTRAP;
2432                 vmexit->inst_length = 0;
2433                 break;
2434         case EXIT_REASON_PAUSE:
2435                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_PAUSE, 1);
2436                 SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpu, vmexit);
2437                 vmexit->exitcode = VM_EXITCODE_PAUSE;
2438                 break;
2439         case EXIT_REASON_INTR_WINDOW:
2440                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INTR_WINDOW, 1);
2441                 SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpu, vmexit);
2442                 vmx_clear_int_window_exiting(vmx, vcpu);
2443                 return (1);
2444         case EXIT_REASON_EXT_INTR:
2445                 /*
2446                  * External interrupts serve only to cause VM exits and allow
2447                  * the host interrupt handler to run.
2448                  *
2449                  * If this external interrupt triggers a virtual interrupt
2450                  * to a VM, then that state will be recorded by the
2451                  * host interrupt handler in the VM's softc. We will inject
2452                  * this virtual interrupt during the subsequent VM enter.
2453                  */
2454                 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2455                 SDT_PROBE4(vmm, vmx, exit, interrupt,
2456                     vmx, vcpu, vmexit, intr_info);
2457
2458                 /*
2459                  * XXX: Ignore this exit if VMCS_INTR_VALID is not set.
2460                  * This appears to be a bug in VMware Fusion?
2461                  */
2462                 if (!(intr_info & VMCS_INTR_VALID))
2463                         return (1);
2464                 KASSERT((intr_info & VMCS_INTR_VALID) != 0 &&
2465                     (intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_HWINTR,
2466                     ("VM exit interruption info invalid: %#x", intr_info));
2467                 vmx_trigger_hostintr(intr_info & 0xff);
2468
2469                 /*
2470                  * This is special. We want to treat this as a 'handled'
2471                  * VM-exit but not increment the instruction pointer.
2472                  */
2473                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXTINT, 1);
2474                 return (1);
2475         case EXIT_REASON_NMI_WINDOW:
2476                 SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpu, vmexit);
2477                 /* Exit to allow the pending virtual NMI to be injected */
2478                 if (vm_nmi_pending(vmx->vm, vcpu))
2479                         vmx_inject_nmi(vmx, vcpu);
2480                 vmx_clear_nmi_window_exiting(vmx, vcpu);
2481                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NMI_WINDOW, 1);
2482                 return (1);
2483         case EXIT_REASON_INOUT:
2484                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INOUT, 1);
2485                 vmexit->exitcode = VM_EXITCODE_INOUT;
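                     /* Decode size, direction, string, REP and port from 'qual'. */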
2486                 vmexit->u.inout.bytes = (qual & 0x7) + 1;
2487                 vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
2488                 vmexit->u.inout.string = (qual & 0x10) ? 1 : 0;
2489                 vmexit->u.inout.rep = (qual & 0x20) ? 1 : 0;
2490                 vmexit->u.inout.port = (uint16_t)(qual >> 16);
2491                 vmexit->u.inout.eax = (uint32_t)(vmxctx->guest_rax);
2492                 if (vmexit->u.inout.string) {
2493                         inst_info = vmcs_read(VMCS_EXIT_INSTRUCTION_INFO);
2494                         vmexit->exitcode = VM_EXITCODE_INOUT_STR;
2495                         vis = &vmexit->u.inout_str;
2496                         vmx_paging_info(&vis->paging);
2497                         vis->rflags = vmcs_read(VMCS_GUEST_RFLAGS);
2498                         vis->cr0 = vmcs_read(VMCS_GUEST_CR0);
2499                         vis->index = inout_str_index(vmx, vcpu, in);
2500                         vis->count = inout_str_count(vmx, vcpu, vis->inout.rep);
2501                         vis->addrsize = inout_str_addrsize(inst_info);
2502                         inout_str_seginfo(vmx, vcpu, inst_info, in, vis);
2503                 }
2504                 SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpu, vmexit);
2505                 break;
2506         case EXIT_REASON_CPUID:
2507                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_CPUID, 1);
2508                 SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpu, vmexit);
2509                 handled = vmx_handle_cpuid(vmx->vm, vcpu, vmxctx);
2510                 break;
2511         case EXIT_REASON_EXCEPTION:
2512                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_EXCEPTION, 1);
2513                 intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2514                 KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2515                     ("VM exit interruption info invalid: %#x", intr_info));
2516
2517                 intr_vec = intr_info & 0xff;
2518                 intr_type = intr_info & VMCS_INTR_T_MASK;
2519
2520                 /*
2521                  * If Virtual NMIs control is 1 and the VM-exit is due to a
2522                  * fault encountered during the execution of IRET then we must
2523                  * restore the state of "virtual-NMI blocking" before resuming
2524                  * the guest.
2525                  *
2526                  * See "Resuming Guest Software after Handling an Exception".
2527                  * See "Information for VM Exits Due to Vectored Events".
2528                  */
2529                 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2530                     (intr_vec != IDT_DF) &&
2531                     (intr_info & EXIT_QUAL_NMIUDTI) != 0)
2532                         vmx_restore_nmi_blocking(vmx, vcpu);
2533
2534                 /*
2535                  * The NMI has already been handled in vmx_exit_handle_nmi().
2536                  */
2537                 if (intr_type == VMCS_INTR_T_NMI)
2538                         return (1);
2539
2540                 /*
2541                  * Call the machine check handler by hand. Also don't reflect
2542                  * the machine check back into the guest.
2543                  */
2544                 if (intr_vec == IDT_MC) {
2545                         VCPU_CTR0(vmx->vm, vcpu, "Vectoring to MCE handler");
2546                         __asm __volatile("int $18");
2547                         return (1);
2548                 }
2549
2550                 if (intr_vec == IDT_PF) {
2551                         error = vmxctx_setreg(vmxctx, VM_REG_GUEST_CR2, qual);
2552                         KASSERT(error == 0, ("%s: vmxctx_setreg(cr2) error %d",
2553                             __func__, error));
2554                 }
2555
2556                 /*
2557                  * Software exceptions exhibit trap-like behavior. This in
2558                  * turn requires populating the VM-entry instruction length
2559                  * so that the %rip in the trap frame is past the INT3/INTO
2560                  * instruction.
2561                  */
2562                 if (intr_type == VMCS_INTR_T_SWEXCEPTION)
2563                         vmcs_write(VMCS_ENTRY_INST_LENGTH, vmexit->inst_length);
2564
2565                 /* Reflect all other exceptions back into the guest */
2566                 errcode_valid = errcode = 0;
2567                 if (intr_info & VMCS_INTR_DEL_ERRCODE) {
2568                         errcode_valid = 1;
2569                         errcode = vmcs_read(VMCS_EXIT_INTR_ERRCODE);
2570                 }
2571                 VCPU_CTR2(vmx->vm, vcpu, "Reflecting exception %d/%#x into "
2572                     "the guest", intr_vec, errcode);
2573                 SDT_PROBE5(vmm, vmx, exit, exception,
2574                     vmx, vcpu, vmexit, intr_vec, errcode);
2575                 error = vm_inject_exception(vmx->vm, vcpu, intr_vec,
2576                     errcode_valid, errcode, 0);
2577                 KASSERT(error == 0, ("%s: vm_inject_exception error %d",
2578                     __func__, error));
2579                 return (1);
2580
2581         case EXIT_REASON_EPT_FAULT:
2582                 /*
2583                  * If 'gpa' lies within the address space allocated to
2584                  * memory then this must be a nested page fault; otherwise
2585                  * it must be an instruction that accesses MMIO space.
2586                  */
2587                 gpa = vmcs_gpa();
2588                 if (vm_mem_allocated(vmx->vm, vcpu, gpa) ||
2589                     apic_access_fault(vmx, vcpu, gpa)) {
2590                         vmexit->exitcode = VM_EXITCODE_PAGING;
2591                         vmexit->inst_length = 0;
2592                         vmexit->u.paging.gpa = gpa;
2593                         vmexit->u.paging.fault_type = ept_fault_type(qual);
2594                         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
2595                         SDT_PROBE5(vmm, vmx, exit, nestedfault,
2596                             vmx, vcpu, vmexit, gpa, qual);
2597                 } else if (ept_emulation_fault(qual)) {
2598                         vmexit_inst_emul(vmexit, gpa, vmcs_gla());
2599                         vmm_stat_incr(vmx->vm, vcpu, VMEXIT_INST_EMUL, 1);
2600                         SDT_PROBE4(vmm, vmx, exit, mmiofault,
2601                             vmx, vcpu, vmexit, gpa);
2602                 }
2603                 /*
2604                  * If Virtual NMIs control is 1 and the VM-exit is due to an
2605                  * EPT fault during the execution of IRET then we must restore
2606                  * the state of "virtual-NMI blocking" before resuming.
2607                  *
2608                  * See description of "NMI unblocking due to IRET" in
2609                  * "Exit Qualification for EPT Violations".
2610                  */
2611                 if ((idtvec_info & VMCS_IDT_VEC_VALID) == 0 &&
2612                     (qual & EXIT_QUAL_NMIUDTI) != 0)
2613                         vmx_restore_nmi_blocking(vmx, vcpu);
2614                 break;
2615         case EXIT_REASON_VIRTUALIZED_EOI:
2616                 vmexit->exitcode = VM_EXITCODE_IOAPIC_EOI;
2617                 vmexit->u.ioapic_eoi.vector = qual & 0xFF;
2618                 SDT_PROBE3(vmm, vmx, exit, eoi, vmx, vcpu, vmexit);
2619                 vmexit->inst_length = 0;        /* trap-like */
2620                 break;
2621         case EXIT_REASON_APIC_ACCESS:
2622                 SDT_PROBE3(vmm, vmx, exit, apicaccess, vmx, vcpu, vmexit);
2623                 handled = vmx_handle_apic_access(vmx, vcpu, vmexit);
2624                 break;
2625         case EXIT_REASON_APIC_WRITE:
2626                 /*
2627                  * APIC-write VM exit is trap-like so the %rip is already
2628                  * pointing to the next instruction.
2629                  */
2630                 vmexit->inst_length = 0;
2631                 vlapic = vm_lapic(vmx->vm, vcpu);
2632                 SDT_PROBE4(vmm, vmx, exit, apicwrite,
2633                     vmx, vcpu, vmexit, vlapic);
2634                 handled = vmx_handle_apic_write(vmx, vcpu, vlapic, qual);
2635                 break;
2636         case EXIT_REASON_XSETBV:
2637                 SDT_PROBE3(vmm, vmx, exit, xsetbv, vmx, vcpu, vmexit);
2638                 handled = vmx_emulate_xsetbv(vmx, vcpu, vmexit);
2639                 break;
2640         case EXIT_REASON_MONITOR:
2641                 SDT_PROBE3(vmm, vmx, exit, monitor, vmx, vcpu, vmexit);
2642                 vmexit->exitcode = VM_EXITCODE_MONITOR;
2643                 break;
2644         case EXIT_REASON_MWAIT:
2645                 SDT_PROBE3(vmm, vmx, exit, mwait, vmx, vcpu, vmexit);
2646                 vmexit->exitcode = VM_EXITCODE_MWAIT;
2647                 break;
2648         case EXIT_REASON_VMCALL:
2649         case EXIT_REASON_VMCLEAR:
2650         case EXIT_REASON_VMLAUNCH:
2651         case EXIT_REASON_VMPTRLD:
2652         case EXIT_REASON_VMPTRST:
2653         case EXIT_REASON_VMREAD:
2654         case EXIT_REASON_VMRESUME:
2655         case EXIT_REASON_VMWRITE:
2656         case EXIT_REASON_VMXOFF:
2657         case EXIT_REASON_VMXON:
2658                 SDT_PROBE3(vmm, vmx, exit, vminsn, vmx, vcpu, vmexit);
2659                 vmexit->exitcode = VM_EXITCODE_VMINSN;
2660                 break;
2661         default:
2662                 SDT_PROBE4(vmm, vmx, exit, unknown,
2663                     vmx, vcpu, vmexit, reason);
2664                 vmm_stat_incr(vmx->vm, vcpu, VMEXIT_UNKNOWN, 1);
2665                 break;
2666         }
2667
2668         if (handled) {
2669                 /*
2670                  * It is possible that control is returned to userland
2671                  * even though we were able to handle the VM exit in the
2672                  * kernel.
2673                  *
2674                  * In such a case we want to make sure that the userland
2675                  * restarts guest execution at the instruction *after*
2676                  * the one we just processed. Therefore we update the
2677                  * guest rip in the VMCS and in 'vmexit'.
2678                  */
2679                 vmexit->rip += vmexit->inst_length;
2680                 vmexit->inst_length = 0;
2681                 vmcs_write(VMCS_GUEST_RIP, vmexit->rip);
2682         } else {
2683                 if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
2684                         /*
2685                          * If this VM exit was not claimed by anybody then
2686                          * treat it as a generic VMX exit.
2687                          */
2688                         vmexit->exitcode = VM_EXITCODE_VMX;
2689                         vmexit->u.vmx.status = VM_SUCCESS;
2690                         vmexit->u.vmx.inst_type = 0;
2691                         vmexit->u.vmx.inst_error = 0;
2692                 } else {
2693                         /*
2694                          * The exitcode and collateral have been populated.
2695                          * The VM exit will be processed further in userland.
2696                          */
2697                 }
2698         }
2699
2700         SDT_PROBE4(vmm, vmx, exit, return,
2701             vmx, vcpu, vmexit, handled);
2702         return (handled);
2703 }
2704
2705 static __inline void
2706 vmx_exit_inst_error(struct vmxctx *vmxctx, int rc, struct vm_exit *vmexit)
2707 {
2708
2709         KASSERT(vmxctx->inst_fail_status != VM_SUCCESS,
2710             ("vmx_exit_inst_error: invalid inst_fail_status %d",
2711             vmxctx->inst_fail_status));
2712
2713         vmexit->inst_length = 0;
2714         vmexit->exitcode = VM_EXITCODE_VMX;
2715         vmexit->u.vmx.status = vmxctx->inst_fail_status;
2716         vmexit->u.vmx.inst_error = vmcs_instruction_error();
2717         vmexit->u.vmx.exit_reason = ~0;
2718         vmexit->u.vmx.exit_qualification = ~0;
2719
2720         switch (rc) {
2721         case VMX_VMRESUME_ERROR:
2722         case VMX_VMLAUNCH_ERROR:
2723         case VMX_INVEPT_ERROR:
2724                 vmexit->u.vmx.inst_type = rc;
2725                 break;
2726         default:
2727                 panic("vm_exit_inst_error: vmx_enter_guest returned %d", rc);
2728         }
2729 }
2730
2731 /*
2732  * If the NMI-exiting VM execution control is set to '1' then an NMI in
2733  * non-root operation causes a VM-exit. NMI blocking is in effect so it is
2734  * sufficient to simply vector to the NMI handler via a software interrupt.
2735  * However, this must be done before maskable interrupts are enabled
2736  * otherwise the "iret" issued by an interrupt handler will incorrectly
2737  * clear NMI blocking.
2738  */
2739 static __inline void
2740 vmx_exit_handle_nmi(struct vmx *vmx, int vcpuid, struct vm_exit *vmexit)
2741 {
2742         uint32_t intr_info;
2743
2744         KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));
2745
2746         if (vmexit->u.vmx.exit_reason != EXIT_REASON_EXCEPTION)
2747                 return;
2748
2749         intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
2750         KASSERT((intr_info & VMCS_INTR_VALID) != 0,
2751             ("VM exit interruption info invalid: %#x", intr_info));
2752
2753         if ((intr_info & VMCS_INTR_T_MASK) == VMCS_INTR_T_NMI) {
2754                 KASSERT((intr_info & 0xff) == IDT_NMI, ("VM exit due "
2755                     "to NMI has invalid vector: %#x", intr_info));
2756                 VCPU_CTR0(vmx->vm, vcpuid, "Vectoring to NMI handler");
2757                 __asm __volatile("int $2");
2758         }
2759 }
2760
2761 static __inline void
2762 vmx_dr_enter_guest(struct vmxctx *vmxctx)
2763 {
2764         register_t rflags;
2765
2766         /* Save host control debug registers. */
2767         vmxctx->host_dr7 = rdr7();
2768         vmxctx->host_debugctl = rdmsr(MSR_DEBUGCTLMSR);
2769
2770         /*
2771          * Disable debugging in DR7 and DEBUGCTL to avoid triggering
2772          * exceptions in the host based on the guest DRx values.  The
2773          * guest DR7 and DEBUGCTL are saved/restored in the VMCS.
2774          */
2775         load_dr7(0);
2776         wrmsr(MSR_DEBUGCTLMSR, 0);
2777
2778         /*
2779          * Disable single stepping the kernel to avoid corrupting the
2780          * guest DR6.  A debugger might still be able to corrupt the
2781          * guest DR6 by setting a breakpoint after this point and then
2782          * single stepping.
2783          */
2784         rflags = read_rflags();
2785         vmxctx->host_tf = rflags & PSL_T;
2786         write_rflags(rflags & ~PSL_T);
2787
2788         /* Save host debug registers. */
2789         vmxctx->host_dr0 = rdr0();
2790         vmxctx->host_dr1 = rdr1();
2791         vmxctx->host_dr2 = rdr2();
2792         vmxctx->host_dr3 = rdr3();
2793         vmxctx->host_dr6 = rdr6();
2794
2795         /* Restore guest debug registers. */
2796         load_dr0(vmxctx->guest_dr0);
2797         load_dr1(vmxctx->guest_dr1);
2798         load_dr2(vmxctx->guest_dr2);
2799         load_dr3(vmxctx->guest_dr3);
2800         load_dr6(vmxctx->guest_dr6);
2801 }
2802
2803 static __inline void
2804 vmx_dr_leave_guest(struct vmxctx *vmxctx)
2805 {
2806
2807         /* Save guest debug registers. */
2808         vmxctx->guest_dr0 = rdr0();
2809         vmxctx->guest_dr1 = rdr1();
2810         vmxctx->guest_dr2 = rdr2();
2811         vmxctx->guest_dr3 = rdr3();
2812         vmxctx->guest_dr6 = rdr6();
2813
2814         /*
2815          * Restore host debug registers.  Restore DR7, DEBUGCTL, and
2816          * PSL_T last.
2817          */
2818         load_dr0(vmxctx->host_dr0);
2819         load_dr1(vmxctx->host_dr1);
2820         load_dr2(vmxctx->host_dr2);
2821         load_dr3(vmxctx->host_dr3);
2822         load_dr6(vmxctx->host_dr6);
2823         wrmsr(MSR_DEBUGCTLMSR, vmxctx->host_debugctl);
2824         load_dr7(vmxctx->host_dr7);
2825         write_rflags(read_rflags() | vmxctx->host_tf);
2826 }
2827
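/*
 * Run a vcpu: load its VMCS, enter the guest and loop, handling VM exits
 * in the kernel until one must be returned to userland or the vcpu is
 * suspended, rendezvoused, debugged or has an AST pending.
 */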
2828 static int
2829 vmx_run(void *arg, int vcpu, register_t rip, pmap_t pmap,
2830     struct vm_eventinfo *evinfo)
2831 {
2832         int rc, handled, launched;
2833         struct vmx *vmx;
2834         struct vm *vm;
2835         struct vmxctx *vmxctx;
2836         struct vmcs *vmcs;
2837         struct vm_exit *vmexit;
2838         struct vlapic *vlapic;
2839         uint32_t exit_reason;
2840         struct region_descriptor gdtr, idtr;
2841         uint16_t ldt_sel;
2842
2843         vmx = arg;
2844         vm = vmx->vm;
2845         vmcs = &vmx->vmcs[vcpu];
2846         vmxctx = &vmx->ctx[vcpu];
2847         vlapic = vm_lapic(vm, vcpu);
2848         vmexit = vm_exitinfo(vm, vcpu);
2849         launched = 0;
2850
2851         KASSERT(vmxctx->pmap == pmap,
2852             ("pmap %p different than ctx pmap %p", pmap, vmxctx->pmap));
2853
2854         vmx_msr_guest_enter(vmx, vcpu);
2855
2856         VMPTRLD(vmcs);
2857
2858         /*
2859          * XXX
2860          * We do this every time because we may set up the virtual machine
2861          * from a different process than the one that actually runs it.
2862          *
2863          * If the life of a virtual machine was spent entirely in the context
2864          * of a single process we could do this once in vmx_vminit().
2865          */
2866         vmcs_write(VMCS_HOST_CR3, rcr3());
2867
2868         vmcs_write(VMCS_GUEST_RIP, rip);
2869         vmx_set_pcpu_defaults(vmx, vcpu, pmap);
2870         do {
2871                 KASSERT(vmcs_guest_rip() == rip, ("%s: vmcs guest rip mismatch "
2872                     "%#lx/%#lx", __func__, vmcs_guest_rip(), rip));
2873
2874                 handled = UNHANDLED;
2875                 /*
2876                  * Interrupts are disabled from this point on until the
2877                  * guest starts executing. This is done for the following
2878                  * reasons:
2879                  *
2880                  * If an AST is asserted on this thread after the check below,
2881                  * then the IPI_AST notification will not be lost, because it
2882                  * will cause a VM exit due to external interrupt as soon as
2883                  * the guest state is loaded.
2884                  *
2885                  * A posted interrupt after 'vmx_inject_interrupts()' will
2886                  * not be "lost" because it will be held pending in the host
2887                  * APIC because interrupts are disabled. The pending interrupt
2888                  * will be recognized as soon as the guest state is loaded.
2889                  *
2890                  * The same reasoning applies to the IPI generated by
2891                  * pmap_invalidate_ept().
2892                  */
2893                 disable_intr();
2894                 vmx_inject_interrupts(vmx, vcpu, vlapic, rip);
2895
2896                 /*
2897                  * Check for vcpu suspension after injecting events because
2898                  * vmx_inject_interrupts() can suspend the vcpu due to a
2899                  * triple fault.
2900                  */
2901                 if (vcpu_suspended(evinfo)) {
2902                         enable_intr();
2903                         vm_exit_suspended(vmx->vm, vcpu, rip);
2904                         break;
2905                 }
2906
2907                 if (vcpu_rendezvous_pending(evinfo)) {
2908                         enable_intr();
2909                         vm_exit_rendezvous(vmx->vm, vcpu, rip);
2910                         break;
2911                 }
2912
2913                 if (vcpu_reqidle(evinfo)) {
2914                         enable_intr();
2915                         vm_exit_reqidle(vmx->vm, vcpu, rip);
2916                         break;
2917                 }
2918
2919                 if (vcpu_should_yield(vm, vcpu)) {
2920                         enable_intr();
2921                         vm_exit_astpending(vmx->vm, vcpu, rip);
2922                         vmx_astpending_trace(vmx, vcpu, rip);
2923                         handled = HANDLED;
2924                         break;
2925                 }
2926
2927                 if (vcpu_debugged(vm, vcpu)) {
2928                         enable_intr();
2929                         vm_exit_debug(vmx->vm, vcpu, rip);
2930                         break;
2931                 }
2932
2933                 /*
2934                  * VM exits restore the base address but not the
2935                  * limits of GDTR and IDTR.  The VMCS only stores the
2936                  * base address, so VM exits set the limits to 0xffff.
2937                  * Save and restore the full GDTR and IDTR to restore
2938                  * the limits.
2939                  *
2940                  * The VMCS does not save the LDTR at all, and VM
2941                  * exits clear LDTR as if a NULL selector were loaded.
2942                  * The userspace hypervisor probably doesn't use a
2943                  * LDT, but save and restore it to be safe.
2944                  */
2945                 sgdt(&gdtr);
2946                 sidt(&idtr);
2947                 ldt_sel = sldt();
2948
2949                 vmx_run_trace(vmx, vcpu);
2950                 vmx_dr_enter_guest(vmxctx);
2951                 rc = vmx_enter_guest(vmxctx, vmx, launched);
2952                 vmx_dr_leave_guest(vmxctx);
2953
2954                 bare_lgdt(&gdtr);
2955                 lidt(&idtr);
2956                 lldt(ldt_sel);
2957
2958                 /* Collect some information for VM exit processing */
2959                 vmexit->rip = rip = vmcs_guest_rip();
2960                 vmexit->inst_length = vmexit_instruction_length();
2961                 vmexit->u.vmx.exit_reason = exit_reason = vmcs_exit_reason();
2962                 vmexit->u.vmx.exit_qualification = vmcs_exit_qualification();
2963
2964                 /* Update 'nextrip' */
2965                 vmx->state[vcpu].nextrip = rip;
2966
2967                 if (rc == VMX_GUEST_VMEXIT) {
2968                         vmx_exit_handle_nmi(vmx, vcpu, vmexit);
2969                         enable_intr();
2970                         handled = vmx_exit_process(vmx, vcpu, vmexit);
2971                 } else {
2972                         enable_intr();
2973                         vmx_exit_inst_error(vmxctx, rc, vmexit);
2974                 }
2975                 launched = 1;
2976                 vmx_exit_trace(vmx, vcpu, rip, exit_reason, handled);
2977                 rip = vmexit->rip;
2978         } while (handled);
2979
2980         /*
2981          * If a VM exit has been handled then the exitcode must be BOGUS;
2982          * if a VM exit is not handled then the exitcode must not be BOGUS.
2983          */
2984         if ((handled && vmexit->exitcode != VM_EXITCODE_BOGUS) ||
2985             (!handled && vmexit->exitcode == VM_EXITCODE_BOGUS)) {
2986                 panic("Mismatch between handled (%d) and exitcode (%d)",
2987                       handled, vmexit->exitcode);
2988         }
2989
2990         if (!handled)
2991                 vmm_stat_incr(vm, vcpu, VMEXIT_USERSPACE, 1);
2992
2993         VCPU_CTR1(vm, vcpu, "returning from vmx_run: exitcode %d",
2994             vmexit->exitcode);
2995
2996         VMCLEAR(vmcs);
2997         vmx_msr_guest_exit(vmx, vcpu);
2998
2999         return (0);
3000 }
3001
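/*
 * Release the per-VM VMX state: undo the APIC access page mapping if it
 * was established, return the VPIDs to the allocator and free the vmx
 * structure.
 */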
3002 static void
3003 vmx_vmcleanup(void *arg)
3004 {
3005         int i;
3006         struct vmx *vmx = arg;
3007         uint16_t maxcpus;
3008
3009         if (apic_access_virtualization(vmx, 0))
3010                 vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3011
3012         maxcpus = vm_get_maxcpus(vmx->vm);
3013         for (i = 0; i < maxcpus; i++)
3014                 vpid_free(vmx->state[i].vpid);
3015
3016         free(vmx, M_VMX);
3017
3018         return;
3019 }
3020
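/*
 * Map a VM_REG_GUEST_* identifier to its save slot in the vmxctx.
 * Returns NULL for registers that are kept in the VMCS instead.
 */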
3021 static register_t *
3022 vmxctx_regptr(struct vmxctx *vmxctx, int reg)
3023 {
3024
3025         switch (reg) {
3026         case VM_REG_GUEST_RAX:
3027                 return (&vmxctx->guest_rax);
3028         case VM_REG_GUEST_RBX:
3029                 return (&vmxctx->guest_rbx);
3030         case VM_REG_GUEST_RCX:
3031                 return (&vmxctx->guest_rcx);
3032         case VM_REG_GUEST_RDX:
3033                 return (&vmxctx->guest_rdx);
3034         case VM_REG_GUEST_RSI:
3035                 return (&vmxctx->guest_rsi);
3036         case VM_REG_GUEST_RDI:
3037                 return (&vmxctx->guest_rdi);
3038         case VM_REG_GUEST_RBP:
3039                 return (&vmxctx->guest_rbp);
3040         case VM_REG_GUEST_R8:
3041                 return (&vmxctx->guest_r8);
3042         case VM_REG_GUEST_R9:
3043                 return (&vmxctx->guest_r9);
3044         case VM_REG_GUEST_R10:
3045                 return (&vmxctx->guest_r10);
3046         case VM_REG_GUEST_R11:
3047                 return (&vmxctx->guest_r11);
3048         case VM_REG_GUEST_R12:
3049                 return (&vmxctx->guest_r12);
3050         case VM_REG_GUEST_R13:
3051                 return (&vmxctx->guest_r13);
3052         case VM_REG_GUEST_R14:
3053                 return (&vmxctx->guest_r14);
3054         case VM_REG_GUEST_R15:
3055                 return (&vmxctx->guest_r15);
3056         case VM_REG_GUEST_CR2:
3057                 return (&vmxctx->guest_cr2);
3058         case VM_REG_GUEST_DR0:
3059                 return (&vmxctx->guest_dr0);
3060         case VM_REG_GUEST_DR1:
3061                 return (&vmxctx->guest_dr1);
3062         case VM_REG_GUEST_DR2:
3063                 return (&vmxctx->guest_dr2);
3064         case VM_REG_GUEST_DR3:
3065                 return (&vmxctx->guest_dr3);
3066         case VM_REG_GUEST_DR6:
3067                 return (&vmxctx->guest_dr6);
3068         default:
3069                 break;
3070         }
3071         return (NULL);
3072 }
3073
3074 static int
3075 vmxctx_getreg(struct vmxctx *vmxctx, int reg, uint64_t *retval)
3076 {
3077         register_t *regp;
3078
3079         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
3080                 *retval = *regp;
3081                 return (0);
3082         } else
3083                 return (EINVAL);
3084 }
3085
3086 static int
3087 vmxctx_setreg(struct vmxctx *vmxctx, int reg, uint64_t val)
3088 {
3089         register_t *regp;
3090
3091         if ((regp = vmxctx_regptr(vmxctx, reg)) != NULL) {
3092                 *regp = val;
3093                 return (0);
3094         } else
3095                 return (EINVAL);
3096 }
3097
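/*
 * Report whether the vcpu is in an interrupt shadow, derived from the
 * hardware interrupt blocking bits of the guest interruptibility field.
 */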
3098 static int
3099 vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
3100 {
3101         uint64_t gi;
3102         int error;
3103
3104         error = vmcs_getreg(&vmx->vmcs[vcpu], running,
3105             VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
3106         *retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
3107         return (error);
3108 }
3109
3110 static int
3111 vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
3112 {
3113         struct vmcs *vmcs;
3114         uint64_t gi;
3115         int error, ident;
3116
3117         /*
3118          * Forcing the vcpu into an interrupt shadow is not supported.
3119          */
3120         if (val) {
3121                 error = EINVAL;
3122                 goto done;
3123         }
3124
3125         vmcs = &vmx->vmcs[vcpu];
3126         ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
3127         error = vmcs_getreg(vmcs, running, ident, &gi);
3128         if (error == 0) {
3129                 gi &= ~HWINTR_BLOCKING;
3130                 error = vmcs_setreg(vmcs, running, ident, gi);
3131         }
3132 done:
3133         VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
3134             error ? "failed" : "succeeded");
3135         return (error);
3136 }
3137
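/*
 * Return the VMCS identifier of the read shadow for CR0 or CR4, or -1 if
 * the register has no shadow.
 */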
3138 static int
3139 vmx_shadow_reg(int reg)
3140 {
3141         int shreg;
3142
3143         shreg = -1;
3144
3145         switch (reg) {
3146         case VM_REG_GUEST_CR0:
3147                 shreg = VMCS_CR0_SHADOW;
3148                 break;
3149         case VM_REG_GUEST_CR4:
3150                 shreg = VMCS_CR4_SHADOW;
3151                 break;
3152         default:
3153                 break;
3154         }
3155
3156         return (shreg);
3157 }
3158
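/*
 * Fetch a guest register: the interrupt shadow pseudo-register comes from
 * the VMCS interruptibility field, general purpose and debug registers
 * from the vmxctx, and everything else from the VMCS.
 */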
3159 static int
3160 vmx_getreg(void *arg, int vcpu, int reg, uint64_t *retval)
3161 {
3162         int running, hostcpu;
3163         struct vmx *vmx = arg;
3164
3165         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3166         if (running && hostcpu != curcpu)
3167                 panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
3168
3169         if (reg == VM_REG_GUEST_INTR_SHADOW)
3170                 return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
3171
3172         if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
3173                 return (0);
3174
3175         return (vmcs_getreg(&vmx->vmcs[vcpu], running, reg, retval));
3176 }
3177
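/*
 * Update a guest register.  Registers not backed by the vmxctx must be
 * identified by a non-negative VM_REG_GUEST_* value; negative values are
 * rejected so that callers cannot address VMCS fields by raw offset.
 * Writes to EFER, CR0/CR4 and CR3 receive additional handling below.
 */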
3178 static int
3179 vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
3180 {
3181         int error, hostcpu, running, shadow;
3182         uint64_t ctls;
3183         pmap_t pmap;
3184         struct vmx *vmx = arg;
3185
3186         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3187         if (running && hostcpu != curcpu)
3188                 panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
3189
3190         if (reg == VM_REG_GUEST_INTR_SHADOW)
3191                 return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
3192
3193         if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
3194                 return (0);
3195
3196         /* Do not permit user write access to VMCS fields by offset. */
3197         if (reg < 0)
3198                 return (EINVAL);
3199
3200         error = vmcs_setreg(&vmx->vmcs[vcpu], running, reg, val);
3201
3202         if (error == 0) {
3203                 /*
3204                  * If the "load EFER" VM-entry control is 1 then the
3205                  * value of EFER.LMA must be identical to "IA-32e mode guest"
3206                  * bit in the VM-entry control.
3207                  */
3208                 if ((entry_ctls & VM_ENTRY_LOAD_EFER) != 0 &&
3209                     (reg == VM_REG_GUEST_EFER)) {
3210                         vmcs_getreg(&vmx->vmcs[vcpu], running,
3211                                     VMCS_IDENT(VMCS_ENTRY_CTLS), &ctls);
3212                         if (val & EFER_LMA)
3213                                 ctls |= VM_ENTRY_GUEST_LMA;
3214                         else
3215                                 ctls &= ~VM_ENTRY_GUEST_LMA;
3216                         vmcs_setreg(&vmx->vmcs[vcpu], running,
3217                                     VMCS_IDENT(VMCS_ENTRY_CTLS), ctls);
3218                 }
3219
3220                 shadow = vmx_shadow_reg(reg);
3221                 if (shadow > 0) {
3222                         /*
3223                          * Store the unmodified value in the shadow
3224                          */
3225                         error = vmcs_setreg(&vmx->vmcs[vcpu], running,
3226                                     VMCS_IDENT(shadow), val);
3227                 }
3228
3229                 if (reg == VM_REG_GUEST_CR3) {
3230                         /*
3231                          * Invalidate the guest vcpu's TLB mappings to emulate
3232                          * the behavior of updating %cr3.
3233                          *
3234                          * XXX the processor retains global mappings when %cr3
3235                          * is updated but vmx_invvpid() does not.
3236                          */
3237                         pmap = vmx->ctx[vcpu].pmap;
3238                         vmx_invvpid(vmx, vcpu, pmap, running);
3239                 }
3240         }
3241
3242         return (error);
3243 }
3244
3245 static int
3246 vmx_getdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
3247 {
3248         int hostcpu, running;
3249         struct vmx *vmx = arg;
3250
3251         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3252         if (running && hostcpu != curcpu)
3253                 panic("vmx_getdesc: %s%d is running", vm_name(vmx->vm), vcpu);
3254
3255         return (vmcs_getdesc(&vmx->vmcs[vcpu], running, reg, desc));
3256 }
3257
3258 static int
3259 vmx_setdesc(void *arg, int vcpu, int reg, struct seg_desc *desc)
3260 {
3261         int hostcpu, running;
3262         struct vmx *vmx = arg;
3263
3264         running = vcpu_is_running(vmx->vm, vcpu, &hostcpu);
3265         if (running && hostcpu != curcpu)
3266                 panic("vmx_setdesc: %s%d is running", vm_name(vmx->vm), vcpu);
3267
3268         return (vmcs_setdesc(&vmx->vmcs[vcpu], running, reg, desc));
3269 }
3270
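/*
 * Report whether an optional capability is supported by the host and, if
 * so, whether it is currently enabled for this vcpu.
 */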
3271 static int
3272 vmx_getcap(void *arg, int vcpu, int type, int *retval)
3273 {
3274         struct vmx *vmx = arg;
3275         int vcap;
3276         int ret;
3277
3278         ret = ENOENT;
3279
3280         vcap = vmx->cap[vcpu].set;
3281
3282         switch (type) {
3283         case VM_CAP_HALT_EXIT:
3284                 if (cap_halt_exit)
3285                         ret = 0;
3286                 break;
3287         case VM_CAP_PAUSE_EXIT:
3288                 if (cap_pause_exit)
3289                         ret = 0;
3290                 break;
3291         case VM_CAP_MTRAP_EXIT:
3292                 if (cap_monitor_trap)
3293                         ret = 0;
3294                 break;
3295         case VM_CAP_UNRESTRICTED_GUEST:
3296                 if (cap_unrestricted_guest)
3297                         ret = 0;
3298                 break;
3299         case VM_CAP_ENABLE_INVPCID:
3300                 if (cap_invpcid)
3301                         ret = 0;
3302                 break;
3303         default:
3304                 break;
3305         }
3306
3307         if (ret == 0)
3308                 *retval = (vcap & (1 << type)) ? 1 : 0;
3309
3310         return (ret);
3311 }
3312
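/*
 * Enable or disable an optional capability by flipping the corresponding
 * bit in the primary or secondary processor-based VM-execution controls.
 */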
3313 static int
3314 vmx_setcap(void *arg, int vcpu, int type, int val)
3315 {
3316         struct vmx *vmx = arg;
3317         struct vmcs *vmcs = &vmx->vmcs[vcpu];
3318         uint32_t baseval;
3319         uint32_t *pptr;
3320         int error;
3321         int flag;
3322         int reg;
3323         int retval;
3324
3325         retval = ENOENT;
3326         pptr = NULL;
3327
3328         switch (type) {
3329         case VM_CAP_HALT_EXIT:
3330                 if (cap_halt_exit) {
3331                         retval = 0;
3332                         pptr = &vmx->cap[vcpu].proc_ctls;
3333                         baseval = *pptr;
3334                         flag = PROCBASED_HLT_EXITING;
3335                         reg = VMCS_PRI_PROC_BASED_CTLS;
3336                 }
3337                 break;
3338         case VM_CAP_MTRAP_EXIT:
3339                 if (cap_monitor_trap) {
3340                         retval = 0;
3341                         pptr = &vmx->cap[vcpu].proc_ctls;
3342                         baseval = *pptr;
3343                         flag = PROCBASED_MTF;
3344                         reg = VMCS_PRI_PROC_BASED_CTLS;
3345                 }
3346                 break;
3347         case VM_CAP_PAUSE_EXIT:
3348                 if (cap_pause_exit) {
3349                         retval = 0;
3350                         pptr = &vmx->cap[vcpu].proc_ctls;
3351                         baseval = *pptr;
3352                         flag = PROCBASED_PAUSE_EXITING;
3353                         reg = VMCS_PRI_PROC_BASED_CTLS;
3354                 }
3355                 break;
3356         case VM_CAP_UNRESTRICTED_GUEST:
3357                 if (cap_unrestricted_guest) {
3358                         retval = 0;
3359                         pptr = &vmx->cap[vcpu].proc_ctls2;
3360                         baseval = *pptr;
3361                         flag = PROCBASED2_UNRESTRICTED_GUEST;
3362                         reg = VMCS_SEC_PROC_BASED_CTLS;
3363                 }
3364                 break;
3365         case VM_CAP_ENABLE_INVPCID:
3366                 if (cap_invpcid) {
3367                         retval = 0;
3368                         pptr = &vmx->cap[vcpu].proc_ctls2;
3369                         baseval = *pptr;
3370                         flag = PROCBASED2_ENABLE_INVPCID;
3371                         reg = VMCS_SEC_PROC_BASED_CTLS;
3372                 }
3373                 break;
3374         default:
3375                 break;
3376         }
3377
3378         if (retval == 0) {
3379                 if (val) {
3380                         baseval |= flag;
3381                 } else {
3382                         baseval &= ~flag;
3383                 }
3384                 VMPTRLD(vmcs);
3385                 error = vmwrite(reg, baseval);
3386                 VMCLEAR(vmcs);
3387
3388                 if (error) {
3389                         retval = error;
3390                 } else {
3391                         /*
3392                          * Update the optional stored flags and record
3393                          * the new setting.
3394                          */
3395                         if (pptr != NULL) {
3396                                 *pptr = baseval;
3397                         }
3398
3399                         if (val) {
3400                                 vmx->cap[vcpu].set |= (1 << type);
3401                         } else {
3402                                 vmx->cap[vcpu].set &= ~(1 << type);
3403                         }
3404                 }
3405         }
3406
3407         return (retval);
3408 }
3409
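/*
 * VT-x specific vlapic: the generic vlapic plus the posted interrupt
 * descriptor shared with the processor and a record of the priorities
 * already pending notification (see vmx_set_intr_ready()).
 */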
3410 struct vlapic_vtx {
3411         struct vlapic   vlapic;
3412         struct pir_desc *pir_desc;
3413         struct vmx      *vmx;
3414         u_int   pending_prio;
3415 };
3416
3417 #define VPR_PRIO_BIT(vpr)       (1 << ((vpr) >> 4))
3418
3419 #define VMX_CTR_PIR(vm, vcpuid, pir_desc, notify, vector, level, msg)   \
3420 do {                                                                    \
3421         VCPU_CTR2(vm, vcpuid, msg " assert %s-triggered vector %d",     \
3422             level ? "level" : "edge", vector);                          \
3423         VCPU_CTR1(vm, vcpuid, msg " pir0 0x%016lx", pir_desc->pir[0]);  \
3424         VCPU_CTR1(vm, vcpuid, msg " pir1 0x%016lx", pir_desc->pir[1]);  \
3425         VCPU_CTR1(vm, vcpuid, msg " pir2 0x%016lx", pir_desc->pir[2]);  \
3426         VCPU_CTR1(vm, vcpuid, msg " pir3 0x%016lx", pir_desc->pir[3]);  \
3427         VCPU_CTR1(vm, vcpuid, msg " notify: %s", notify ? "yes" : "no");\
3428 } while (0)
3429
3430 /*
3431  * vlapic->ops handlers that utilize the APICv hardware assist described in
3432  * Chapter 29 of the Intel SDM.
3433  */
3434 static int
3435 vmx_set_intr_ready(struct vlapic *vlapic, int vector, bool level)
3436 {
3437         struct vlapic_vtx *vlapic_vtx;
3438         struct pir_desc *pir_desc;
3439         uint64_t mask;
3440         int idx, notify = 0;
3441
3442         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3443         pir_desc = vlapic_vtx->pir_desc;
3444
3445         /*
3446          * Keep track of interrupt requests in the PIR descriptor. This is
3447          * because the virtual APIC page pointed to by the VMCS cannot be
3448          * modified if the vcpu is running.
3449          */
3450         idx = vector / 64;
3451         mask = 1UL << (vector % 64);
3452         atomic_set_long(&pir_desc->pir[idx], mask);
3453
3454         /*
3455          * A notification is required whenever the 'pending' bit makes a
3456          * transition from 0->1.
3457          *
3458          * Even if the 'pending' bit is already asserted, notification about
3459          * the incoming interrupt may still be necessary.  For example, if a
3460          * vCPU is HLTed with a high PPR, a low priority interrupt would cause
3461          * the 0->1 'pending' transition with a notification, but the vCPU
3462          * would ignore the interrupt for the time being.  The same vCPU would
3463          * need to then be notified if a high-priority interrupt arrived which
3464          * satisfied the PPR.
3465          *
3466          * The priorities of interrupts injected while 'pending' is asserted
3467          * are tracked in a custom bitfield 'pending_prio'.  Should the
3468          * to-be-injected interrupt exceed the priorities already present, the
3469          * notification is sent.  The priorities recorded in 'pending_prio' are
3470          * cleared whenever the 'pending' bit makes another 0->1 transition.
3471          */
3472         if (atomic_cmpset_long(&pir_desc->pending, 0, 1) != 0) {
3473                 notify = 1;
3474                 vlapic_vtx->pending_prio = 0;
3475         } else {
3476                 const u_int old_prio = vlapic_vtx->pending_prio;
3477                 const u_int prio_bit = VPR_PRIO_BIT(vector & APIC_TPR_INT);
3478
3479                 if ((old_prio & prio_bit) == 0 && prio_bit > old_prio) {
3480                         atomic_set_int(&vlapic_vtx->pending_prio, prio_bit);
3481                         notify = 1;
3482                 }
3483         }
3484
3485         VMX_CTR_PIR(vlapic->vm, vlapic->vcpuid, pir_desc, notify, vector,
3486             level, "vmx_set_intr_ready");
3487         return (notify);
3488 }
3489
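/*
 * Called from the 'HLT' exit path to decide whether the halted vcpu has
 * an interrupt that it would actually recognize, by comparing the highest
 * pending vector against the processor priority in the virtual APIC page.
 */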
3490 static int
3491 vmx_pending_intr(struct vlapic *vlapic, int *vecptr)
3492 {
3493         struct vlapic_vtx *vlapic_vtx;
3494         struct pir_desc *pir_desc;
3495         struct LAPIC *lapic;
3496         uint64_t pending, pirval;
3497         uint32_t ppr, vpr;
3498         int i;
3499
3500         /*
3501          * This function is only expected to be called from the 'HLT' exit
3502          * handler which does not care about the vector that is pending.
3503          */
3504         KASSERT(vecptr == NULL, ("vmx_pending_intr: vecptr must be NULL"));
3505
3506         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3507         pir_desc = vlapic_vtx->pir_desc;
3508
3509         pending = atomic_load_acq_long(&pir_desc->pending);
3510         if (!pending) {
3511                 /*
3512                  * While a virtual interrupt may have already been
3513                  * processed, its actual delivery may still be pending
3514                  * on the interruptibility of the guest.  Recognize a
3515                  * pending interrupt by reevaluating virtual interrupts
3516                  * following Section 29.2.1 in the Intel SDM Volume 3.
3517                  */
3518                 struct vm_exit *vmexit;
3519                 uint8_t rvi, ppr;
3520
3521                 vmexit = vm_exitinfo(vlapic->vm, vlapic->vcpuid);
3522                 KASSERT(vmexit->exitcode == VM_EXITCODE_HLT,
3523                     ("vmx_pending_intr: exitcode not 'HLT'"));
3524                 rvi = vmexit->u.hlt.intr_status & APIC_TPR_INT;
3525                 lapic = vlapic->apic_page;
3526                 ppr = lapic->ppr & APIC_TPR_INT;
3527                 if (rvi > ppr) {
3528                         return (1);
3529                 }
3530
3531                 return (0);
3532         }
3533
3534         /*
3535          * If there is an interrupt pending then it will be recognized only
3536          * if its priority is greater than the processor priority.
3537          *
3538          * Special case: if the processor priority is zero then any pending
3539          * interrupt will be recognized.
3540          */
3541         lapic = vlapic->apic_page;
3542         ppr = lapic->ppr & APIC_TPR_INT;
3543         if (ppr == 0)
3544                 return (1);
3545
3546         VCPU_CTR1(vlapic->vm, vlapic->vcpuid, "HLT with non-zero PPR %d",
3547             lapic->ppr);
3548
3549         vpr = 0;
3550         for (i = 3; i >= 0; i--) {
3551                 pirval = pir_desc->pir[i];
3552                 if (pirval != 0) {
3553                         vpr = (i * 64 + flsl(pirval) - 1) & APIC_TPR_INT;
3554                         break;
3555                 }
3556         }
3557
3558         /*
3559          * If the highest-priority pending interrupt falls short of the
3560          * processor priority of this vCPU, ensure that 'pending_prio' does not
3561          * have any stale bits which would preclude a higher-priority interrupt
3562          * from incurring a notification later.
3563          */
3564         if (vpr <= ppr) {
3565                 const u_int prio_bit = VPR_PRIO_BIT(vpr);
3566                 const u_int old = vlapic_vtx->pending_prio;
3567
3568                 if (old > prio_bit && (old & prio_bit) == 0) {
3569                         vlapic_vtx->pending_prio = prio_bit;
3570                 }
3571                 return (0);
3572         }
3573         return (1);
3574 }
3575
3576 static void
3577 vmx_intr_accepted(struct vlapic *vlapic, int vector)
3578 {
3579
3580         panic("vmx_intr_accepted: not expected to be called");
3581 }
3582
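/*
 * Update the EOI-exit bitmap entry for 'vector' to match its trigger mode.
 * The VMCS is written directly, so the vcpu must not be running.
 */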
3583 static void
3584 vmx_set_tmr(struct vlapic *vlapic, int vector, bool level)
3585 {
3586         struct vlapic_vtx *vlapic_vtx;
3587         struct vmx *vmx;
3588         struct vmcs *vmcs;
3589         uint64_t mask, val;
3590
3591         KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d", vector));
3592         KASSERT(!vcpu_is_running(vlapic->vm, vlapic->vcpuid, NULL),
3593             ("vmx_set_tmr: vcpu cannot be running"));
3594
3595         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3596         vmx = vlapic_vtx->vmx;
3597         vmcs = &vmx->vmcs[vlapic->vcpuid];
3598         mask = 1UL << (vector % 64);
3599
3600         VMPTRLD(vmcs);
3601         val = vmcs_read(VMCS_EOI_EXIT(vector));
3602         if (level)
3603                 val |= mask;
3604         else
3605                 val &= ~mask;
3606         vmcs_write(VMCS_EOI_EXIT(vector), val);
3607         VMCLEAR(vmcs);
3608 }
3609
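/*
 * Switch a vcpu from virtualizing the APIC access page to virtualizing
 * x2APIC MSR accesses.  State shared by all vcpus (the APIC access page
 * mapping and the MSR bitmap) is adjusted only once, from vcpu 0.
 */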
3610 static void
3611 vmx_enable_x2apic_mode(struct vlapic *vlapic)
3612 {
3613         struct vmx *vmx;
3614         struct vmcs *vmcs;
3615         uint32_t proc_ctls2;
3616         int vcpuid, error;
3617
3618         vcpuid = vlapic->vcpuid;
3619         vmx = ((struct vlapic_vtx *)vlapic)->vmx;
3620         vmcs = &vmx->vmcs[vcpuid];
3621
3622         proc_ctls2 = vmx->cap[vcpuid].proc_ctls2;
3623         KASSERT((proc_ctls2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES) != 0,
3624             ("%s: invalid proc_ctls2 %#x", __func__, proc_ctls2));
3625
3626         proc_ctls2 &= ~PROCBASED2_VIRTUALIZE_APIC_ACCESSES;
3627         proc_ctls2 |= PROCBASED2_VIRTUALIZE_X2APIC_MODE;
3628         vmx->cap[vcpuid].proc_ctls2 = proc_ctls2;
3629
3630         VMPTRLD(vmcs);
3631         vmcs_write(VMCS_SEC_PROC_BASED_CTLS, proc_ctls2);
3632         VMCLEAR(vmcs);
3633
3634         if (vlapic->vcpuid == 0) {
3635                 /*
3636                  * The nested page table mappings are shared by all vcpus
3637                  * so unmap the APIC access page just once.
3638                  */
3639                 error = vm_unmap_mmio(vmx->vm, DEFAULT_APIC_BASE, PAGE_SIZE);
3640                 KASSERT(error == 0, ("%s: vm_unmap_mmio error %d",
3641                     __func__, error));
3642
3643                 /*
3644                  * The MSR bitmap is shared by all vcpus so modify it only
3645                  * once in the context of vcpu 0.
3646                  */
3647                 error = vmx_allow_x2apic_msrs(vmx);
3648                 KASSERT(error == 0, ("%s: vmx_allow_x2apic_msrs error %d",
3649                     __func__, error));
3650         }
3651 }
3652
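/*
 * Send the posted-interrupt notification vector to the host cpu that is
 * currently running this vcpu, so the bits set in the PIR are delivered
 * to the guest without forcing a VM exit.
 */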
3653 static void
3654 vmx_post_intr(struct vlapic *vlapic, int hostcpu)
3655 {
3656
3657         ipi_cpu(hostcpu, pirvec);
3658 }
3659
3660 /*
3661  * Transfer the pending interrupts in the PIR descriptor to the IRR
3662  * in the virtual APIC page.
3663  */
3664 static void
3665 vmx_inject_pir(struct vlapic *vlapic)
3666 {
3667         struct vlapic_vtx *vlapic_vtx;
3668         struct pir_desc *pir_desc;
3669         struct LAPIC *lapic;
3670         uint64_t val, pirval;
3671         int rvi, pirbase = -1;
3672         uint16_t intr_status_old, intr_status_new;
3673
3674         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3675         pir_desc = vlapic_vtx->pir_desc;
3676         if (atomic_cmpset_long(&pir_desc->pending, 1, 0) == 0) {
3677                 VCPU_CTR0(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3678                     "no posted interrupt pending");
3679                 return;
3680         }
3681
3682         pirval = 0;
3683         pirbase = -1;
3684         lapic = vlapic->apic_page;
3685
3686         val = atomic_readandclear_long(&pir_desc->pir[0]);
3687         if (val != 0) {
3688                 lapic->irr0 |= val;
3689                 lapic->irr1 |= val >> 32;
3690                 pirbase = 0;
3691                 pirval = val;
3692         }
3693
3694         val = atomic_readandclear_long(&pir_desc->pir[1]);
3695         if (val != 0) {
3696                 lapic->irr2 |= val;
3697                 lapic->irr3 |= val >> 32;
3698                 pirbase = 64;
3699                 pirval = val;
3700         }
3701
3702         val = atomic_readandclear_long(&pir_desc->pir[2]);
3703         if (val != 0) {
3704                 lapic->irr4 |= val;
3705                 lapic->irr5 |= val >> 32;
3706                 pirbase = 128;
3707                 pirval = val;
3708         }
3709
3710         val = atomic_readandclear_long(&pir_desc->pir[3]);
3711         if (val != 0) {
3712                 lapic->irr6 |= val;
3713                 lapic->irr7 |= val >> 32;
3714                 pirbase = 192;
3715                 pirval = val;
3716         }
3717
3718         VLAPIC_CTR_IRR(vlapic, "vmx_inject_pir");
3719
3720         /*
3721          * Update RVI so the processor can evaluate pending virtual
3722          * interrupts on VM-entry.
3723          *
3724          * It is possible for pirval to be 0 here, even though the
3725          * pending bit has been set. The scenario is:
3726          * CPU-Y is sending a posted interrupt to CPU-X, which
3727          * is running a guest and processing posted interrupts in h/w.
3728          * CPU-X will eventually exit and the state seen in s/w is
3729          * the pending bit set, but no PIR bits set.
3730          *
3731          *      CPU-X                      CPU-Y
3732          *   (vm running)                (host running)
3733          *   rx posted interrupt
3734          *   CLEAR pending bit
3735          *                               SET PIR bit
3736          *   READ/CLEAR PIR bits
3737          *                               SET pending bit
3738          *   (vm exit)
3739          *   pending bit set, PIR 0
3740          */
3741         if (pirval != 0) {
3742                 rvi = pirbase + flsl(pirval) - 1;
3743                 intr_status_old = vmcs_read(VMCS_GUEST_INTR_STATUS);
3744                 intr_status_new = (intr_status_old & 0xFF00) | rvi;
3745                 if (intr_status_new > intr_status_old) {
3746                         vmcs_write(VMCS_GUEST_INTR_STATUS, intr_status_new);
3747                         VCPU_CTR2(vlapic->vm, vlapic->vcpuid, "vmx_inject_pir: "
3748                             "guest_intr_status changed from 0x%04x to 0x%04x",
3749                             intr_status_old, intr_status_new);
3750                 }
3751         }
3752 }
3753
3754 static struct vlapic *
3755 vmx_vlapic_init(void *arg, int vcpuid)
3756 {
3757         struct vmx *vmx;
3758         struct vlapic *vlapic;
3759         struct vlapic_vtx *vlapic_vtx;
3760
3761         vmx = arg;
3762
3763         vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
3764         vlapic->vm = vmx->vm;
3765         vlapic->vcpuid = vcpuid;
3766         vlapic->apic_page = (struct LAPIC *)&vmx->apic_page[vcpuid];
3767
3768         vlapic_vtx = (struct vlapic_vtx *)vlapic;
3769         vlapic_vtx->pir_desc = &vmx->pir_desc[vcpuid];
3770         vlapic_vtx->vmx = vmx;
3771
3772         if (virtual_interrupt_delivery) {
3773                 vlapic->ops.set_intr_ready = vmx_set_intr_ready;
3774                 vlapic->ops.pending_intr = vmx_pending_intr;
3775                 vlapic->ops.intr_accepted = vmx_intr_accepted;
3776                 vlapic->ops.set_tmr = vmx_set_tmr;
3777                 vlapic->ops.enable_x2apic_mode = vmx_enable_x2apic_mode;
3778         }
3779
3780         if (posted_interrupts)
3781                 vlapic->ops.post_intr = vmx_post_intr;
3782
3783         vlapic_init(vlapic);
3784
3785         return (vlapic);
3786 }
3787
3788 static void
3789 vmx_vlapic_cleanup(void *arg, struct vlapic *vlapic)
3790 {
3791
3792         vlapic_cleanup(vlapic);
3793         free(vlapic, M_VLAPIC);
3794 }
3795
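/*
 * Ops table exported to the machine-independent vmm layer.  Illustrative
 * sketch only (the dispatch code lives in vmm.c, not in this file): the
 * generic layer is expected to call through these pointers roughly as
 *
 *      error = (*vmm_ops_intel.vmrun)(cookie, vcpuid, rip, pmap, &evinfo);
 *
 * where 'cookie' is the per-VM value previously returned by the .vminit
 * handler.
 */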
3796 struct vmm_ops vmm_ops_intel = {
3797         .init           = vmx_init,
3798         .cleanup        = vmx_cleanup,
3799         .resume         = vmx_restore,
3800         .vminit         = vmx_vminit,
3801         .vmrun          = vmx_run,
3802         .vmcleanup      = vmx_vmcleanup,
3803         .vmgetreg       = vmx_getreg,
3804         .vmsetreg       = vmx_setreg,
3805         .vmgetdesc      = vmx_getdesc,
3806         .vmsetdesc      = vmx_setdesc,
3807         .vmgetcap       = vmx_getcap,
3808         .vmsetcap       = vmx_setcap,
3809         .vmspace_alloc  = ept_vmspace_alloc,
3810         .vmspace_free   = ept_vmspace_free,
3811         .vlapic_init    = vmx_vlapic_init,
3812         .vlapic_cleanup = vmx_vlapic_cleanup,
3813 };