2 * Copyright (c) 2011 NetApp, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
41 #include <machine/segments.h>
42 #include <machine/vmm.h>
44 #include "vmx_cpufunc.h"
/*
 * Sanitize a register value before it is written into the VMCS.
 * For the guest CR0/CR4 fields the raw value is passed through
 * vmx_fix_cr0()/vmx_fix_cr4(); presumably these apply the VMX
 * fixed-0/fixed-1 bit constraints from the IA32_VMX_CR{0,4}_FIXED
 * MSRs — confirm in vmx_cpufunc.h.
 * (NOTE(review): intervening lines are elided in this view.)
 */
54 vmcs_fix_regval(uint32_t encoding, uint64_t val)
/* Guest CR0: force architecturally-required bits. */
59 		val = vmx_fix_cr0(val);
/* Guest CR4: likewise. */
62 		val = vmx_fix_cr4(val);
/*
 * Translate a generic VM_REG_GUEST_* register identifier into the
 * corresponding VMCS field encoding.  Covers control registers,
 * RSP/RIP/RFLAGS, the segment selectors, and EFER.
 * (NOTE(review): the switch header, some selector case labels and the
 * default path are elided in this view — presumably the default
 * returns an invalid-encoding sentinel; callers compare against
 * (uint32_t)-1.)
 */
71 vmcs_field_encoding(int ident)
74 	case VM_REG_GUEST_CR0:
75 		return (VMCS_GUEST_CR0);
76 	case VM_REG_GUEST_CR3:
77 		return (VMCS_GUEST_CR3);
78 	case VM_REG_GUEST_CR4:
79 		return (VMCS_GUEST_CR4);
80 	case VM_REG_GUEST_DR7:
81 		return (VMCS_GUEST_DR7);
82 	case VM_REG_GUEST_RSP:
83 		return (VMCS_GUEST_RSP);
84 	case VM_REG_GUEST_RIP:
85 		return (VMCS_GUEST_RIP);
86 	case VM_REG_GUEST_RFLAGS:
87 		return (VMCS_GUEST_RFLAGS);
/* Segment-selector fields (16-bit guest-state area). */
89 		return (VMCS_GUEST_ES_SELECTOR);
91 		return (VMCS_GUEST_CS_SELECTOR);
93 		return (VMCS_GUEST_SS_SELECTOR);
95 		return (VMCS_GUEST_DS_SELECTOR);
97 		return (VMCS_GUEST_FS_SELECTOR);
99 		return (VMCS_GUEST_GS_SELECTOR);
100 	case VM_REG_GUEST_TR:
101 		return (VMCS_GUEST_TR_SELECTOR);
102 	case VM_REG_GUEST_LDTR:
103 		return (VMCS_GUEST_LDTR_SELECTOR);
104 	case VM_REG_GUEST_EFER:
105 		return (VMCS_GUEST_IA32_EFER);
/*
 * Translate a segment register identifier into the triple of VMCS
 * field encodings describing that segment: base address, limit and
 * access rights, returned through 'base', 'lim' and 'acc'.
 *
 * IDTR and GDTR have no access-rights field in the VMCS, so for those
 * '*acc' is set to VMCS_INVALID_ENCODING; callers must check for it
 * before reading/writing the access field.
 * (NOTE(review): 'break' statements and the default/error path are
 * elided in this view; callers treat a non-zero return as an invalid
 * segment identifier.)
 */
113 vmcs_seg_desc_encoding(int seg, uint32_t *base, uint32_t *lim, uint32_t *acc)
117 	case VM_REG_GUEST_ES:
118 		*base = VMCS_GUEST_ES_BASE;
119 		*lim = VMCS_GUEST_ES_LIMIT;
120 		*acc = VMCS_GUEST_ES_ACCESS_RIGHTS;
122 	case VM_REG_GUEST_CS:
123 		*base = VMCS_GUEST_CS_BASE;
124 		*lim = VMCS_GUEST_CS_LIMIT;
125 		*acc = VMCS_GUEST_CS_ACCESS_RIGHTS;
127 	case VM_REG_GUEST_SS:
128 		*base = VMCS_GUEST_SS_BASE;
129 		*lim = VMCS_GUEST_SS_LIMIT;
130 		*acc = VMCS_GUEST_SS_ACCESS_RIGHTS;
132 	case VM_REG_GUEST_DS:
133 		*base = VMCS_GUEST_DS_BASE;
134 		*lim = VMCS_GUEST_DS_LIMIT;
135 		*acc = VMCS_GUEST_DS_ACCESS_RIGHTS;
137 	case VM_REG_GUEST_FS:
138 		*base = VMCS_GUEST_FS_BASE;
139 		*lim = VMCS_GUEST_FS_LIMIT;
140 		*acc = VMCS_GUEST_FS_ACCESS_RIGHTS;
142 	case VM_REG_GUEST_GS:
143 		*base = VMCS_GUEST_GS_BASE;
144 		*lim = VMCS_GUEST_GS_LIMIT;
145 		*acc = VMCS_GUEST_GS_ACCESS_RIGHTS;
147 	case VM_REG_GUEST_TR:
148 		*base = VMCS_GUEST_TR_BASE;
149 		*lim = VMCS_GUEST_TR_LIMIT;
150 		*acc = VMCS_GUEST_TR_ACCESS_RIGHTS;
152 	case VM_REG_GUEST_LDTR:
153 		*base = VMCS_GUEST_LDTR_BASE;
154 		*lim = VMCS_GUEST_LDTR_LIMIT;
155 		*acc = VMCS_GUEST_LDTR_ACCESS_RIGHTS;
/* IDTR/GDTR: no access-rights field exists in the VMCS. */
157 	case VM_REG_GUEST_IDTR:
158 		*base = VMCS_GUEST_IDTR_BASE;
159 		*lim = VMCS_GUEST_IDTR_LIMIT;
160 		*acc = VMCS_INVALID_ENCODING;
162 	case VM_REG_GUEST_GDTR:
163 		*base = VMCS_GUEST_GDTR_BASE;
164 		*lim = VMCS_GUEST_GDTR_LIMIT;
165 		*acc = VMCS_INVALID_ENCODING;
/*
 * Read a guest register (identified by 'ident') from the VMCS into
 * '*retval'.  'ident' is either a VM_REG_GUEST_* value translated via
 * vmcs_field_encoding(), or — with the sign bit set — a raw VMCS
 * field encoding passed through unchanged (see comment below).
 * A translation result of (uint32_t)-1 is treated as an invalid
 * identifier.
 * (NOTE(review): the load/activate logic implied by 'running' is
 * elided in this view.)
 */
175 vmcs_getreg(struct vmcs *vmcs, int running, int ident, uint64_t *retval)
181 	 * If we need to get at vmx-specific state in the VMCS we can bypass
182 	 * the translation of 'ident' to 'encoding' by simply setting the
183 	 * sign bit. As it so happens the upper 16 bits are reserved (i.e
184 	 * set to 0) in the encodings for the VMCS so we are free to use the
/* Sign bit set: 'ident' is already a raw encoding; strip the flag. */
188 		encoding = ident & 0x7fffffff;
190 		encoding = vmcs_field_encoding(ident);
192 	if (encoding == (uint32_t)-1)
198 	error = vmread(encoding, retval);
/*
 * Write guest register 'ident' in the VMCS with 'val'.  Mirrors
 * vmcs_getreg(): a sign-bit-flagged 'ident' bypasses translation and
 * is used as a raw VMCS encoding.  The value is first passed through
 * vmcs_fix_regval() so CR0/CR4 writes respect the VMX fixed-bit
 * constraints.
 * (NOTE(review): error/return paths are elided in this view.)
 */
207 vmcs_setreg(struct vmcs *vmcs, int running, int ident, uint64_t val)
/* Sign bit set: raw encoding supplied by the caller. */
213 		encoding = ident & 0x7fffffff;
215 		encoding = vmcs_field_encoding(ident);
217 	if (encoding == (uint32_t)-1)
/* Apply fixed-bit fixups (CR0/CR4) before committing the write. */
220 	val = vmcs_fix_regval(encoding, val);
225 	error = vmwrite(encoding, val);
/*
 * Store a segment descriptor (base, limit, access rights) from
 * 'desc' into the guest-state area of the VMCS.  Panics if 'seg'
 * is not a recognized segment register.  The access-rights write is
 * skipped for IDTR/GDTR, which have no such VMCS field.
 */
234 vmcs_setdesc(struct vmcs *vmcs, int running, int seg, struct seg_desc *desc)
237 	uint32_t base, limit, access;
239 	error = vmcs_seg_desc_encoding(seg, &base, &limit, &access);
241 		panic("vmcs_setdesc: invalid segment register %d", seg);
245 	if ((error = vmwrite(base, desc->base)) != 0)
248 	if ((error = vmwrite(limit, desc->limit)) != 0)
/* IDTR/GDTR have no access-rights field; skip the write. */
251 	if (access != VMCS_INVALID_ENCODING) {
252 		if ((error = vmwrite(access, desc->access)) != 0)
/*
 * Fetch a segment descriptor (base, limit, access rights) for 'seg'
 * from the guest-state area of the VMCS into 'desc'.  Panics on an
 * unrecognized segment register.  Reads go through a uint64_t
 * temporary ('u64') since vmread returns 64-bit values; the
 * access-rights read is skipped for IDTR/GDTR.
 */
262 vmcs_getdesc(struct vmcs *vmcs, int running, int seg, struct seg_desc *desc)
265 	uint32_t base, limit, access;
268 	error = vmcs_seg_desc_encoding(seg, &base, &limit, &access);
270 		panic("vmcs_getdesc: invalid segment register %d", seg);
274 	if ((error = vmread(base, &u64)) != 0)
278 	if ((error = vmread(limit, &u64)) != 0)
/* IDTR/GDTR have no access-rights field; skip the read. */
282 	if (access != VMCS_INVALID_ENCODING) {
283 		if ((error = vmread(access, &u64)) != 0)
/*
 * Configure automatic guest MSR save/restore: point both the VM-exit
 * MSR-store area and the VM-entry MSR-load area at the same buffer
 * 'g_area' holding 'g_count' entries, so guest MSRs saved on exit are
 * the ones reloaded on the next entry.
 */
294 vmcs_set_msr_save(struct vmcs *vmcs, u_long g_area, u_int g_count)
301 	 * Guest MSRs are saved in the VM-exit MSR-store area.
302 	 * Guest MSRs are loaded from the VM-entry MSR-load area.
303 	 * Both areas point to the same location in memory.
305 	if ((error = vmwrite(VMCS_EXIT_MSR_STORE, g_area)) != 0)
307 	if ((error = vmwrite(VMCS_EXIT_MSR_STORE_COUNT, g_count)) != 0)
310 	if ((error = vmwrite(VMCS_ENTRY_MSR_LOAD, g_area)) != 0)
312 	if ((error = vmwrite(VMCS_ENTRY_MSR_LOAD_COUNT, g_count)) != 0)
/*
 * One-time initialization of a VMCS: program the guest IA32_PAT with
 * default values, then the host-state area — IA32_PAT, IA32_EFER,
 * CR0/CR4, segment selectors, %fs and IDTR bases, and host RIP
 * (vmx_exit_guest) — plus the exception bitmap (machine-check only)
 * and the VMCS link pointer.
 * (NOTE(review): error-handling 'goto'/return paths between vmwrites
 * are elided in this view.)
 */
322 vmcs_init(struct vmcs *vmcs)
324 	int error, codesel, datasel, tsssel;
325 	u_long cr0, cr4, efer;
326 	uint64_t pat, fsbase, idtrbase;
/* Snapshot the host selectors the VM-exit path will reload. */
329 	codesel = vmm_get_host_codesel();
330 	datasel = vmm_get_host_datasel();
331 	tsssel = vmm_get_host_tsssel();
334 	 * Make sure we have a "current" VMCS to work with.
338 	/* Initialize guest IA32_PAT MSR with the default value */
339 	pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
340 	      PAT_VALUE(1, PAT_WRITE_THROUGH)	|
341 	      PAT_VALUE(2, PAT_UNCACHED)	|
342 	      PAT_VALUE(3, PAT_UNCACHEABLE)	|
343 	      PAT_VALUE(4, PAT_WRITE_BACK)	|
344 	      PAT_VALUE(5, PAT_WRITE_THROUGH)	|
345 	      PAT_VALUE(6, PAT_UNCACHED)	|
346 	      PAT_VALUE(7, PAT_UNCACHEABLE);
347 	if ((error = vmwrite(VMCS_GUEST_IA32_PAT, pat)) != 0)
352 	/* Initialize host IA32_PAT MSR */
353 	pat = vmm_get_host_pat();
354 	if ((error = vmwrite(VMCS_HOST_IA32_PAT, pat)) != 0)
357 	/* Load the IA32_EFER MSR */
358 	efer = vmm_get_host_efer();
359 	if ((error = vmwrite(VMCS_HOST_IA32_EFER, efer)) != 0)
362 	/* Load the control registers */
364 	cr0 = vmm_get_host_cr0();
365 	if ((error = vmwrite(VMCS_HOST_CR0, cr0)) != 0)
/* CR4.VMXE must remain set while in VMX operation. */
368 	cr4 = vmm_get_host_cr4() | CR4_VMXE;
369 	if ((error = vmwrite(VMCS_HOST_CR4, cr4)) != 0)
372 	/* Load the segment selectors */
373 	if ((error = vmwrite(VMCS_HOST_ES_SELECTOR, datasel)) != 0)
376 	if ((error = vmwrite(VMCS_HOST_CS_SELECTOR, codesel)) != 0)
379 	if ((error = vmwrite(VMCS_HOST_SS_SELECTOR, datasel)) != 0)
382 	if ((error = vmwrite(VMCS_HOST_DS_SELECTOR, datasel)) != 0)
385 	if ((error = vmwrite(VMCS_HOST_FS_SELECTOR, datasel)) != 0)
388 	if ((error = vmwrite(VMCS_HOST_GS_SELECTOR, datasel)) != 0)
391 	if ((error = vmwrite(VMCS_HOST_TR_SELECTOR, tsssel)) != 0)
395 	 * Load the Base-Address for %fs and idtr.
397 	 * Note that we exclude %gs, tss and gdtr here because their base
398 	 * address is pcpu specific.
400 	fsbase = vmm_get_host_fsbase();
401 	if ((error = vmwrite(VMCS_HOST_FS_BASE, fsbase)) != 0)
404 	idtrbase = vmm_get_host_idtrbase();
405 	if ((error = vmwrite(VMCS_HOST_IDTR_BASE, idtrbase)) != 0)
408 	/* instruction pointer */
409 	if ((error = vmwrite(VMCS_HOST_RIP, (u_long)vmx_exit_guest)) != 0)
412 	/* exception bitmap */
/* Intercept only machine-check exceptions (vector IDT_MC). */
413 	exc_bitmap = 1 << IDT_MC;
414 	if ((error = vmwrite(VMCS_EXCEPTION_BITMAP, exc_bitmap)) != 0)
/* Link pointer is ~0 — the required value when not using VMCS shadowing. */
418 	if ((error = vmwrite(VMCS_LINK_POINTER, ~0)) != 0)
426 extern int vmxon_enabled[];
/*
 * ddb(4) "show vmcs" command: dump state of the VMCS that is current
 * on this CPU — VPID, guest activity state, exit reason and
 * qualification, and (for selected exit reasons) the interruption
 * info or guest-physical address.  Bails out early if VMX is not
 * enabled on the CPU or no VMCS is current.
 * (NOTE(review): returns, switch headers and some case labels are
 * elided in this view.)
 */
428 DB_SHOW_COMMAND(vmcs, db_show_vmcs)
430 	uint64_t cur_vmcs, val;
433 	if (!vmxon_enabled[curcpu]) {
434 		db_printf("VMX not enabled\n");
/* Only the VMCS current on this CPU can be inspected via vmread. */
439 	db_printf("Only current VMCS supported\n");
444 	if (cur_vmcs == VMCS_INITIAL) {
445 		db_printf("No current VM context\n");
448 	db_printf("VMCS: %jx\n", cur_vmcs);
449 	db_printf("VPID: %lu\n", vmcs_read(VMCS_VPID));
450 	db_printf("Activity: ");
451 	val = vmcs_read(VMCS_GUEST_ACTIVITY);
460 		db_printf("Shutdown");
463 		db_printf("Wait for SIPI");
466 		db_printf("Unknown: %#lx", val);
469 	exit = vmcs_read(VMCS_EXIT_REASON);
/* Bit 31 of the exit reason distinguishes VM-entry failures. */
470 	if (exit & 0x80000000)
471 		db_printf("Entry Failure Reason: %u\n", exit & 0xffff);
473 		db_printf("Exit Reason: %u\n", exit & 0xffff);
474 	db_printf("Qualification: %#lx\n", vmcs_exit_qualification());
475 	db_printf("Guest Linear Address: %#lx\n",
476 	    vmcs_read(VMCS_GUEST_LINEAR_ADDRESS));
/* Keep entry-failure bit and basic reason; drop the middle flag bits. */
477 	switch (exit & 0x8000ffff) {
478 	case EXIT_REASON_EXCEPTION:
479 	case EXIT_REASON_EXT_INTR:
480 		val = vmcs_read(VMCS_EXIT_INTR_INFO);
481 		db_printf("Interrupt Type: ");
/* Bits 10:8 of the interruption info encode the event type. */
482 		switch (val >> 8 & 0x7) {
484 			db_printf("external");
490 			db_printf("HW exception");
493 			db_printf("SW exception");
496 			db_printf("?? %lu", val >> 8 & 0x7);
/* Bits 7:0 carry the vector number. */
499 		db_printf("  Vector: %lu", val & 0xff);
501 			db_printf("  Error Code: %lx",
502 			    vmcs_read(VMCS_EXIT_INTR_ERRCODE));
505 	case EXIT_REASON_EPT_FAULT:
506 	case EXIT_REASON_EPT_MISCONFIG:
507 		db_printf("Guest Physical Address: %#lx\n",
508 		    vmcs_read(VMCS_GUEST_PHYSICAL_ADDRESS));
511 	db_printf("VM-instruction error: %#lx\n", vmcs_instruction_error());