2 * Copyright (c) 2011 NetApp, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/*
 * Capacity of the per-vCPU guest_msrs[] save area in 'struct vmx'.
 * The value is arbitrary; it only needs to accommodate the MSRs that
 * the hypervisor shadows for the guest.
 */
#define GUEST_MSR_MAX_ENTRIES	64		/* arbitrary */
39 register_t tmpstk[32]; /* vmx_return() stack */
42 register_t guest_rdi; /* Guest state */
59 register_t host_r15; /* Host state */
68 * XXX todo debug registers and fpu state
71 int launched; /* vmcs launch state */
74 long eptgen[MAXCPU]; /* cached pmap->pm_eptgen */
77 * The 'eptp' and the 'pmap' do not change during the lifetime of
78 * the VM so it is safe to keep a copy in each vcpu's vmxctx.
91 int lastcpu; /* host cpu that this 'vcpu' last ran on */
95 /* virtual machine softc */
97 struct vmcs vmcs[VM_MAXCPU]; /* one vmcs per virtual cpu */
98 char msr_bitmap[PAGE_SIZE];
99 struct msr_entry guest_msrs[VM_MAXCPU][GUEST_MSR_MAX_ENTRIES];
100 struct vmxctx ctx[VM_MAXCPU];
101 struct vmxcap cap[VM_MAXCPU];
102 struct vmxstate state[VM_MAXCPU];
106 CTASSERT((offsetof(struct vmx, vmcs) & PAGE_MASK) == 0);
107 CTASSERT((offsetof(struct vmx, msr_bitmap) & PAGE_MASK) == 0);
108 CTASSERT((offsetof(struct vmx, guest_msrs) & 15) == 0);
/*
 * Codes returned by vmx_setjmp() identifying which path resumed
 * execution after a guest entry attempt.
 */
#define VMX_RETURN_DIRECT	0	/* vmx_setjmp() returned directly */
#define VMX_RETURN_LONGJMP	1	/* return via vmx_longjmp() */
#define VMX_RETURN_VMRESUME	2	/* vmx_resume() failed (error case) */
#define VMX_RETURN_VMLAUNCH	3	/* vmx_launch() failed (error case) */
#define VMX_RETURN_AST		4	/* AST pending before guest entry */
#define VMX_RETURN_INVEPT	5	/* invept error in launch/resume */
 * vmx_setjmp() returns:
 * - 0 when it returns directly
 * - 1 when it returns from vmx_longjmp()
 * - 2 when it returns from vmx_resume() (which would only be in the error case)
 * - 3 when it returns from vmx_launch() (which would only be in the error case)
 * - 4 when it returns from vmx_resume() or vmx_launch() because of an AST pending
 * - 5 when it returns from vmx_launch()/vmx_resume() because of an invept error
125 int vmx_setjmp(struct vmxctx *ctx);
126 void vmx_longjmp(void); /* returns via vmx_setjmp */
127 void vmx_launch(struct vmxctx *ctx) __dead2; /* may return via vmx_setjmp */
128 void vmx_resume(struct vmxctx *ctx) __dead2; /* may return via vmx_setjmp */
130 u_long vmx_fix_cr0(u_long cr0);
131 u_long vmx_fix_cr4(u_long cr4);