/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <machine/asmacros.h>

#include "vmx_assym.s"
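/*
 * Note: "vmx_assym.s" provides the VMXCTX_* field offsets and related
 * constants used throughout this file. It is generated at build time from
 * the C structure definitions (the kernel's genassym mechanism), which
 * keeps this assembly in sync with 'struct vmxctx'.
 */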
#ifdef SMP
#define	LK	lock ;
#else
#define	LK
#endif

/*
 * Disable interrupts before updating %rsp in VMX_CHECK_AST or
 * VMX_GUEST_RESTORE.
 *
 * The location that %rsp points to is a 'vmxctx' and not a
 * real stack, so we don't want an interrupt handler to trash it.
 */
#define	VMX_DISABLE_INTERRUPTS		cli
/*
 * If the thread hosting the vcpu has an ast pending then take care of it
 * by returning from vmx_setjmp() with a return value of VMX_RETURN_AST.
 *
 * Assumes that %rdi holds a pointer to the 'vmxctx' and that interrupts
 * are disabled.
 */
#define	VMX_CHECK_AST							\
	movq	PCPU(CURTHREAD),%rax;					\
	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax);	\
	je	9f;							\
	movq	$VMX_RETURN_AST,%rsi;					\
	movq	%rdi,%rsp;						\
	addq	$VMXCTX_TMPSTKTOP,%rsp;					\
	callq	vmx_return;						\
9:
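/*
 * When no ast is pending the macro falls through to the local label '9'
 * and guest entry proceeds. Otherwise it switches %rsp to the top of the
 * vmxctx temporary stack and calls vmx_return(), which unwinds back to
 * the vmx_setjmp() caller and never returns here.
 */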
/*
 * Assumes that %rdi holds a pointer to the 'vmxctx'.
 *
 * On "return" all registers are updated to reflect guest state. The two
 * exceptions are %rip and %rsp. These registers are atomically switched
 * by hardware from the guest area of the vmcs.
 *
 * We modify %rsp to point to the 'vmxctx' so we can use it to restore
 * host context in case of an error with 'vmlaunch' or 'vmresume'.
 */
#define	VMX_GUEST_RESTORE						\
	movq	%rdi,%rsp;						\
	movq	VMXCTX_GUEST_CR2(%rdi),%rsi;				\
	movq	%rsi,%cr2;						\
	movq	VMXCTX_GUEST_RSI(%rdi),%rsi;				\
	movq	VMXCTX_GUEST_RDX(%rdi),%rdx;				\
	movq	VMXCTX_GUEST_RCX(%rdi),%rcx;				\
	movq	VMXCTX_GUEST_R8(%rdi),%r8;				\
	movq	VMXCTX_GUEST_R9(%rdi),%r9;				\
	movq	VMXCTX_GUEST_RAX(%rdi),%rax;				\
	movq	VMXCTX_GUEST_RBX(%rdi),%rbx;				\
	movq	VMXCTX_GUEST_RBP(%rdi),%rbp;				\
	movq	VMXCTX_GUEST_R10(%rdi),%r10;				\
	movq	VMXCTX_GUEST_R11(%rdi),%r11;				\
	movq	VMXCTX_GUEST_R12(%rdi),%r12;				\
	movq	VMXCTX_GUEST_R13(%rdi),%r13;				\
	movq	VMXCTX_GUEST_R14(%rdi),%r14;				\
	movq	VMXCTX_GUEST_R15(%rdi),%r15;				\
	movq	VMXCTX_GUEST_RDI(%rdi),%rdi;	/* restore %rdi last */
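/*
 * %cr2 is loaded by hand above because, unlike most control state, %cr2
 * is not part of the vmcs guest-state area and is not switched by
 * vmlaunch/vmresume. %rdi must be restored last since it is the base
 * register for all of the other loads.
 */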
/*
 * Check for an error after executing a VMX instruction.
 * 'errreg' will be zero on success and non-zero otherwise.
 * 'ctxreg' points to the 'struct vmxctx' associated with the vcpu.
 */
#define	VM_INSTRUCTION_ERROR(errreg, ctxreg)				\
	jnc	1f;							\
	movl	$VM_FAIL_INVALID,errreg;	/* CF is set */		\
	jmp	3f;							\
1:	jnz	2f;							\
	movl	$VM_FAIL_VALID,errreg;		/* ZF is set */		\
	jmp	3f;							\
2:	movl	$VM_SUCCESS,errreg;					\
3:	movl	errreg,VMXCTX_LAUNCH_ERROR(ctxreg)
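/*
 * Flag conventions, per the Intel SDM: a VMX instruction that fails
 * without a current VMCS sets CF ("VMfailInvalid"); one that fails with a
 * valid current VMCS sets ZF ("VMfailValid") and records an error number
 * in the VM-instruction error field; on success all arithmetic flags are
 * clear. The macro maps these three outcomes onto VM_FAIL_INVALID,
 * VM_FAIL_VALID and VM_SUCCESS.
 */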
/*
 * Set or clear the appropriate bit in 'pm_active'.
 * %rdi = vmxctx
 * %rax, %r11 = scratch registers
 */
#define	VMX_SET_PM_ACTIVE						\
	movq	VMXCTX_PMAP(%rdi), %r11;				\
	movl	PCPU(CPUID), %eax;					\
	LK btsl	%eax, PM_ACTIVE(%r11)

#define	VMX_CLEAR_PM_ACTIVE						\
	movq	VMXCTX_PMAP(%rdi), %r11;				\
	movl	PCPU(CPUID), %eax;					\
	LK btrl	%eax, PM_ACTIVE(%r11)
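/*
 * 'pm_active' records the set of host cpus on which a pmap may have
 * cached translations. Setting this cpu's bit for the duration of guest
 * execution ensures that updates to the nested page tables are propagated
 * here; VMX_CHECK_EPTGEN below performs the matching invalidation. The LK
 * prefix makes the bit operations atomic on SMP.
 */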
/*
 * If 'vmxctx->eptgen[curcpu]' is not identical to 'pmap->pm_eptgen'
 * then we must invalidate all mappings associated with this eptp.
 *
 * %rdi = vmxctx
 * %rax, %rbx, %r11 = scratch registers
 */
#define	VMX_CHECK_EPTGEN						\
	movl	PCPU(CPUID), %ebx;					\
	movq	VMXCTX_PMAP(%rdi), %r11;				\
	movq	PM_EPTGEN(%r11), %rax;					\
	cmpq	%rax, VMXCTX_EPTGEN(%rdi, %rbx, 8);			\
	je	9f;							\
									\
	/* Refresh 'vmxctx->eptgen[curcpu]' */				\
	movq	%rax, VMXCTX_EPTGEN(%rdi, %rbx, 8);			\
									\
	/* Setup the invept descriptor at the top of tmpstk */		\
	movq	%rdi, %r11;						\
	addq	$VMXCTX_TMPSTKTOP, %r11;				\
	movq	VMXCTX_EPTP(%rdi), %rax;				\
	movq	%rax, -16(%r11);					\
	movq	$0x0, -8(%r11);						\
	mov	$0x1, %eax;		/* Single context invalidate */	\
	invept	-16(%r11), %rax;					\
									\
	/* Check for invept error */					\
	VM_INSTRUCTION_ERROR(%eax, %rdi);				\
	testl	%eax, %eax;						\
	jz	9f;							\
									\
	/* Return via vmx_setjmp with retval of VMX_RETURN_INVEPT */	\
	movq	$VMX_RETURN_INVEPT, %rsi;				\
	movq	%rdi, %rsp;						\
	addq	$VMXCTX_TMPSTKTOP, %rsp;				\
	callq	vmx_return;						\
9:
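/*
 * invept operand format, per the Intel SDM: the memory operand is a
 * 128-bit descriptor whose low quadword holds the eptp and whose high
 * quadword must be zero; that is what the stores to -16(%r11) and
 * -8(%r11) build on the temporary stack. Type 1 in %eax selects a
 * single-context invalidation of mappings tagged with that eptp.
 */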
	.text
/*
 * int vmx_setjmp(ctxp)
 * %rdi = ctxp
 *
 * Return value is '0' when it returns directly from here.
 * Return value is '1' when it returns after a vm exit through vmx_longjmp.
 */
ENTRY(vmx_setjmp)
	movq	(%rsp),%rax			/* return address */
	movq	%r15,VMXCTX_HOST_R15(%rdi)
	movq	%r14,VMXCTX_HOST_R14(%rdi)
	movq	%r13,VMXCTX_HOST_R13(%rdi)
	movq	%r12,VMXCTX_HOST_R12(%rdi)
	movq	%rbp,VMXCTX_HOST_RBP(%rdi)
	movq	%rsp,VMXCTX_HOST_RSP(%rdi)
	movq	%rbx,VMXCTX_HOST_RBX(%rdi)
	movq	%rax,VMXCTX_HOST_RIP(%rdi)
	/*
	 * XXX save host debug registers
	 */
	movl	$VMX_RETURN_DIRECT,%eax
	ret
END(vmx_setjmp)
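/*
 * Illustrative only: a minimal sketch of how a caller is expected to
 * drive these routines, in the spirit of setjmp/longjmp. The real loop
 * lives on the C side of the vmx code and the names below are
 * placeholders:
 *
 *	switch (vmx_setjmp(vmxctx)) {
 *	case VMX_RETURN_DIRECT:
 *		vmx_launch(vmxctx);	(or vmx_resume(vmxctx); does not
 *		break;			 return on success)
 *	case VMX_RETURN_LONGJMP:
 *		handle_vm_exit();	(hypothetical exit handler)
 *		break;
 *	case VMX_RETURN_AST:
 *		handle_ast();		(hypothetical ast handler)
 *		break;
 *	}
 */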
/*
 * void vmx_return(struct vmxctx *ctxp, int retval)
 * %rdi = ctxp
 * %rsi = retval
 *
 * Return to vmm context through vmx_setjmp() with a value of 'retval'.
 */
ENTRY(vmx_return)
	/* The pmap is no longer active on the host cpu */
	VMX_CLEAR_PM_ACTIVE

	/* Restore host context. */
	movq	VMXCTX_HOST_R15(%rdi),%r15
	movq	VMXCTX_HOST_R14(%rdi),%r14
	movq	VMXCTX_HOST_R13(%rdi),%r13
	movq	VMXCTX_HOST_R12(%rdi),%r12
	movq	VMXCTX_HOST_RBP(%rdi),%rbp
	movq	VMXCTX_HOST_RSP(%rdi),%rsp
	movq	VMXCTX_HOST_RBX(%rdi),%rbx
	movq	VMXCTX_HOST_RIP(%rdi),%rax
	movq	%rax,(%rsp)			/* return address */
	/*
	 * XXX restore host debug registers
	 */
	movl	%esi,%eax
	ret
END(vmx_return)
/*
 * void vmx_longjmp(void)
 * %rsp points to the struct vmxctx
 */
ENTRY(vmx_longjmp)
	/*
	 * Save guest state that is not automatically saved in the vmcs.
	 */
	movq	%rdi,VMXCTX_GUEST_RDI(%rsp)
	movq	%rsi,VMXCTX_GUEST_RSI(%rsp)
	movq	%rdx,VMXCTX_GUEST_RDX(%rsp)
	movq	%rcx,VMXCTX_GUEST_RCX(%rsp)
	movq	%r8,VMXCTX_GUEST_R8(%rsp)
	movq	%r9,VMXCTX_GUEST_R9(%rsp)
	movq	%rax,VMXCTX_GUEST_RAX(%rsp)
	movq	%rbx,VMXCTX_GUEST_RBX(%rsp)
	movq	%rbp,VMXCTX_GUEST_RBP(%rsp)
	movq	%r10,VMXCTX_GUEST_R10(%rsp)
	movq	%r11,VMXCTX_GUEST_R11(%rsp)
	movq	%r12,VMXCTX_GUEST_R12(%rsp)
	movq	%r13,VMXCTX_GUEST_R13(%rsp)
	movq	%r14,VMXCTX_GUEST_R14(%rsp)
	movq	%r15,VMXCTX_GUEST_R15(%rsp)
	movq	%cr2,%rdi
	movq	%rdi,VMXCTX_GUEST_CR2(%rsp)

	movq	%rsp,%rdi
	movq	$VMX_RETURN_LONGJMP,%rsi

	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_longjmp)
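/*
 * vmx_longjmp is entered directly from a vm exit: the processor loads
 * %rip and %rsp from the host-state area of the vmcs, which this driver
 * points at vmx_longjmp and at the 'vmxctx' respectively. That is why
 * %rsp can be used as a structure pointer above, and why it must be moved
 * up to the temporary stack before vmx_return() is called.
 */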
/*
 * void vmx_resume(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is a 'void' this function may return indirectly
 * through vmx_setjmp() with a return value of 2.
 */
ENTRY(vmx_resume)
	VMX_DISABLE_INTERRUPTS

	VMX_CHECK_AST

	VMX_SET_PM_ACTIVE	/* This vcpu is now active on the host cpu */

	VMX_CHECK_EPTGEN	/* Check if we have to invalidate the TLB */

	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmresume
	/*
	 * Capture the reason why vmresume failed.
	 */
	VM_INSTRUCTION_ERROR(%eax, %rsp)
	/* Return via vmx_setjmp with return value of VMX_RETURN_VMRESUME */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMRESUME,%rsi

	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_resume)
/*
 * void vmx_launch(struct vmxctx *ctxp)
 * %rdi = ctxp
 *
 * Although the return type is a 'void' this function may return indirectly
 * through vmx_setjmp() with a return value of 3.
 */
ENTRY(vmx_launch)
	VMX_DISABLE_INTERRUPTS

	VMX_CHECK_AST

	VMX_SET_PM_ACTIVE	/* This vcpu is now active on the host cpu */

	VMX_CHECK_EPTGEN	/* Check if we have to invalidate the TLB */

	/*
	 * Restore guest state that is not automatically loaded from the vmcs.
	 */
	VMX_GUEST_RESTORE

	vmlaunch
	/*
	 * Capture the reason why vmlaunch failed.
	 */
	VM_INSTRUCTION_ERROR(%eax, %rsp)
	/* Return via vmx_setjmp with return value of VMX_RETURN_VMLAUNCH */
	movq	%rsp,%rdi
	movq	$VMX_RETURN_VMLAUNCH,%rsi

	addq	$VMXCTX_TMPSTKTOP,%rsp
	callq	vmx_return
END(vmx_launch)
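/*
 * vmlaunch vs. vmresume, per the Intel SDM: vmlaunch is valid only while
 * the current VMCS is in the "clear" launch state and transitions it to
 * "launched"; vmresume requires the "launched" state. The caller is
 * therefore expected to use vmx_launch() for the first entry on a given
 * vmcs and vmx_resume() for subsequent entries.
 */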