/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29 #ifndef _VMX_CPUFUNC_H_
30 #define _VMX_CPUFUNC_H_
/*
 * Section 5.2 "Conventions" from Intel Architecture Manual 2B.
 *
 *			error
 * VMsucceed		  0
 * VMFailInvalid	  1
 * VMFailValid		  2	see also VMCS VM-Instruction Error Field
 */
#define	VM_SUCCESS		0
#define	VM_FAIL_INVALID		1
#define	VM_FAIL_VALID		2

/*
 * Asm fragment appended after a VMX instruction to translate the
 * CF/ZF outcome into an integer error code in %[error]:
 * CF set -> VM_FAIL_INVALID, ZF set -> VM_FAIL_VALID, else VM_SUCCESS.
 * Uses local labels 1/2/3 so it can be instantiated more than once.
 */
#define	VMX_SET_ERROR_CODE						\
	"	jnc 1f;"						\
	"	mov $1, %[error];"	/* CF: error = 1 */		\
	"	jmp 3f;"						\
	"1:	jnz 2f;"						\
	"	mov $2, %[error];"	/* ZF: error = 2 */		\
	"	jmp 3f;"						\
	"2:	mov $0, %[error];"					\
	"3:"
55 /* returns 0 on success and non-zero on failure */
62 addr = vtophys(region);
63 __asm __volatile("vmxon %[addr];"
65 : [error] "=r" (error)
66 : [addr] "m" (*(uint64_t *)&addr)
72 /* returns 0 on success and non-zero on failure */
74 vmclear(struct vmcs *vmcs)
80 __asm __volatile("vmclear %[addr];"
82 : [error] "=r" (error)
83 : [addr] "m" (*(uint64_t *)&addr)
/*
 * Leave VMX operation on this CPU.  No error reporting: "vmxoff"
 * faults (#UD) if the CPU is not in VMX operation.
 */
static __inline void
vmxoff(void)
{

	__asm __volatile("vmxoff");
}
/*
 * Store the current-VMCS physical address into *addr.
 *
 * NOTE(review): *addr is written by the instruction but is passed as an
 * input operand; correctness relies on the "memory" clobber forcing the
 * compiler to treat memory as modified.
 */
static __inline void
vmptrst(uint64_t *addr)
{

	__asm __volatile("vmptrst %[addr]" :: [addr]"m" (*addr) : "memory");
}
103 vmptrld(struct vmcs *vmcs)
108 addr = vtophys(vmcs);
109 __asm __volatile("vmptrld %[addr];"
111 : [error] "=r" (error)
112 : [addr] "m" (*(uint64_t *)&addr)
118 vmwrite(uint64_t reg, uint64_t val)
122 __asm __volatile("vmwrite %[val], %[reg];"
124 : [error] "=r" (error)
125 : [val] "r" (val), [reg] "r" (reg)
132 vmread(uint64_t r, uint64_t *addr)
136 __asm __volatile("vmread %[r], %[addr];"
138 : [error] "=r" (error)
139 : [r] "r" (r), [addr] "m" (*addr)
/*
 * Panic-on-failure wrapper around vmclear(); exits the critical section
 * entered by the matching VMPTRLD().
 */
static __inline void
VMCLEAR(struct vmcs *vmcs)
{
	int err;

	err = vmclear(vmcs);
	if (err != 0)
		panic("%s: vmclear(%p) error %d", __func__, vmcs, err);

	critical_exit();
}
/*
 * Panic-on-failure wrapper around vmptrld(); enters a critical section
 * so the thread stays on this CPU while the VMCS is current.  Paired
 * with VMCLEAR(), which calls critical_exit().
 */
static __inline void
VMPTRLD(struct vmcs *vmcs)
{
	int err;

	critical_enter();

	err = vmptrld(vmcs);
	if (err != 0)
		panic("%s: vmptrld(%p) error %d", __func__, vmcs, err);
}
169 #define INVVPID_TYPE_ADDRESS 0UL
170 #define INVVPID_TYPE_SINGLE_CONTEXT 1UL
171 #define INVVPID_TYPE_ALL_CONTEXTS 2UL
173 struct invvpid_desc {
177 uint64_t linear_addr;
179 CTASSERT(sizeof(struct invvpid_desc) == 16);
182 invvpid(uint64_t type, struct invvpid_desc desc)
186 __asm __volatile("invvpid %[desc], %[type];"
188 : [error] "=r" (error)
189 : [desc] "m" (desc), [type] "r" (type)
193 panic("invvpid error %d", error);
196 #define INVEPT_TYPE_SINGLE_CONTEXT 1UL
197 #define INVEPT_TYPE_ALL_CONTEXTS 2UL
202 CTASSERT(sizeof(struct invept_desc) == 16);
205 invept(uint64_t type, struct invept_desc desc)
209 __asm __volatile("invept %[desc], %[type];"
211 : [error] "=r" (error)
212 : [desc] "m" (desc), [type] "r" (type)
216 panic("invept error %d", error);