/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29 #ifndef _VMM_INSTRUCTION_EMUL_H_
30 #define _VMM_INSTRUCTION_EMUL_H_
/*
 * The data structures 'vie' and 'vie_op' are meant to be opaque to the
 * consumers of instruction decoding. The only reason why their contents
 * need to be exposed is because they are part of the 'vm_exit' structure.
 */
struct vie_op {
	uint8_t		op_byte;	/* actual opcode byte */
	uint8_t		op_type;	/* type of operation (e.g. MOV) */
};
43 #define VIE_INST_SIZE 15
45 uint8_t inst[VIE_INST_SIZE]; /* instruction bytes */
46 uint8_t num_valid; /* size of the instruction */
47 uint8_t num_processed;
49 uint8_t rex_w:1, /* REX prefix */
55 uint8_t mod:2, /* ModRM byte */
59 uint8_t ss:2, /* SIB byte */
67 int base_register; /* VM_REG_GUEST_xyz */
68 int index_register; /* VM_REG_GUEST_xyz */
70 int64_t displacement; /* optional addr displacement */
71 int64_t immediate; /* optional immediate operand */
73 uint8_t decoded; /* set to 1 if successfully decoded */
75 struct vie_op op; /* opcode description */
/*
 * Callback functions to read and write memory regions.
 *
 * Return 0 on success and a non-zero value on failure; 'arg' is the
 * opaque pointer supplied by the caller of vmm_emulate_instruction().
 */
typedef int (*mem_region_read_t)(void *vm, int cpuid, uint64_t gpa,
				 uint64_t *rval, int rsize, void *arg);

typedef int (*mem_region_write_t)(void *vm, int cpuid, uint64_t gpa,
				  uint64_t wval, int wsize, void *arg);
88 * Emulate the decoded 'vie' instruction.
90 * The callbacks 'mrr' and 'mrw' emulate reads and writes to the memory region
91 * containing 'gpa'. 'mrarg' is an opaque argument that is passed into the
94 * 'void *vm' should be 'struct vm *' when called from kernel context and
95 * 'struct vmctx *' when called from user context.
98 int vmm_emulate_instruction(void *vm, int cpuid, uint64_t gpa, struct vie *vie,
99 mem_region_read_t mrr, mem_region_write_t mrw,
/*
 * APIs to fetch and decode the instruction from nested page fault handler.
 *
 * 'vie' must be initialized before calling 'vmm_fetch_instruction()'.
 */
int vmm_fetch_instruction(struct vm *vm, int cpuid,
			  uint64_t rip, int inst_length, uint64_t cr3,
			  struct vie *vie);

/* Reset 'vie' to a pristine state before fetching a new instruction. */
void vie_init(struct vie *vie);
/*
 * Decode the instruction fetched into 'vie' so it can be emulated.
 *
 * 'gla' is the guest linear address provided by the hardware assist
 * that caused the nested page table fault. It is used to verify that
 * the software instruction decoding is in agreement with the hardware.
 *
 * Some hardware assists do not provide the 'gla' to the hypervisor.
 * To skip the 'gla' verification for this or any other reason pass
 * in VIE_INVALID_GLA instead.
 */
#define	VIE_INVALID_GLA		(1UL << 63)	/* a non-canonical address */
int vmm_decode_instruction(struct vm *vm, int cpuid,
			   uint64_t gla, struct vie *vie);
130 #endif /* _VMM_INSTRUCTION_EMUL_H_ */