/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef	_VMM_H_
#define	_VMM_H_

#include <x86/segments.h>
enum vm_suspend_how {
	VM_SUSPEND_NONE,
	VM_SUSPEND_RESET,
	VM_SUSPEND_POWEROFF,
	VM_SUSPEND_HALT,
	VM_SUSPEND_TRIPLEFAULT,
	VM_SUSPEND_LAST
};
/*
 * Identifiers for architecturally defined registers.
 */
enum vm_reg_name {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15,
	VM_REG_GUEST_CR0,
	VM_REG_GUEST_CR3,
	VM_REG_GUEST_CR4,
	VM_REG_GUEST_DR7,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RIP,
	VM_REG_GUEST_RFLAGS,
	VM_REG_GUEST_ES,
	VM_REG_GUEST_CS,
	VM_REG_GUEST_SS,
	VM_REG_GUEST_DS,
	VM_REG_GUEST_FS,
	VM_REG_GUEST_GS,
	VM_REG_GUEST_LDTR,
	VM_REG_GUEST_TR,
	VM_REG_GUEST_IDTR,
	VM_REG_GUEST_GDTR,
	VM_REG_GUEST_EFER,
	VM_REG_GUEST_CR2,
	VM_REG_GUEST_PDPTE0,
	VM_REG_GUEST_PDPTE1,
	VM_REG_GUEST_PDPTE2,
	VM_REG_GUEST_PDPTE3,
	VM_REG_GUEST_INTR_SHADOW,
	VM_REG_LAST
};

enum x2apic_state {
	X2APIC_DISABLED,
	X2APIC_ENABLED,
	X2APIC_STATE_LAST
};
#define	VM_INTINFO_VECTOR(info)	((info) & 0xff)
#define	VM_INTINFO_DEL_ERRCODE	0x800
#define	VM_INTINFO_RSVD		0x7ffff000
#define	VM_INTINFO_VALID	0x80000000
#define	VM_INTINFO_TYPE		0x700
#define	VM_INTINFO_HWINTR	(0 << 8)
#define	VM_INTINFO_NMI		(2 << 8)
#define	VM_INTINFO_HWEXCEPTION	(3 << 8)
#define	VM_INTINFO_SWINTR	(4 << 8)
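
/*
 * A minimal usage sketch (not part of the original interface): encoding a
 * hardware exception with an error code into the 'intinfo' format consumed
 * by vm_exit_intinfo()/vm_entry_intinfo() below, and decoding it again.
 * The error code occupies bits 63:32 in this layout.
 *
 *	uint64_t intinfo;
 *	int vector;
 *
 *	intinfo = IDT_GP | VM_INTINFO_HWEXCEPTION | VM_INTINFO_DEL_ERRCODE |
 *	    VM_INTINFO_VALID | (uint64_t)errcode << 32;
 *
 *	if (intinfo & VM_INTINFO_VALID)
 *		vector = VM_INTINFO_VECTOR(intinfo);
 */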
#ifdef _KERNEL

#define	VM_MAX_NAMELEN	32

struct vm;
struct vm_memory_segment;
struct seg_desc;
struct vm_exit;
struct vm_run;
struct vhpet;
struct vioapic;
struct vlapic;
struct vmspace;
struct vm_object;
struct vm_guest_paging;
struct pmap;
typedef int	(*vmm_init_func_t)(int ipinum);
typedef int	(*vmm_cleanup_func_t)(void);
typedef void	(*vmm_resume_func_t)(void);
typedef void *	(*vmi_init_func_t)(struct vm *vm, struct pmap *pmap);
typedef int	(*vmi_run_func_t)(void *vmi, int vcpu, register_t rip,
		    struct pmap *pmap, void *rendezvous_cookie,
		    void *suspend_cookie);
typedef void	(*vmi_cleanup_func_t)(void *vmi);
typedef int	(*vmi_get_register_t)(void *vmi, int vcpu, int num,
		    uint64_t *retval);
typedef int	(*vmi_set_register_t)(void *vmi, int vcpu, int num,
		    uint64_t val);
typedef int	(*vmi_get_desc_t)(void *vmi, int vcpu, int num,
		    struct seg_desc *desc);
typedef int	(*vmi_set_desc_t)(void *vmi, int vcpu, int num,
		    struct seg_desc *desc);
typedef int	(*vmi_get_cap_t)(void *vmi, int vcpu, int num, int *retval);
typedef int	(*vmi_set_cap_t)(void *vmi, int vcpu, int num, int val);
typedef struct vmspace * (*vmi_vmspace_alloc)(vm_offset_t min, vm_offset_t max);
typedef void	(*vmi_vmspace_free)(struct vmspace *vmspace);
typedef struct vlapic * (*vmi_vlapic_init)(void *vmi, int vcpu);
typedef void	(*vmi_vlapic_cleanup)(void *vmi, struct vlapic *vlapic);
struct vmm_ops {
	vmm_init_func_t		init;		/* module wide initialization */
	vmm_cleanup_func_t	cleanup;
	vmm_resume_func_t	resume;

	vmi_init_func_t		vminit;		/* vm-specific initialization */
	vmi_run_func_t		vmrun;
	vmi_cleanup_func_t	vmcleanup;
	vmi_get_register_t	vmgetreg;
	vmi_set_register_t	vmsetreg;
	vmi_get_desc_t		vmgetdesc;
	vmi_set_desc_t		vmsetdesc;
	vmi_get_cap_t		vmgetcap;
	vmi_set_cap_t		vmsetcap;
	vmi_vmspace_alloc	vmspace_alloc;
	vmi_vmspace_free	vmspace_free;
	vmi_vlapic_init		vlapic_init;
	vmi_vlapic_cleanup	vlapic_cleanup;
};
extern struct vmm_ops vmm_ops_intel;
extern struct vmm_ops vmm_ops_amd;
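
/*
 * Each hardware backend exports its entry points through one of the ops
 * tables above. A sketch of the shape of such an initializer (the member
 * values here are illustrative, not the actual backend symbols):
 *
 *	struct vmm_ops vmm_ops_example = {
 *		.init		= example_init,
 *		.cleanup	= example_cleanup,
 *		.resume		= example_resume,
 *		.vminit		= example_vminit,
 *		.vmrun		= example_vmrun,
 *		...
 *	};
 */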
int vm_create(const char *name, struct vm **retvm);
void vm_destroy(struct vm *vm);
int vm_reinit(struct vm *vm);
const char *vm_name(struct vm *vm);
int vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len);
int vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len);
void *vm_gpa_hold(struct vm *, vm_paddr_t gpa, size_t len, int prot,
    void **cookie);
void vm_gpa_release(void *cookie);
int vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg);
int vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
    vm_offset_t *offset, struct vm_object **object);
boolean_t vm_mem_allocated(struct vm *vm, vm_paddr_t gpa);
int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc);
int vm_run(struct vm *vm, struct vm_run *vmrun);
int vm_suspend(struct vm *vm, enum vm_suspend_how how);
int vm_inject_nmi(struct vm *vm, int vcpu);
int vm_nmi_pending(struct vm *vm, int vcpuid);
void vm_nmi_clear(struct vm *vm, int vcpuid);
int vm_inject_extint(struct vm *vm, int vcpu);
int vm_extint_pending(struct vm *vm, int vcpuid);
void vm_extint_clear(struct vm *vm, int vcpuid);
struct vlapic *vm_lapic(struct vm *vm, int cpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
int vm_set_capability(struct vm *vm, int vcpu, int type, int val);
int vm_get_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state *state);
int vm_set_x2apic_state(struct vm *vm, int vcpu, enum x2apic_state state);
int vm_apicid2vcpuid(struct vm *vm, int apicid);
int vm_activate_cpu(struct vm *vm, int vcpu);
cpuset_t vm_active_cpus(struct vm *vm);
cpuset_t vm_suspended_cpus(struct vm *vm);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip);
/*
 * Rendezvous all vcpus specified in 'dest' and execute 'func(arg)'.
 * The rendezvous 'func(arg)' is not allowed to do anything that will
 * cause the thread to be put to sleep.
 *
 * If the rendezvous is being initiated from a vcpu context then the
 * 'vcpuid' must refer to that vcpu, otherwise it should be set to -1.
 *
 * The caller cannot hold any locks when initiating the rendezvous.
 *
 * The implementation of this API may cause vcpus other than those specified
 * by 'dest' to be stalled. The caller should not rely on any vcpus making
 * forward progress when the rendezvous is in progress.
 */
typedef void (*vm_rendezvous_func_t)(struct vm *vm, int vcpuid, void *arg);
void vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg);
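
/*
 * Usage sketch (illustrative; 'example_func' is hypothetical): run a
 * non-sleeping callback on every active vcpu, initiated from outside any
 * vcpu context (hence the -1):
 *
 *	static void
 *	example_func(struct vm *vm, int vcpuid, void *arg)
 *	{
 *		// per-vcpu work; must not sleep
 *	}
 *
 *	vm_smp_rendezvous(vm, -1, vm_active_cpus(vm), example_func, NULL);
 */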
static __inline int
vcpu_rendezvous_pending(void *rendezvous_cookie)
{

	return (*(uintptr_t *)rendezvous_cookie != 0);
}

static __inline int
vcpu_suspended(void *suspend_cookie)
{

	return (*(int *)suspend_cookie);
}
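
/*
 * These predicates are meant to be polled by a backend's vmi_run_func_t
 * before each VM-entry. A sketch of that loop, assuming the cookie
 * arguments passed to the run function (control flow is illustrative):
 *
 *	for (;;) {
 *		if (vcpu_suspended(suspend_cookie)) {
 *			vm_exit_suspended(vm, vcpu, rip);
 *			break;
 *		}
 *		if (vcpu_rendezvous_pending(rendezvous_cookie)) {
 *			vm_exit_rendezvous(vm, vcpu, rip);
 *			break;
 *		}
 *		// enter the guest
 *	}
 */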
/*
 * Return 1 if device indicated by bus/slot/func is supposed to be a
 * pci passthrough device.
 *
 * Return 0 otherwise.
 */
int vmm_is_pptdev(int bus, int slot, int func);
void *vm_iommu_domain(struct vm *vm);
enum vcpu_state {
	VCPU_IDLE,
	VCPU_FROZEN,
	VCPU_RUNNING,
	VCPU_SLEEPING,
};

int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
    bool from_idle);
enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);
static int __inline
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
	return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
}

#ifdef _SYS_PROC_H_
static int __inline
vcpu_should_yield(struct vm *vm, int vcpu)
{
	return (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED));
}
#endif
void *vcpu_stats(struct vm *vm, int vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
struct vmspace *vm_get_vmspace(struct vm *vm);
int vm_assign_pptdev(struct vm *vm, int bus, int slot, int func);
int vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func);
struct vatpic *vm_atpic(struct vm *vm);
struct vatpit *vm_atpit(struct vm *vm);
struct vpmtmr *vm_pmtmr(struct vm *vm);
struct vrtc *vm_rtc(struct vm *vm);
/*
 * Inject exception 'vector' into the guest vcpu. This function returns 0 on
 * success and non-zero on failure.
 *
 * Wrapper functions like 'vm_inject_gp()' should be preferred to calling
 * this function directly because they enforce the trap-like or fault-like
 * behavior of an exception.
 *
 * This function should only be called in the context of the thread that is
 * executing this vcpu.
 */
int vm_inject_exception(struct vm *vm, int vcpuid, int vector, int err_valid,
    uint32_t errcode, int restart_instruction);
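
/*
 * For instance, a fault-like #GP(0) can be raised with the wrapper defined
 * near the end of this header, which (as a sketch) corresponds to a direct
 * call with a valid zero error code and the instruction restarted so it is
 * re-executed once the fault has been delivered:
 *
 *	vm_inject_gp(vm, vcpuid);
 *
 *	// roughly equivalent direct call
 *	vm_inject_exception(vm, vcpuid, IDT_GP, 1, 0, 1);
 */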
/*
 * This function is called after a VM-exit that occurred during exception or
 * interrupt delivery through the IDT. The format of 'intinfo' is described
 * in Figure 15-1, "EXITINTINFO for All Intercepts", APM, Vol 2.
 *
 * If a VM-exit handler completes the event delivery successfully then it
 * should call vm_exit_intinfo() to extinguish the pending event. For
 * example, if the task switch emulation is triggered via a task gate then
 * it should call this function with 'intinfo=0' to indicate that the
 * external event is not pending anymore.
 *
 * Return value is 0 on success and non-zero on failure.
 */
int vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t intinfo);
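
/*
 * A sketch of the expected calling pattern ('intinfo' would come from the
 * hardware-reported IDT-vectoring information; names are illustrative):
 *
 *	// the VM-exit interrupted an event delivery: record it as pending
 *	error = vm_exit_intinfo(vm, vcpuid, intinfo);
 *
 *	// ... emulation that itself completes the delivery ...
 *
 *	// the event is no longer pending
 *	error = vm_exit_intinfo(vm, vcpuid, 0);
 */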
/*
 * This function is called before every VM-entry to retrieve a pending
 * event that should be injected into the guest. This function combines
 * nested events into a double or triple fault.
 *
 * Returns 0 if there are no events that need to be injected into the guest
 * and non-zero otherwise.
 */
int vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *info);

int vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2);
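
/*
 * Typical VM-entry usage of vm_entry_intinfo(), as a sketch:
 *
 *	uint64_t intinfo;
 *
 *	if (vm_entry_intinfo(vm, vcpuid, &intinfo)) {
 *		// inject the event described by 'intinfo' (possibly
 *		// promoted to a double fault) into the guest
 *	}
 */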
enum vm_reg_name vm_segment_name(int seg_encoding);
struct vm_copyinfo {
	uint64_t	gpa;
	size_t		len;
	void		*hva;
	void		*cookie;
};

/*
 * Set up 'copyinfo[]' to copy to/from guest linear address space starting
 * at 'gla' and 'len' bytes long. The 'prot' should be set to PROT_READ for
 * a copyin or PROT_WRITE for a copyout.
 *
 * Returns 0 on success.
 * Returns 1 if an exception was injected into the guest.
 * Returns -1 otherwise.
 *
 * The 'copyinfo[]' can be passed to 'vm_copyin()' or 'vm_copyout()' only if
 * the return value is 0. The 'copyinfo[]' resources should be freed by
 * calling 'vm_copy_teardown()' after the copy is done.
 */
int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo);
void vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo);
void vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    void *kaddr, size_t len);
void vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len);
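
/*
 * A copyin from a guest linear address, as a sketch ('paging' and 'gla'
 * come from the VM-exit being handled; MAX_COPYINFO is an illustrative
 * constant sized for the worst-case number of page crossings):
 *
 *	struct vm_copyinfo copyinfo[MAX_COPYINFO];
 *	int error;
 *
 *	error = vm_copy_setup(vm, vcpuid, paging, gla, len, PROT_READ,
 *	    copyinfo, MAX_COPYINFO);
 *	if (error == 0) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, MAX_COPYINFO);
 *	} else if (error == 1) {
 *		// an exception was injected; resume the guest
 *	} else {
 *		// the copy could not be set up
 *	}
 */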
int vcpu_trace_exceptions(struct vm *vm, int vcpuid);
#endif	/* _KERNEL */

#define	VM_MAXCPU	16			/* maximum virtual cpus */
/*
 * Identifiers for optional vmm capabilities
 */
enum vm_cap_type {
	VM_CAP_HALT_EXIT,
	VM_CAP_MTRAP_EXIT,
	VM_CAP_PAUSE_EXIT,
	VM_CAP_UNRESTRICTED_GUEST,
	VM_CAP_ENABLE_INVPCID,
	VM_CAP_MAX
};
enum vm_intr_trigger {
	EDGE_TRIGGER,
	LEVEL_TRIGGER
};
/*
 * The 'access' field has the format specified in Table 21-2 of the Intel
 * Architecture Manual vol 3b.
 *
 * XXX The contents of the 'access' field are architecturally defined except
 * bit 16 - Segment Unusable.
 */
struct seg_desc {
	uint64_t	base;
	uint32_t	limit;
	uint32_t	access;
};
#define	SEG_DESC_TYPE(access)		((access) & 0x001f)
#define	SEG_DESC_DPL(access)		(((access) >> 5) & 0x3)
#define	SEG_DESC_PRESENT(access)	(((access) & 0x0080) ? 1 : 0)
#define	SEG_DESC_DEF32(access)		(((access) & 0x4000) ? 1 : 0)
#define	SEG_DESC_GRANULARITY(access)	(((access) & 0x8000) ? 1 : 0)
#define	SEG_DESC_UNUSABLE(access)	(((access) & 0x10000) ? 1 : 0)
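
/*
 * For example, checking that a code segment descriptor is usable, present
 * and 32-bit could look like this (a sketch using the accessors above):
 *
 *	uint32_t access = desc->access;
 *
 *	if (!SEG_DESC_UNUSABLE(access) && SEG_DESC_PRESENT(access) &&
 *	    SEG_DESC_DEF32(access)) {
 *		// treat as a present 32-bit segment
 *	}
 */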
enum vm_cpu_mode {
	CPU_MODE_REAL,
	CPU_MODE_PROTECTED,
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};
enum vm_paging_mode {
	PAGING_MODE_FLAT,
	PAGING_MODE_32,
	PAGING_MODE_PAE,
	PAGING_MODE_64,
};
struct vm_guest_paging {
	uint64_t	cr3;
	int		cpl;
	enum vm_cpu_mode cpu_mode;
	enum vm_paging_mode paging_mode;
};
/*
 * The data structures 'vie' and 'vie_op' are meant to be opaque to the
 * consumers of instruction decoding. The only reason why their contents
 * need to be exposed is because they are part of the 'vm_exit' structure.
 */
struct vie_op {
	uint8_t		op_byte;	/* actual opcode byte */
	uint8_t		op_type;	/* type of operation (e.g. MOV) */
	uint16_t	op_flags;
};

#define	VIE_INST_SIZE	15
struct vie {
	uint8_t		inst[VIE_INST_SIZE];	/* instruction bytes */
	uint8_t		num_valid;		/* size of the instruction */
	uint8_t		num_processed;

	uint8_t		addrsize:4, opsize:4;	/* address and operand sizes */
	uint8_t		rex_w:1,		/* REX prefix */
			rex_r:1,
			rex_x:1,
			rex_b:1,
			rex_present:1,
			repz_present:1,		/* REP/REPE/REPZ prefix */
			repnz_present:1,	/* REPNE/REPNZ prefix */
			opsize_override:1,	/* Operand size override */
			addrsize_override:1,	/* Address size override */
			segment_override:1;	/* Segment override */

	uint8_t		mod:2,			/* ModRM byte */
			reg:4,
			rm:4;

	uint8_t		ss:2,			/* SIB byte */
			index:4,
			base:4;

	uint8_t		disp_bytes;
	uint8_t		imm_bytes;

	uint8_t		scale;
	int		base_register;		/* VM_REG_GUEST_xyz */
	int		index_register;		/* VM_REG_GUEST_xyz */
	int		segment_register;	/* VM_REG_GUEST_xyz */

	int64_t		displacement;		/* optional addr displacement */
	int64_t		immediate;		/* optional immediate operand */

	uint8_t		decoded;	/* set to 1 if successfully decoded */

	struct vie_op	op;			/* opcode description */
};
enum vm_exitcode {
	VM_EXITCODE_INOUT,
	VM_EXITCODE_VMX,
	VM_EXITCODE_BOGUS,
	VM_EXITCODE_RDMSR,
	VM_EXITCODE_WRMSR,
	VM_EXITCODE_HLT,
	VM_EXITCODE_MTRAP,
	VM_EXITCODE_PAUSE,
	VM_EXITCODE_PAGING,
	VM_EXITCODE_INST_EMUL,
	VM_EXITCODE_SPINUP_AP,
	VM_EXITCODE_DEPRECATED1,	/* used to be SPINDOWN_CPU */
	VM_EXITCODE_RENDEZVOUS,
	VM_EXITCODE_IOAPIC_EOI,
	VM_EXITCODE_SUSPENDED,
	VM_EXITCODE_INOUT_STR,
	VM_EXITCODE_TASK_SWITCH,
	VM_EXITCODE_MONITOR,
	VM_EXITCODE_MWAIT,
	VM_EXITCODE_SVM,
	VM_EXITCODE_MAX
};
struct vm_inout {
	uint16_t	bytes:3;	/* 1 or 2 or 4 */
	uint16_t	in:1;
	uint16_t	string:1;
	uint16_t	rep:1;
	uint16_t	port;
	uint32_t	eax;		/* valid for out */
};
struct vm_inout_str {
	struct vm_inout	inout;		/* must be the first element */
	struct vm_guest_paging paging;
	uint64_t	rflags;
	uint64_t	cr0;
	uint64_t	index;
	uint64_t	count;		/* rep=1 (%rcx), rep=0 (1) */
	int		addrsize;
	enum vm_reg_name seg_name;
	struct seg_desc seg_desc;
};
enum task_switch_reason {
	TSR_CALL,
	TSR_IRET,
	TSR_JMP,
	TSR_IDT_GATE,	/* task gate in IDT */
};
struct vm_task_switch {
	uint16_t	tsssel;		/* new TSS selector */
	int		ext;		/* task switch due to external event */
	uint32_t	errcode;
	int		errcode_valid;	/* push 'errcode' on the new stack */
	enum task_switch_reason reason;
	struct vm_guest_paging paging;
};
struct vm_exit {
	enum vm_exitcode	exitcode;
	int			inst_length;	/* 0 means unknown */
	uint64_t		rip;
	union {
		struct vm_inout	inout;
		struct vm_inout_str inout_str;
		struct {
			uint64_t	gpa;
			int		fault_type;
		} paging;
		struct {
			uint64_t	gpa;
			uint64_t	gla;
			uint64_t	cs_base;
			int		cs_d;		/* CS.D */
			struct vm_guest_paging paging;
			struct vie	vie;
		} inst_emul;
		/*
		 * VMX specific payload. Used when there is no "better"
		 * exitcode to represent the VM-exit.
		 */
		struct {
			int		status;		/* vmx inst status */
			/*
			 * 'exit_reason' and 'exit_qualification' are valid
			 * only if 'status' is zero.
			 */
			uint32_t	exit_reason;
			uint64_t	exit_qualification;
			/*
			 * 'inst_error' and 'inst_type' are valid
			 * only if 'status' is non-zero.
			 */
			int		inst_type;
			int		inst_error;
		} vmx;
		/*
		 * SVM specific payload.
		 */
		struct {
			uint64_t	exitcode;
			uint64_t	exitinfo1;
			uint64_t	exitinfo2;
		} svm;
		struct {
			uint32_t	code;		/* ecx value */
			uint64_t	wval;
		} msr;
		struct {
			int		vcpu;
			uint64_t	rip;
		} spinup_ap;
		struct {
			uint64_t	rflags;
		} hlt;
		struct {
			int		vector;
		} ioapic_eoi;
		struct {
			enum vm_suspend_how how;
		} suspended;
		struct vm_task_switch task_switch;
	} u;
};
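
/*
 * A consumer typically switches on 'exitcode' after vm_run() returns; a
 * sketch (the handler bodies are illustrative):
 *
 *	struct vm_exit *vme = vm_exitinfo(vm, vcpuid);
 *
 *	switch (vme->exitcode) {
 *	case VM_EXITCODE_INOUT:
 *		// emulate the port access described by vme->u.inout
 *		break;
 *	case VM_EXITCODE_SUSPENDED:
 *		// vme->u.suspended.how gives the reason for the suspend
 *		break;
 *	default:
 *		break;
 *	}
 */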
/* APIs to inject faults into the guest */
void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
    int errcode);

static __inline void
vm_inject_ud(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
}

static __inline void
vm_inject_gp(void *vm, int vcpuid)
{
	vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
}

static __inline void
vm_inject_ac(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
}

static __inline void
vm_inject_ss(void *vm, int vcpuid, int errcode)
{
	vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
}
void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);

int vm_restart_instruction(void *vm, int vcpuid);

#endif	/* _VMM_H_ */