/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_ioport.h"
#include "vmm_lapic.h"
/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
    struct mtx mtx;                     /* (o) protects 'state' and 'hostcpu' */
    enum vcpu_state state;              /* (o) vcpu state */
    int hostcpu;                        /* (o) vcpu's host cpu */
    struct vlapic *vlapic;              /* (i) APIC device model */
    enum x2apic_state x2apic_state;     /* (i) APIC mode */
    uint64_t exitintinfo;               /* (i) events pending at VM exit */
    int nmi_pending;                    /* (i) NMI pending */
    int extint_pending;                 /* (i) INTR pending */
    int exception_pending;              /* (i) exception pending */
    int exc_vector;                     /* (x) exception collateral */
    int exc_errcode_valid;
    uint32_t exc_errcode;
    struct savefpu *guestfpu;           /* (a,i) guest fpu state */
    uint64_t guest_xcr0;                /* (i) guest %xcr0 register */
    void *stats;                        /* (a,i) statistics */
    struct vm_exit exitinfo;            /* (x) exit reason and collateral */
    uint64_t nextrip;                   /* (x) next instruction to execute */
};
#define vcpu_lock_initialized(v)    mtx_initialized(&((v)->mtx))
#define vcpu_lock_init(v)           mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define vcpu_lock(v)                mtx_lock_spin(&((v)->mtx))
#define vcpu_unlock(v)              mtx_unlock_spin(&((v)->mtx))
#define vcpu_assert_locked(v)       mtx_assert(&((v)->mtx), MA_OWNED)
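/*
 * Typical use of the vcpu lock (a sketch, mirroring what
 * vcpu_set_state_locked() does below): a thread that wants exclusive
 * ownership of a vcpu sleeps until the vcpu becomes idle.
 *
 *      vcpu_lock(vcpu);
 *      while (vcpu->state != VCPU_IDLE)
 *              msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
 *      ... perform the state transition ...
 *      vcpu_unlock(vcpu);
 */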
struct mem_seg {
    vm_paddr_t gpa;
    size_t len;
    boolean_t wired;
    vm_object_t object;
};
#define VM_MAX_MEMORY_SEGMENTS  2

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
    void *cookie;                       /* (i) cpu-specific data */
    void *iommu;                        /* (x) iommu-specific data */
    struct vhpet *vhpet;                /* (i) virtual HPET */
    struct vioapic *vioapic;            /* (i) virtual ioapic */
    struct vatpic *vatpic;              /* (i) virtual atpic */
    struct vatpit *vatpit;              /* (i) virtual atpit */
    struct vpmtmr *vpmtmr;              /* (i) virtual ACPI PM timer */
    struct vrtc *vrtc;                  /* (o) virtual RTC */
    volatile cpuset_t active_cpus;      /* (i) active vcpus */
    int suspend;                        /* (i) stop VM execution */
    volatile cpuset_t suspended_cpus;   /* (i) suspended vcpus */
    volatile cpuset_t halted_cpus;      /* (x) cpus in a hard halt */
    cpuset_t rendezvous_req_cpus;       /* (x) rendezvous requested */
    cpuset_t rendezvous_done_cpus;      /* (x) rendezvous finished */
    void *rendezvous_arg;               /* (x) rendezvous func/arg */
    vm_rendezvous_func_t rendezvous_func;
    struct mtx rendezvous_mtx;          /* (o) rendezvous lock */
    int num_mem_segs;                   /* (o) guest memory segments */
    struct mem_seg mem_segs[VM_MAX_MEMORY_SEGMENTS];
    struct vmspace *vmspace;            /* (o) guest's address space */
    char name[VM_MAX_NAMELEN];          /* (o) virtual machine name */
    struct vcpu vcpu[VM_MAXCPU];        /* (i) guest vcpus */
};
static int vmm_initialized;

static struct vmm_ops *ops;

#define VMM_INIT(num)   (ops != NULL ? (*ops->init)(num) : 0)
#define VMM_CLEANUP()   (ops != NULL ? (*ops->cleanup)() : 0)
#define VMM_RESUME()    (ops != NULL ? (*ops->resume)() : 0)

#define VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap) : NULL)
#define VMRUN(vmi, vcpu, rip, pmap, rptr, sptr) \
    (ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, rptr, sptr) : ENXIO)
#define VMCLEANUP(vmi)  (ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define VMSPACE_ALLOC(min, max) \
    (ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define VMSPACE_FREE(vmspace) \
    (ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define VMGETREG(vmi, vcpu, num, retval) \
    (ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETREG(vmi, vcpu, num, val) \
    (ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define VMGETDESC(vmi, vcpu, num, desc) \
    (ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMSETDESC(vmi, vcpu, num, desc) \
    (ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMGETCAP(vmi, vcpu, num, retval) \
    (ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETCAP(vmi, vcpu, num, val) \
    (ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define VLAPIC_INIT(vmi, vcpu) \
    (ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define VLAPIC_CLEANUP(vmi, vlapic) \
    (ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)
#define fpu_start_emulating()   load_cr0(rcr0() | CR0_TS)
#define fpu_stop_emulating()    clts()
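/*
 * fpu_start_emulating() sets CR0.TS so that the next FPU access by the
 * host traps with #NM while the guest's FPU state is loaded;
 * fpu_stop_emulating() executes CLTS to clear CR0.TS before the FPU is
 * touched. This is the standard x86 lazy-FPU mechanism applied to the
 * guest/host state handoff (see restore_guest_fpustate() below).
 */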
static MALLOC_DEFINE(M_VM, "vm", "vm");

static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");
static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
    struct vcpu *vcpu = &vm->vcpu[i];

    VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
    if (destroy) {
        vmm_stat_free(vcpu->stats);
        fpu_save_area_free(vcpu->guestfpu);
    }
}
static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
    struct vcpu *vcpu;

    KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
        ("vcpu_init: invalid vcpu %d", vcpu_id));

    vcpu = &vm->vcpu[vcpu_id];

    if (create) {
        KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
            "initialized", vcpu_id));
        vcpu_lock_init(vcpu);
        vcpu->state = VCPU_IDLE;
        vcpu->hostcpu = NOCPU;
        vcpu->guestfpu = fpu_save_area_alloc();
        vcpu->stats = vmm_stat_alloc();
    }

    vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
    vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
    vcpu->exitintinfo = 0;
    vcpu->nmi_pending = 0;
    vcpu->extint_pending = 0;
    vcpu->exception_pending = 0;
    vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
    fpu_save_area_reset(vcpu->guestfpu);
    vmm_stat_init(vcpu->stats);
}
int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{

    return (trace_guest_exceptions);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
    struct vcpu *vcpu;

    if (cpuid < 0 || cpuid >= VM_MAXCPU)
        panic("vm_exitinfo: invalid cpuid %d", cpuid);

    vcpu = &vm->vcpu[cpuid];

    return (&vcpu->exitinfo);
}
static int
vmm_init(void)
{
    int error;

    vmm_host_state_init();

    vmm_ipinum = vmm_ipi_alloc();
    if (vmm_ipinum == 0)
        vmm_ipinum = IPI_AST;

    error = vmm_mem_init();
    if (error)
        return (error);

    if (vmm_is_intel())
        ops = &vmm_ops_intel;
    else if (vmm_is_amd())
        ops = &vmm_ops_amd;
    else
        return (ENXIO);

    vmm_resume_p = vmm_resume;

    return (VMM_INIT(vmm_ipinum));
}
static int
vmm_handler(module_t mod, int what, void *arg)
{
    int error;

    switch (what) {
    case MOD_LOAD:
        vmmdev_init();
        if (ppt_avail_devices() > 0)
            iommu_init();
        error = vmm_init();
        if (error == 0)
            vmm_initialized = 1;
        break;
    case MOD_UNLOAD:
        error = vmmdev_cleanup();
        if (error == 0) {
            vmm_resume_p = NULL;
            iommu_cleanup();
            if (vmm_ipinum != IPI_AST)
                vmm_ipi_free(vmm_ipinum);
            error = VMM_CLEANUP();
            /*
             * Something bad happened - prevent new
             * VMs from being created
             */
            if (error)
                vmm_initialized = 0;
        }
        break;
    default:
        error = 0;
        break;
    }
    return (error);
}
static moduledata_t vmm_kmod = {
    "vmm",
    vmm_handler,
    NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);
static void
vm_init(struct vm *vm, bool create)
{
    int i;

    vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
    vm->iommu = NULL;
    vm->vioapic = vioapic_init(vm);
    vm->vhpet = vhpet_init(vm);
    vm->vatpic = vatpic_init(vm);
    vm->vatpit = vatpit_init(vm);
    vm->vpmtmr = vpmtmr_init(vm);
    if (create)
        vm->vrtc = vrtc_init(vm);

    CPU_ZERO(&vm->active_cpus);

    vm->suspend = 0;
    CPU_ZERO(&vm->suspended_cpus);

    for (i = 0; i < VM_MAXCPU; i++)
        vcpu_init(vm, i, create);
}
int
vm_create(const char *name, struct vm **retvm)
{
    struct vm *vm;
    struct vmspace *vmspace;

    /*
     * If vmm.ko could not be successfully initialized then don't attempt
     * to create the virtual machine.
     */
    if (!vmm_initialized)
        return (ENXIO);

    if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
        return (EINVAL);

    vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
    if (vmspace == NULL)
        return (ENOMEM);

    vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
    strcpy(vm->name, name);
    vm->num_mem_segs = 0;
    vm->vmspace = vmspace;
    mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

    vm_init(vm, true);

    *retvm = vm;
    return (0);
}
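/*
 * Sketch of the overall lifecycle as driven by the vmmdev ioctl layer
 * (a hypothetical in-kernel caller, shown for orientation only):
 *
 *      struct vm *vm;
 *
 *      error = vm_create("testvm", &vm);           // vmspace + vcpus
 *      error = vm_malloc(vm, 0, 64 * 1024 * 1024); // guest memory segment
 *      error = vm_activate_cpu(vm, 0);
 *      error = vm_run(vm, &vmrun);                 // vmrun.cpuid == 0
 *      vm_destroy(vm);
 */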
static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

    if (seg->object != NULL)
        vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

    bzero(seg, sizeof(*seg));
}
static void
vm_cleanup(struct vm *vm, bool destroy)
{
    int i;

    ppt_unassign_all(vm);

    if (vm->iommu != NULL)
        iommu_destroy_domain(vm->iommu);

    if (destroy)
        vrtc_cleanup(vm->vrtc);
    else
        vrtc_reset(vm->vrtc);
    vpmtmr_cleanup(vm->vpmtmr);
    vatpit_cleanup(vm->vatpit);
    vhpet_cleanup(vm->vhpet);
    vatpic_cleanup(vm->vatpic);
    vioapic_cleanup(vm->vioapic);

    for (i = 0; i < VM_MAXCPU; i++)
        vcpu_cleanup(vm, i, destroy);

    VMCLEANUP(vm->cookie);

    if (destroy) {
        for (i = 0; i < vm->num_mem_segs; i++)
            vm_free_mem_seg(vm, &vm->mem_segs[i]);

        vm->num_mem_segs = 0;

        VMSPACE_FREE(vm->vmspace);
        vm->vmspace = NULL;
    }
}
void
vm_destroy(struct vm *vm)
{
    vm_cleanup(vm, true);
    free(vm, M_VM);
}
int
vm_reinit(struct vm *vm)
{
    int error;

    /*
     * A virtual machine can be reset only if all vcpus are suspended.
     */
    if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
        vm_cleanup(vm, false);
        vm_init(vm, false);
        error = 0;
    } else {
        error = EBUSY;
    }

    return (error);
}
const char *
vm_name(struct vm *vm)
{
    return (vm->name);
}
int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
    vm_object_t obj;

    if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
        return (ENOMEM);
    else
        return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

    vmm_mmio_free(vm->vmspace, gpa, len);
    return (0);
}
boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
    int i;
    vm_paddr_t gpabase, gpalimit;

    for (i = 0; i < vm->num_mem_segs; i++) {
        gpabase = vm->mem_segs[i].gpa;
        gpalimit = gpabase + vm->mem_segs[i].len;
        if (gpa >= gpabase && gpa < gpalimit)
            return (TRUE);      /* 'gpa' is regular memory */
    }

    if (ppt_is_mmio(vm, gpa))
        return (TRUE);          /* 'gpa' is pci passthru mmio */

    return (FALSE);
}
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
    int available, allocated;
    struct mem_seg *seg;
    vm_object_t object;
    vm_paddr_t g;

    if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
        return (EINVAL);

    available = allocated = 0;
    g = gpa;
    while (g < gpa + len) {
        if (vm_mem_allocated(vm, g))
            allocated++;
        else
            available++;
        g += PAGE_SIZE;
    }

    /*
     * If there are some allocated and some available pages in the address
     * range then it is an error.
     */
    if (allocated && available)
        return (EINVAL);

    /*
     * If the entire address range being requested has already been
     * allocated then there isn't anything more to do.
     */
    if (allocated && available == 0)
        return (0);

    if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
        return (E2BIG);

    seg = &vm->mem_segs[vm->num_mem_segs];

    if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
        return (ENOMEM);

    seg->gpa = gpa;
    seg->len = len;
    seg->object = object;
    seg->wired = FALSE;

    vm->num_mem_segs++;

    return (0);
}
vm_paddr_t
vm_maxmem(struct vm *vm)
{
    int i;
    vm_paddr_t gpa, maxmem;

    maxmem = 0;
    for (i = 0; i < vm->num_mem_segs; i++) {
        gpa = vm->mem_segs[i].gpa + vm->mem_segs[i].len;
        if (gpa > maxmem)
            maxmem = gpa;
    }
    return (maxmem);
}
static void
vm_gpa_unwire(struct vm *vm)
{
    int i, rv;
    struct mem_seg *seg;

    for (i = 0; i < vm->num_mem_segs; i++) {
        seg = &vm->mem_segs[i];
        if (!seg->wired)
            continue;

        rv = vm_map_unwire(&vm->vmspace->vm_map,
            seg->gpa, seg->gpa + seg->len,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
        KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
            "%#lx/%ld could not be unwired: %d",
            vm_name(vm), seg->gpa, seg->len, rv));

        seg->wired = FALSE;
    }
}
static int
vm_gpa_wire(struct vm *vm)
{
    int i, rv;
    struct mem_seg *seg;

    for (i = 0; i < vm->num_mem_segs; i++) {
        seg = &vm->mem_segs[i];
        if (seg->wired)
            continue;

        rv = vm_map_wire(&vm->vmspace->vm_map,
            seg->gpa, seg->gpa + seg->len,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
        if (rv != KERN_SUCCESS)
            break;

        seg->wired = TRUE;
    }

    if (i < vm->num_mem_segs) {
        /*
         * Undo the wiring before returning an error.
         */
        vm_gpa_unwire(vm);
        return (EAGAIN);
    }

    return (0);
}
static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
    int i, sz;
    vm_paddr_t gpa, hpa;
    struct mem_seg *seg;
    void *vp, *cookie, *host_domain;

    sz = PAGE_SIZE;
    host_domain = iommu_host_domain();

    for (i = 0; i < vm->num_mem_segs; i++) {
        seg = &vm->mem_segs[i];
        KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
            vm_name(vm), seg->gpa, seg->len));

        gpa = seg->gpa;
        while (gpa < seg->gpa + seg->len) {
            vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
                &cookie);
            KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
                vm_name(vm), gpa));

            vm_gpa_release(cookie);

            hpa = DMAP_TO_PHYS((uintptr_t)vp);
            if (map) {
                iommu_create_mapping(vm->iommu, gpa, hpa, sz);
                iommu_remove_mapping(host_domain, hpa, sz);
            } else {
                iommu_remove_mapping(vm->iommu, gpa, sz);
                iommu_create_mapping(host_domain, hpa, hpa, sz);
            }

            gpa += PAGE_SIZE;
        }
    }

    /*
     * Invalidate the cached translations associated with the domain
     * from which pages were removed.
     */
    if (map)
        iommu_invalidate_tlb(host_domain);
    else
        iommu_invalidate_tlb(vm->iommu);
}
#define vm_iommu_unmap(vm)  vm_iommu_modify((vm), FALSE)
#define vm_iommu_map(vm)    vm_iommu_modify((vm), TRUE)
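/*
 * vm_iommu_map() moves each wired guest page out of the host's iommu
 * domain and into the VM's domain so that a passthru device can DMA
 * directly into guest physical memory; vm_iommu_unmap() reverses this.
 */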
int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
    int error;

    error = ppt_unassign_device(vm, bus, slot, func);
    if (error)
        return (error);

    if (ppt_assigned_devices(vm) == 0) {
        vm_iommu_unmap(vm);
        vm_gpa_unwire(vm);
    }
    return (0);
}
int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
    int error;
    vm_paddr_t maxaddr;

    /*
     * Virtual machines with pci passthru devices get special treatment:
     * - the guest physical memory is wired
     * - the iommu is programmed to do the 'gpa' to 'hpa' translation
     *
     * We need to do this before the first pci passthru device is attached.
     */
    if (ppt_assigned_devices(vm) == 0) {
        KASSERT(vm->iommu == NULL,
            ("vm_assign_pptdev: iommu must be NULL"));
        maxaddr = vm_maxmem(vm);
        vm->iommu = iommu_create_domain(maxaddr);

        error = vm_gpa_wire(vm);
        if (error)
            return (error);

        vm_iommu_map(vm);
    }

    error = ppt_assign_device(vm, bus, slot, func);
    return (error);
}
void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
    int count, pageoff;
    vm_page_t m;

    pageoff = gpa & PAGE_MASK;
    if (len > PAGE_SIZE - pageoff)
        panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

    count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
        trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

    if (count == 1) {
        *cookie = m;
        return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
    } else {
        *cookie = NULL;
        return (NULL);
    }
}
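/*
 * The pointer returned by vm_gpa_hold() is an amd64 direct-map (DMAP)
 * address and remains valid until the caller passes the returned cookie
 * to vm_gpa_release().
 */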
void
vm_gpa_release(void *cookie)
{
    vm_page_t m = cookie;

    vm_page_lock(m);
    vm_page_unhold(m);
    vm_page_unlock(m);
}
int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg)
{
    int i;

    for (i = 0; i < vm->num_mem_segs; i++) {
        if (gpabase == vm->mem_segs[i].gpa) {
            seg->gpa = vm->mem_segs[i].gpa;
            seg->len = vm->mem_segs[i].len;
            seg->wired = vm->mem_segs[i].wired;
            return (0);
        }
    }
    return (-1);
}
int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
    vm_offset_t *offset, struct vm_object **object)
{
    int i;
    size_t seg_len;
    vm_paddr_t seg_gpa;
    vm_object_t seg_obj;

    for (i = 0; i < vm->num_mem_segs; i++) {
        if ((seg_obj = vm->mem_segs[i].object) == NULL)
            continue;

        seg_gpa = vm->mem_segs[i].gpa;
        seg_len = vm->mem_segs[i].len;

        if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
            *offset = gpa - seg_gpa;
            *object = seg_obj;
            vm_object_reference(seg_obj);
            return (0);
        }
    }

    return (EINVAL);
}
int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

    if (vcpu < 0 || vcpu >= VM_MAXCPU)
        return (EINVAL);

    if (reg >= VM_REG_LAST)
        return (EINVAL);

    return (VMGETREG(vm->cookie, vcpu, reg, retval));
}
int
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
{
    struct vcpu *vcpu;
    int error;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    if (reg >= VM_REG_LAST)
        return (EINVAL);

    error = VMSETREG(vm->cookie, vcpuid, reg, val);
    if (error || reg != VM_REG_GUEST_RIP)
        return (error);

    /* Set 'nextrip' to match the value of %rip */
    VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
    vcpu = &vm->vcpu[vcpuid];
    vcpu->nextrip = val;
    return (0);
}
static boolean_t
is_descriptor_table(int reg)
{

    switch (reg) {
    case VM_REG_GUEST_IDTR:
    case VM_REG_GUEST_GDTR:
        return (TRUE);
    default:
        return (FALSE);
    }
}

static boolean_t
is_segment_register(int reg)
{

    switch (reg) {
    case VM_REG_GUEST_ES:
    case VM_REG_GUEST_CS:
    case VM_REG_GUEST_SS:
    case VM_REG_GUEST_DS:
    case VM_REG_GUEST_FS:
    case VM_REG_GUEST_GS:
    case VM_REG_GUEST_TR:
    case VM_REG_GUEST_LDTR:
        return (TRUE);
    default:
        return (FALSE);
    }
}
int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

    if (vcpu < 0 || vcpu >= VM_MAXCPU)
        return (EINVAL);

    if (!is_segment_register(reg) && !is_descriptor_table(reg))
        return (EINVAL);

    return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
    if (vcpu < 0 || vcpu >= VM_MAXCPU)
        return (EINVAL);

    if (!is_segment_register(reg) && !is_descriptor_table(reg))
        return (EINVAL);

    return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

    /* flush host state to the pcb */
    fpuexit(curthread);

    /* restore guest FPU state */
    fpu_stop_emulating();
    fpurestore(vcpu->guestfpu);

    /* restore guest XCR0 if XSAVE is enabled in the host */
    if (rcr4() & CR4_XSAVE)
        load_xcr(0, vcpu->guest_xcr0);

    /*
     * The FPU is now "dirty" with the guest's state so turn on emulation
     * to trap any access to the FPU by the host.
     */
    fpu_start_emulating();
}
static void
save_guest_fpustate(struct vcpu *vcpu)
{

    if ((rcr0() & CR0_TS) == 0)
        panic("fpu emulation not enabled in host!");

    /* save guest XCR0 and restore host XCR0 */
    if (rcr4() & CR4_XSAVE) {
        vcpu->guest_xcr0 = rxcr(0);
        load_xcr(0, vmm_get_host_xcr0());
    }

    /* save guest FPU state */
    fpu_stop_emulating();
    fpusave(vcpu->guestfpu);
    fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
    int error;

    vcpu_assert_locked(vcpu);

    /*
     * State transitions from the vmmdev_ioctl() must always begin from
     * the VCPU_IDLE state. This guarantees that there is only a single
     * ioctl() operating on a vcpu at any point.
     */
    if (from_idle) {
        while (vcpu->state != VCPU_IDLE)
            msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
    } else {
        KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
            "vcpu idle state"));
    }

    if (vcpu->state == VCPU_RUNNING) {
        KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
            "mismatch for running vcpu", curcpu, vcpu->hostcpu));
    } else {
        KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
            "vcpu that is not running", vcpu->hostcpu));
    }

    /*
     * The following state transitions are allowed:
     * IDLE -> FROZEN -> IDLE
     * FROZEN -> RUNNING -> FROZEN
     * FROZEN -> SLEEPING -> FROZEN
     */
    switch (vcpu->state) {
    case VCPU_IDLE:
    case VCPU_RUNNING:
    case VCPU_SLEEPING:
        error = (newstate != VCPU_FROZEN);
        break;
    case VCPU_FROZEN:
        error = (newstate == VCPU_FROZEN);
        break;
    default:
        error = 1;
        break;
    }

    if (error)
        return (EBUSY);

    vcpu->state = newstate;
    if (newstate == VCPU_RUNNING)
        vcpu->hostcpu = curcpu;
    else
        vcpu->hostcpu = NOCPU;

    if (newstate == VCPU_IDLE)
        wakeup(&vcpu->state);

    return (0);
}
static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
    int error;

    if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
        panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
    int error;

    if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
        panic("Error %d setting state to %d", error, newstate);
}
static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

    KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

    /*
     * Update 'rendezvous_func' and execute a write memory barrier to
     * ensure that it is visible across all host cpus. This is not needed
     * for correctness but it does ensure that all the vcpus will notice
     * that the rendezvous is requested immediately.
     */
    vm->rendezvous_func = func;
    wmb();
}

#define RENDEZVOUS_CTR0(vm, vcpuid, fmt) \
    do { \
        if (vcpuid >= 0) \
            VCPU_CTR0(vm, vcpuid, fmt); \
        else \
            VM_CTR0(vm, fmt); \
    } while (0)
static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

    KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
        ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

    mtx_lock(&vm->rendezvous_mtx);
    while (vm->rendezvous_func != NULL) {
        /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
        CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

        if (vcpuid != -1 &&
            CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
            !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
            VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
            (*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
            CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
        }
        if (CPU_CMP(&vm->rendezvous_req_cpus,
            &vm->rendezvous_done_cpus) == 0) {
            VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
            vm_set_rendezvous_func(vm, NULL);
            wakeup(&vm->rendezvous_func);
            break;
        }
        RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
        mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
            "vmrndv", 0);
    }
    mtx_unlock(&vm->rendezvous_mtx);
}
/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
    struct vcpu *vcpu;
    const char *wmesg;
    int t, vcpu_halted, vm_halted;

    KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

    vcpu = &vm->vcpu[vcpuid];
    vcpu_halted = 0;
    vm_halted = 0;

    vcpu_lock(vcpu);
    while (1) {
        /*
         * Do a final check for pending NMI or interrupts before
         * really putting this thread to sleep. Also check for
         * software events that would cause this vcpu to wakeup.
         *
         * These interrupts/events could have happened after the
         * vcpu returned from VMRUN() and before it acquired the
         * vcpu lock above.
         */
        if (vm->rendezvous_func != NULL || vm->suspend)
            break;
        if (vm_nmi_pending(vm, vcpuid))
            break;
        if (!intr_disabled) {
            if (vm_extint_pending(vm, vcpuid) ||
                vlapic_pending_intr(vcpu->vlapic, NULL)) {
                break;
            }
        }

        /* Don't go to sleep if the vcpu thread needs to yield */
        if (vcpu_should_yield(vm, vcpuid))
            break;

        /*
         * Some Linux guests implement "halt" by having all vcpus
         * execute HLT with interrupts disabled. 'halted_cpus' keeps
         * track of the vcpus that have entered this state. When all
         * vcpus enter the halted state the virtual machine is halted.
         */
        if (intr_disabled) {
            wmesg = "vmhalt";
            VCPU_CTR0(vm, vcpuid, "Halted");
            if (!vcpu_halted && halt_detection_enabled) {
                vcpu_halted = 1;
                CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
            }
            if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
                vm_halted = 1;
                break;
            }
        } else {
            wmesg = "vmidle";
        }

        t = ticks;
        vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
        /*
         * XXX msleep_spin() cannot be interrupted by signals so
         * wake up periodically to check pending signals.
         */
        msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
        vcpu_require_state_locked(vcpu, VCPU_FROZEN);
        vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
    }

    if (vcpu_halted)
        CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

    vcpu_unlock(vcpu);

    if (vm_halted)
        vm_suspend(vm, VM_SUSPEND_HALT);

    return (0);
}
static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
    int rv, ftype;
    struct vm_map *map;
    struct vcpu *vcpu;
    struct vm_exit *vme;

    vcpu = &vm->vcpu[vcpuid];
    vme = &vcpu->exitinfo;

    KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
        __func__, vme->inst_length));

    ftype = vme->u.paging.fault_type;
    KASSERT(ftype == VM_PROT_READ ||
        ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
        ("vm_handle_paging: invalid fault_type %d", ftype));

    if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
        rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
            vme->u.paging.gpa, ftype);
        if (rv == 0) {
            VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
                ftype == VM_PROT_READ ? "accessed" : "dirty",
                vme->u.paging.gpa);
            return (0);
        }
    }

    map = &vm->vmspace->vm_map;
    rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

    VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
        "ftype = %d", rv, vme->u.paging.gpa, ftype);

    if (rv != KERN_SUCCESS)
        return (EFAULT);

    return (0);
}
static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
    struct vie *vie;
    struct vcpu *vcpu;
    struct vm_exit *vme;
    uint64_t gla, gpa;
    struct vm_guest_paging *paging;
    mem_region_read_t mread;
    mem_region_write_t mwrite;
    enum vm_cpu_mode cpu_mode;
    int cs_d, error, length;

    vcpu = &vm->vcpu[vcpuid];
    vme = &vcpu->exitinfo;

    gla = vme->u.inst_emul.gla;
    gpa = vme->u.inst_emul.gpa;
    cs_d = vme->u.inst_emul.cs_d;
    vie = &vme->u.inst_emul.vie;
    paging = &vme->u.inst_emul.paging;
    cpu_mode = paging->cpu_mode;

    VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);

    /* Fetch, decode and emulate the faulting instruction */
    if (vie->num_valid == 0) {
        /*
         * If the instruction length is not known then assume a
         * maximum size instruction.
         */
        length = vme->inst_length ? vme->inst_length : VIE_INST_SIZE;
        error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip,
            length, vie);
    } else {
        /*
         * The instruction bytes have already been copied into 'vie'
         */
        error = 0;
    }
    if (error == 1)
        return (0);     /* Resume guest to handle page fault */
    else if (error == -1)
        return (EFAULT);
    else if (error != 0)
        panic("%s: vmm_fetch_instruction error %d", __func__, error);

    if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0)
        return (EFAULT);

    /*
     * If the instruction length was not specified then update it now
     * along with 'nextrip'.
     */
    if (vme->inst_length == 0) {
        vme->inst_length = vie->num_processed;
        vcpu->nextrip += vie->num_processed;
    }

    /* return to userland unless this is an in-kernel emulated device */
    if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
        mread = lapic_mmio_read;
        mwrite = lapic_mmio_write;
    } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
        mread = vioapic_mmio_read;
        mwrite = vioapic_mmio_write;
    } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
        mread = vhpet_mmio_read;
        mwrite = vhpet_mmio_write;
    } else {
        *retu = true;
        return (0);
    }

    error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
        mread, mwrite, retu);

    return (error);
}
static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
    int i;
    struct vcpu *vcpu;

    vcpu = &vm->vcpu[vcpuid];

    CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

    /*
     * Wait until all 'active_cpus' have suspended themselves.
     *
     * Since a VM may be suspended at any time including when one or
     * more vcpus are doing a rendezvous we need to call the rendezvous
     * handler while we are waiting to prevent a deadlock.
     */
    vcpu_lock(vcpu);
    while (1) {
        if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
            VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
            break;
        }

        if (vm->rendezvous_func == NULL) {
            VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
            vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
            msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
            vcpu_require_state_locked(vcpu, VCPU_FROZEN);
        } else {
            VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
            vcpu_unlock(vcpu);
            vm_handle_rendezvous(vm, vcpuid);
            vcpu_lock(vcpu);
        }
    }
    vcpu_unlock(vcpu);

    /*
     * Wakeup the other sleeping vcpus and return to userspace.
     */
    for (i = 0; i < VM_MAXCPU; i++) {
        if (CPU_ISSET(i, &vm->suspended_cpus)) {
            vcpu_notify_event(vm, i, false);
        }
    }

    *retu = true;
    return (0);
}
int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
    int i;

    if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
        return (EINVAL);

    if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
        VM_CTR2(vm, "virtual machine already suspended %d/%d",
            vm->suspend, how);
        return (EALREADY);
    }

    VM_CTR1(vm, "virtual machine successfully suspended %d", how);

    /*
     * Notify all active vcpus that they are now suspended.
     */
    for (i = 0; i < VM_MAXCPU; i++) {
        if (CPU_ISSET(i, &vm->active_cpus))
            vcpu_notify_event(vm, i, false);
    }

    return (0);
}
void
vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
{
    struct vm_exit *vmexit;

    KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
        ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

    vmexit = vm_exitinfo(vm, vcpuid);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_SUSPENDED;
    vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
{
    struct vm_exit *vmexit;

    KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));

    vmexit = vm_exitinfo(vm, vcpuid);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
    vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
}

void
vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
{
    struct vm_exit *vmexit;

    vmexit = vm_exitinfo(vm, vcpuid);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_BOGUS;
    vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
}
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
    int error, vcpuid;
    struct vcpu *vcpu;
    struct pcb *pcb;
    uint64_t tscval;
    struct vm_exit *vme;
    bool retu, intr_disabled;
    pmap_t pmap;
    void *rptr, *sptr;

    vcpuid = vmrun->cpuid;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    if (!CPU_ISSET(vcpuid, &vm->active_cpus))
        return (EINVAL);

    if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
        return (EINVAL);

    rptr = &vm->rendezvous_func;
    sptr = &vm->suspend;
    pmap = vmspace_pmap(vm->vmspace);
    vcpu = &vm->vcpu[vcpuid];
    vme = &vcpu->exitinfo;
restart:
    critical_enter();

    KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
        ("vm_run: absurd pm_active"));

    tscval = rdtsc();

    pcb = PCPU_GET(curpcb);
    set_pcb_flags(pcb, PCB_FULL_IRET);

    restore_guest_fpustate(vcpu);

    vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
    error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, rptr, sptr);
    vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

    save_guest_fpustate(vcpu);

    vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

    critical_exit();

    if (error == 0) {
        retu = false;
        vcpu->nextrip = vme->rip + vme->inst_length;
        switch (vme->exitcode) {
        case VM_EXITCODE_SUSPENDED:
            error = vm_handle_suspend(vm, vcpuid, &retu);
            break;
        case VM_EXITCODE_IOAPIC_EOI:
            vioapic_process_eoi(vm, vcpuid,
                vme->u.ioapic_eoi.vector);
            break;
        case VM_EXITCODE_RENDEZVOUS:
            vm_handle_rendezvous(vm, vcpuid);
            error = 0;
            break;
        case VM_EXITCODE_HLT:
            intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
            error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
            break;
        case VM_EXITCODE_PAGING:
            error = vm_handle_paging(vm, vcpuid, &retu);
            break;
        case VM_EXITCODE_INST_EMUL:
            error = vm_handle_inst_emul(vm, vcpuid, &retu);
            break;
        case VM_EXITCODE_INOUT:
        case VM_EXITCODE_INOUT_STR:
            error = vm_handle_inout(vm, vcpuid, vme, &retu);
            break;
        case VM_EXITCODE_MONITOR:
        case VM_EXITCODE_MWAIT:
            vm_inject_ud(vm, vcpuid);
            break;
        default:
            retu = true;        /* handled in userland */
            break;
        }
    }

    if (error == 0 && retu == false)
        goto restart;

    /* copy the exit information */
    bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
    return (error);
}
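/*
 * A userland VMM such as bhyve(8) drives vm_run() from a per-vcpu loop
 * via the VM_RUN ioctl. A rough sketch of such a loop (hypothetical
 * userland code; 'handle_exit' is not a real function):
 *
 *      struct vm_run vmrun = { .cpuid = vcpuid };
 *
 *      for (;;) {
 *              error = ioctl(vmfd, VM_RUN, &vmrun);
 *              if (error != 0 || !handle_exit(&vmrun.vm_exit))
 *                      break;
 *      }
 */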
int
vm_restart_instruction(void *arg, int vcpuid)
{
    struct vm *vm;
    struct vcpu *vcpu;
    enum vcpu_state state;
    uint64_t rip;
    int error;

    vm = arg;
    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];
    state = vcpu_get_state(vm, vcpuid, NULL);
    if (state == VCPU_RUNNING) {
        /*
         * When a vcpu is "running" the next instruction is determined
         * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
         * Thus setting 'inst_length' to zero will cause the current
         * instruction to be restarted.
         */
        vcpu->exitinfo.inst_length = 0;
        VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
            "setting inst_length to zero", vcpu->exitinfo.rip);
    } else if (state == VCPU_FROZEN) {
        /*
         * When a vcpu is "frozen" it is outside the critical section
         * around VMRUN() and 'nextrip' points to the next instruction.
         * Thus instruction restart is achieved by setting 'nextrip'
         * to the vcpu's %rip.
         */
        error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
        KASSERT(!error, ("%s: error %d getting rip", __func__, error));
        VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
            "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
        vcpu->nextrip = rip;
    } else {
        panic("%s: invalid state %d", __func__, state);
    }
    return (0);
}
int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
    struct vcpu *vcpu;
    int type, vector;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];

    if (info & VM_INTINFO_VALID) {
        type = info & VM_INTINFO_TYPE;
        vector = info & 0xff;
        if (type == VM_INTINFO_NMI && vector != IDT_NMI)
            return (EINVAL);
        if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
            return (EINVAL);
        if (info & VM_INTINFO_RSVD)
            return (EINVAL);
    } else {
        info = 0;
    }
    VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
    vcpu->exitintinfo = info;
    return (0);
}

enum exc_class {
    EXC_BENIGN,
    EXC_CONTRIBUTORY,
    EXC_PAGEFAULT
};

#define IDT_VE  20      /* Virtualization Exception (Intel specific) */
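/*
 * Layout of the 64-bit 'intinfo' words used below, which mirrors the
 * hardware event-injection format shared by VT-x and SVM: bits 0:7 hold
 * the vector, bits 8:10 the event type (VM_INTINFO_TYPE), bit 11
 * (VM_INTINFO_DEL_ERRCODE) indicates that an error code is delivered in
 * bits 32:63, and bit 31 (VM_INTINFO_VALID) marks the word as valid.
 */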
static enum exc_class
exception_class(uint64_t info)
{
    int type, vector;

    KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
    type = info & VM_INTINFO_TYPE;
    vector = info & 0xff;

    /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
    switch (type) {
    case VM_INTINFO_HWINTR:
    case VM_INTINFO_SWINTR:
    case VM_INTINFO_NMI:
        return (EXC_BENIGN);
    default:
        /*
         * Hardware exception.
         *
         * SVM and VT-x use identical type values to represent NMI,
         * hardware interrupt and software interrupt.
         *
         * SVM uses type '3' for all exceptions. VT-x uses type '3'
         * for exceptions except #BP and #OF. #BP and #OF use a type
         * value of '5' or '6'. Therefore we don't check for explicit
         * values of 'type' to classify 'intinfo' into a hardware
         * exception.
         */
        break;
    }

    switch (vector) {
    case IDT_PF:
    case IDT_VE:
        return (EXC_PAGEFAULT);
    case IDT_DE:
    case IDT_TS:
    case IDT_NP:
    case IDT_SS:
    case IDT_GP:
        return (EXC_CONTRIBUTORY);
    default:
        return (EXC_BENIGN);
    }
}
static int
nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
    uint64_t *retinfo)
{
    enum exc_class exc1, exc2;
    int type1, vector1;

    KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
    KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));

    /*
     * If an exception occurs while attempting to call the double-fault
     * handler the processor enters shutdown mode (aka triple fault).
     */
    type1 = info1 & VM_INTINFO_TYPE;
    vector1 = info1 & 0xff;
    if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
        VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
            info1, info2);
        vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
        *retinfo = 0;
        return (0);
    }

    /*
     * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
     */
    exc1 = exception_class(info1);
    exc2 = exception_class(info2);
    if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
        (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
        /* Convert nested fault into a double fault. */
        *retinfo = IDT_DF;
        *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
        *retinfo |= VM_INTINFO_DEL_ERRCODE;
    } else {
        /* Handle exceptions serially */
        *retinfo = info2;
    }
    return (1);
}
static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
    uint64_t info = 0;

    if (vcpu->exception_pending) {
        info = vcpu->exc_vector & 0xff;
        info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
        if (vcpu->exc_errcode_valid) {
            info |= VM_INTINFO_DEL_ERRCODE;
            info |= (uint64_t)vcpu->exc_errcode << 32;
        }
    }
    return (info);
}
int
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
{
    struct vcpu *vcpu;
    uint64_t info1, info2;
    int valid;

    KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));

    vcpu = &vm->vcpu[vcpuid];

    info1 = vcpu->exitintinfo;
    vcpu->exitintinfo = 0;

    info2 = 0;
    if (vcpu->exception_pending) {
        info2 = vcpu_exception_intinfo(vcpu);
        vcpu->exception_pending = 0;
        VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
            vcpu->exc_vector, info2);
    }

    if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
        valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
    } else if (info1 & VM_INTINFO_VALID) {
        *retinfo = info1;
        valid = 1;
    } else if (info2 & VM_INTINFO_VALID) {
        *retinfo = info2;
        valid = 1;
    } else {
        valid = 0;
    }

    if (valid) {
        VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
            "retinfo(%#lx)", __func__, info1, info2, *retinfo);
    }

    return (valid);
}
int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];
    *info1 = vcpu->exitintinfo;
    *info2 = vcpu_exception_intinfo(vcpu);
    return (0);
}
int
vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
    struct vcpu *vcpu;
    int error;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    if (vector < 0 || vector >= 32)
        return (EINVAL);

    /*
     * A double fault exception should never be injected directly into
     * the guest. It is a derived exception that results from specific
     * combinations of nested faults.
     */
    if (vector == IDT_DF)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];

    if (vcpu->exception_pending) {
        VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
            "pending exception %d", vector, vcpu->exc_vector);
        return (EBUSY);
    }

    /*
     * From section 26.6.1 "Interruptibility State" in Intel SDM:
     *
     * Event blocking by "STI" or "MOV SS" is cleared after guest executes
     * one instruction or incurs an exception.
     */
    error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
    KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
        __func__, error));

    if (restart_instruction)
        vm_restart_instruction(vm, vcpuid);

    vcpu->exception_pending = 1;
    vcpu->exc_vector = vector;
    vcpu->exc_errcode = errcode;
    vcpu->exc_errcode_valid = errcode_valid;
    VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
    return (0);
}
void
vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
    int errcode)
{
    struct vm *vm;
    int error, restart_instruction;

    vm = vmarg;
    restart_instruction = 1;

    error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
        errcode, restart_instruction);
    KASSERT(error == 0, ("vm_inject_exception error %d", error));
}

void
vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
{
    struct vm *vm;
    int error;

    vm = vmarg;
    VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
        error_code, cr2);

    error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
    KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));

    vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
}
static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];

    vcpu->nmi_pending = 1;
    vcpu_notify_event(vm, vcpuid, false);
    return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

    vcpu = &vm->vcpu[vcpuid];

    return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

    vcpu = &vm->vcpu[vcpuid];

    if (vcpu->nmi_pending == 0)
        panic("vm_nmi_clear: inconsistent nmi_pending state");

    vcpu->nmi_pending = 0;
    vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}
static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");

int
vm_inject_extint(struct vm *vm, int vcpuid)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];

    vcpu->extint_pending = 1;
    vcpu_notify_event(vm, vcpuid, false);
    return (0);
}

int
vm_extint_pending(struct vm *vm, int vcpuid)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        panic("vm_extint_pending: invalid vcpuid %d", vcpuid);

    vcpu = &vm->vcpu[vcpuid];

    return (vcpu->extint_pending);
}

void
vm_extint_clear(struct vm *vm, int vcpuid)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        panic("vm_extint_clear: invalid vcpuid %d", vcpuid);

    vcpu = &vm->vcpu[vcpuid];

    if (vcpu->extint_pending == 0)
        panic("vm_extint_clear: inconsistent extint_pending state");

    vcpu->extint_pending = 0;
    vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
}
int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
    if (vcpu < 0 || vcpu >= VM_MAXCPU)
        return (EINVAL);

    if (type < 0 || type >= VM_CAP_MAX)
        return (EINVAL);

    return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
    if (vcpu < 0 || vcpu >= VM_MAXCPU)
        return (EINVAL);

    if (type < 0 || type >= VM_CAP_MAX)
        return (EINVAL);

    return (VMSETCAP(vm->cookie, vcpu, type, val));
}
struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
    return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

    return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

    return (vm->vhpet);
}
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
    int found, i, n;
    int b, s, f;
    char *val, *cp, *cp2;

    /*
     * The length of an environment variable is limited to 128 bytes which
     * puts an upper limit on the number of passthru devices that may be
     * specified using a single environment variable.
     *
     * Work around this by scanning multiple environment variable
     * names instead of a single one - yuck!
     */
    const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

    /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
    found = 0;
    for (i = 0; names[i] != NULL && !found; i++) {
        cp = val = kern_getenv(names[i]);
        while (cp != NULL && *cp != '\0') {
            if ((cp2 = strchr(cp, ' ')) != NULL)
                *cp2 = '\0';

            n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
            if (n == 3 && bus == b && slot == s && func == f) {
                found = 1;
                break;
            }

            if (cp2 != NULL)
                *cp2++ = ' ';

            cp = cp2;
        }
        freeenv(val);
    }
    return (found);
}
void *
vm_iommu_domain(struct vm *vm)
{

    return (vm->iommu);
}
int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
    int error;
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

    vcpu = &vm->vcpu[vcpuid];

    vcpu_lock(vcpu);
    error = vcpu_set_state_locked(vcpu, newstate, from_idle);
    vcpu_unlock(vcpu);

    return (error);
}
enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
    struct vcpu *vcpu;
    enum vcpu_state state;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

    vcpu = &vm->vcpu[vcpuid];

    vcpu_lock(vcpu);
    state = vcpu->state;
    if (hostcpu != NULL)
        *hostcpu = vcpu->hostcpu;
    vcpu_unlock(vcpu);

    return (state);
}
int
vm_activate_cpu(struct vm *vm, int vcpuid)
{

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    if (CPU_ISSET(vcpuid, &vm->active_cpus))
        return (EBUSY);

    VCPU_CTR0(vm, vcpuid, "activated");
    CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
    return (0);
}
cpuset_t
vm_active_cpus(struct vm *vm)
{

    return (vm->active_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{

    return (vm->suspended_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

    return (vm->vcpu[vcpuid].stats);
}
int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    *state = vm->vcpu[vcpuid].x2apic_state;

    return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    if (state >= X2APIC_STATE_LAST)
        return (EINVAL);

    vm->vcpu[vcpuid].x2apic_state = state;

    vlapic_set_x2apic_state(vm, vcpuid, state);

    return (0);
}
/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
    int hostcpu;
    struct vcpu *vcpu;

    vcpu = &vm->vcpu[vcpuid];

    vcpu_lock(vcpu);
    hostcpu = vcpu->hostcpu;
    if (vcpu->state == VCPU_RUNNING) {
        KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
        if (hostcpu != curcpu) {
            if (lapic_intr) {
                vlapic_post_intr(vcpu->vlapic, hostcpu,
                    vmm_ipinum);
            } else {
                ipi_cpu(hostcpu, vmm_ipinum);
            }
        } else {
            /*
             * If the 'vcpu' is running on 'curcpu' then it must
             * be sending a notification to itself (e.g. SELF_IPI).
             * The pending event will be picked up when the vcpu
             * transitions back to guest context.
             */
        }
    } else {
        KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
            "with hostcpu %d", vcpu->state, hostcpu));
        if (vcpu->state == VCPU_SLEEPING)
            wakeup_one(vcpu);
    }
    vcpu_unlock(vcpu);
}
struct vmspace *
vm_get_vmspace(struct vm *vm)
{

    return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
    /*
     * XXX apic id is assumed to be numerically identical to vcpu id
     */
    return (apicid);
}
void
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
    int i;

    /*
     * Enforce that this function is called without any locks
     */
    WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
    KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
        ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));

restart:
    mtx_lock(&vm->rendezvous_mtx);
    if (vm->rendezvous_func != NULL) {
        /*
         * If a rendezvous is already in progress then we need to
         * call the rendezvous handler in case this 'vcpuid' is one
         * of the targets of the rendezvous.
         */
        RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
        mtx_unlock(&vm->rendezvous_mtx);
        vm_handle_rendezvous(vm, vcpuid);
        goto restart;
    }
    KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
        "rendezvous is still in progress"));

    RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
    vm->rendezvous_req_cpus = dest;
    CPU_ZERO(&vm->rendezvous_done_cpus);
    vm->rendezvous_arg = arg;
    vm_set_rendezvous_func(vm, func);
    mtx_unlock(&vm->rendezvous_mtx);

    /*
     * Wake up any sleeping vcpus and trigger a VM-exit in any running
     * vcpus so they handle the rendezvous as soon as possible.
     */
    for (i = 0; i < VM_MAXCPU; i++) {
        if (CPU_ISSET(i, &dest))
            vcpu_notify_event(vm, i, false);
    }

    vm_handle_rendezvous(vm, vcpuid);
}
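/*
 * Example of initiating a rendezvous across all active vcpus (a sketch;
 * 'my_callback' is a hypothetical vm_rendezvous_func_t):
 *
 *      cpuset_t dest = vm_active_cpus(vm);
 *      vm_smp_rendezvous(vm, vcpuid, dest, my_callback, arg);
 *
 * Each targeted vcpu executes the callback exactly once inside
 * vm_handle_rendezvous() before any of them resumes guest execution.
 */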
struct vatpic *
vm_atpic(struct vm *vm)
{
    return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
    return (vm->vatpit);
}

struct vpmtmr *
vm_pmtmr(struct vm *vm)
{

    return (vm->vpmtmr);
}

struct vrtc *
vm_rtc(struct vm *vm)
{

    return (vm->vrtc);
}
enum vm_reg_name
vm_segment_name(int seg)
{
    static enum vm_reg_name seg_names[] = {
        VM_REG_GUEST_ES,
        VM_REG_GUEST_CS,
        VM_REG_GUEST_SS,
        VM_REG_GUEST_DS,
        VM_REG_GUEST_FS,
        VM_REG_GUEST_GS
    };

    KASSERT(seg >= 0 && seg < nitems(seg_names),
        ("%s: invalid segment encoding %d", __func__, seg));
    return (seg_names[seg]);
}
void
vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
    int idx;

    for (idx = 0; idx < num_copyinfo; idx++) {
        if (copyinfo[idx].cookie != NULL)
            vm_gpa_release(copyinfo[idx].cookie);
    }
    bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
}
int
vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
    int error, idx, nused;
    size_t n, off, remaining;
    void *hva, *cookie;
    uint64_t gpa;

    bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);

    nused = 0;
    remaining = len;
    while (remaining > 0) {
        KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
        error = vmm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
        if (error)
            return (error);
        off = gpa & PAGE_MASK;
        n = min(remaining, PAGE_SIZE - off);
        copyinfo[nused].gpa = gpa;
        copyinfo[nused].len = n;
        remaining -= n;
        gla += n;
        nused++;
    }

    for (idx = 0; idx < nused; idx++) {
        hva = vm_gpa_hold(vm, copyinfo[idx].gpa, copyinfo[idx].len,
            prot, &cookie);
        if (hva == NULL)
            break;
        copyinfo[idx].hva = hva;
        copyinfo[idx].cookie = cookie;
    }

    if (idx != nused) {
        vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
        return (-1);
    } else {
        return (0);
    }
}
void
vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
    size_t len)
{
    char *dst;
    int idx;

    dst = kaddr;
    idx = 0;
    while (len > 0) {
        bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
        len -= copyinfo[idx].len;
        dst += copyinfo[idx].len;
        idx++;
    }
}

void
vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len)
{
    const char *src;
    int idx;

    src = kaddr;
    idx = 0;
    while (len > 0) {
        bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
        len -= copyinfo[idx].len;
        src += copyinfo[idx].len;
        idx++;
    }
}
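/*
 * Typical use of the copy helpers when emulating an access to a guest
 * linear address (a sketch; assumes a two-entry copyinfo array suffices
 * for an access that straddles at most one page boundary):
 *
 *      struct vm_copyinfo copyinfo[2];
 *
 *      error = vm_copy_setup(vm, vcpuid, paging, gla, len, VM_PROT_READ,
 *          copyinfo, nitems(copyinfo));
 *      if (error == 0) {
 *              vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *              vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *      }
 */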
/*
 * Return the amount of in-use and wired memory for the VM. Since
 * these are global stats, only return the values for vCPU 0.
 */
VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
VMM_STAT_DECLARE(VMM_MEM_WIRED);

static void
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

    if (vcpu == 0) {
        vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
            PAGE_SIZE * vmspace_resident_count(vm->vmspace));
    }
}

static void
vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

    if (vcpu == 0) {
        vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
            PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
    }
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);