/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/md_var.h>

#include <x86/apicreg.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_ioport.h"
#include "vmm_lapic.h"
/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
        struct mtx      mtx;            /* (o) protects 'state' and 'hostcpu' */
        enum vcpu_state state;          /* (o) vcpu state */
        int             hostcpu;        /* (o) vcpu's host cpu */
        int             reqidle;        /* (i) request vcpu to idle */
        struct vlapic   *vlapic;        /* (i) APIC device model */
        enum x2apic_state x2apic_state; /* (i) APIC mode */
        uint64_t        exitintinfo;    /* (i) events pending at VM exit */
        int             nmi_pending;    /* (i) NMI pending */
        int             extint_pending; /* (i) INTR pending */
        int             exception_pending; /* (i) exception pending */
        int             exc_vector;     /* (x) exception collateral */
        int             exc_errcode_valid;
        uint32_t        exc_errcode;
        struct savefpu  *guestfpu;      /* (a,i) guest fpu state */
        uint64_t        guest_xcr0;     /* (i) guest %xcr0 register */
        void            *stats;         /* (a,i) statistics */
        struct vm_exit  exitinfo;       /* (x) exit reason and collateral */
        uint64_t        nextrip;        /* (x) next instruction to execute */
};
#define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define vcpu_lock_init(v)       mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define vcpu_lock(v)            mtx_lock_spin(&((v)->mtx))
#define vcpu_unlock(v)          mtx_unlock_spin(&((v)->mtx))
#define vcpu_assert_locked(v)   mtx_assert(&((v)->mtx), MA_OWNED)
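/*
 * Illustrative sketch, not part of the original source: per the annotations
 * above, any read-modify-write of 'state' or 'hostcpu' is done under the
 * spin mutex, e.g.:
 *
 *      vcpu_lock(vcpu);
 *      if (vcpu->state == VCPU_IDLE)
 *              vcpu->state = VCPU_FROZEN;
 *      vcpu_unlock(vcpu);
 *
 * This mirrors the pattern used by vcpu_set_state_locked() below.
 */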
struct mem_seg {
        size_t  len;
        bool    sysmem;
        struct vm_object *object;
};
#define VM_MAX_MEMSEGS  3

struct mem_map {
        vm_paddr_t      gpa;
        size_t          len;
        vm_ooffset_t    segoff;
        int             segid;
        int             prot;
        int             flags;
};
#define VM_MAX_MEMMAPS  4
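/*
 * Illustrative example (hypothetical values): a VM with 1 GB of guest RAM
 * could be described by one sysmem segment mapped 1:1 at gpa 0:
 *
 *      mem_segs[0] = { .len = 1 GB, .sysmem = true,  .object = <vm_object> }
 *      mem_maps[0] = { .gpa = 0, .len = 1 GB, .segoff = 0, .segid = 0,
 *                      .prot = VM_PROT_ALL, .flags = VM_MEMMAP_F_WIRED }
 *
 * Device memory uses a separate !sysmem segment; see vm_cleanup() for why
 * the two kinds are torn down differently.
 */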
/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
        void            *cookie;        /* (i) cpu-specific data */
        void            *iommu;         /* (x) iommu-specific data */
        struct vhpet    *vhpet;         /* (i) virtual HPET */
        struct vioapic  *vioapic;       /* (i) virtual ioapic */
        struct vatpic   *vatpic;        /* (i) virtual atpic */
        struct vatpit   *vatpit;        /* (i) virtual atpit */
        struct vpmtmr   *vpmtmr;        /* (i) virtual ACPI PM timer */
        struct vrtc     *vrtc;          /* (o) virtual RTC */
        volatile cpuset_t active_cpus;  /* (i) active vcpus */
        int             suspend;        /* (i) stop VM execution */
        volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
        volatile cpuset_t halted_cpus;  /* (x) cpus in a hard halt */
        cpuset_t        rendezvous_req_cpus; /* (x) rendezvous requested */
        cpuset_t        rendezvous_done_cpus; /* (x) rendezvous finished */
        void            *rendezvous_arg; /* (x) rendezvous func/arg */
        vm_rendezvous_func_t rendezvous_func;
        struct mtx      rendezvous_mtx; /* (o) rendezvous lock */
        struct mem_map  mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */
        struct mem_seg  mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */
        struct vmspace  *vmspace;       /* (o) guest's address space */
        char            name[VM_MAX_NAMELEN]; /* (o) virtual machine name */
        struct vcpu     vcpu[VM_MAXCPU]; /* (i) guest vcpus */
        /* The following describe the vm cpu topology */
        uint16_t        sockets;        /* (o) num of sockets */
        uint16_t        cores;          /* (o) num of cores/socket */
        uint16_t        threads;        /* (o) num of threads/core */
        uint16_t        maxcpus;        /* (o) max pluggable cpus */
};
static int vmm_initialized;

static struct vmm_ops *ops;
#define VMM_INIT(num)   (ops != NULL ? (*ops->init)(num) : 0)
#define VMM_CLEANUP()   (ops != NULL ? (*ops->cleanup)() : 0)
#define VMM_RESUME()    (ops != NULL ? (*ops->resume)() : 0)

#define VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap) : NULL)
#define VMRUN(vmi, vcpu, rip, pmap, evinfo) \
        (ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, evinfo) : ENXIO)
#define VMCLEANUP(vmi)  (ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define VMSPACE_ALLOC(min, max) \
        (ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define VMSPACE_FREE(vmspace) \
        (ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define VMGETREG(vmi, vcpu, num, retval) \
        (ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETREG(vmi, vcpu, num, val) \
        (ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define VMGETDESC(vmi, vcpu, num, desc) \
        (ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMSETDESC(vmi, vcpu, num, desc) \
        (ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMGETCAP(vmi, vcpu, num, retval) \
        (ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETCAP(vmi, vcpu, num, val) \
        (ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define VLAPIC_INIT(vmi, vcpu) \
        (ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define VLAPIC_CLEANUP(vmi, vlapic) \
        (ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)

#define fpu_start_emulating()   load_cr0(rcr0() | CR0_TS)
#define fpu_stop_emulating()    clts()
SDT_PROVIDER_DEFINE(vmm);

static MALLOC_DEFINE(M_VM, "vm", "vm");

static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");
static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");

static void vm_free_memmap(struct vm *vm, int ident);
static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
static const char *
vcpu_state2str(enum vcpu_state state)
{

        switch (state) {
        case VCPU_IDLE:
                return ("idle");
        case VCPU_FROZEN:
                return ("frozen");
        case VCPU_RUNNING:
                return ("running");
        case VCPU_SLEEPING:
                return ("sleeping");
        default:
                return ("unknown");
        }
}

static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
        struct vcpu *vcpu = &vm->vcpu[i];

        VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
        if (destroy) {
                vmm_stat_free(vcpu->stats);
                fpu_save_area_free(vcpu->guestfpu);
        }
}
static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
        struct vcpu *vcpu;

        KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
            ("vcpu_init: invalid vcpu %d", vcpu_id));

        vcpu = &vm->vcpu[vcpu_id];

        if (create) {
                KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
                    "initialized", vcpu_id));
                vcpu_lock_init(vcpu);
                vcpu->state = VCPU_IDLE;
                vcpu->hostcpu = NOCPU;
                vcpu->guestfpu = fpu_save_area_alloc();
                vcpu->stats = vmm_stat_alloc();
        }

        vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
        vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
        vcpu->reqidle = 0;
        vcpu->exitintinfo = 0;
        vcpu->nmi_pending = 0;
        vcpu->extint_pending = 0;
        vcpu->exception_pending = 0;
        vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
        fpu_save_area_reset(vcpu->guestfpu);
        vmm_stat_init(vcpu->stats);
}

int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{

        return (trace_guest_exceptions);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
        struct vcpu *vcpu;

        if (cpuid < 0 || cpuid >= VM_MAXCPU)
                panic("vm_exitinfo: invalid cpuid %d", cpuid);

        vcpu = &vm->vcpu[cpuid];

        return (&vcpu->exitinfo);
}
static void
vmm_resume(void)
{
        VMM_RESUME();
}

static int
vmm_init(void)
{
        int error;

        vmm_host_state_init();

        vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
            &IDTVEC(justreturn));
        if (vmm_ipinum < 0)
                vmm_ipinum = IPI_AST;

        error = vmm_mem_init();
        if (error)
                return (error);

        if (vmm_is_intel())
                ops = &vmm_ops_intel;
        else if (vmm_is_amd())
                ops = &vmm_ops_amd;
        else
                return (ENXIO);

        vmm_resume_p = vmm_resume;

        return (VMM_INIT(vmm_ipinum));
}
static int
vmm_handler(module_t mod, int what, void *arg)
{
        int error;

        switch (what) {
        case MOD_LOAD:
                vmmdev_init();
                error = vmm_init();
                if (error == 0)
                        vmm_initialized = 1;
                break;
        case MOD_UNLOAD:
                error = vmmdev_cleanup();
                if (error == 0) {
                        vmm_resume_p = NULL;
                        iommu_cleanup();
                        if (vmm_ipinum != IPI_AST)
                                lapic_ipi_free(vmm_ipinum);
                        error = VMM_CLEANUP();
                        /*
                         * Something bad happened - prevent new
                         * VMs from being created
                         */
                        if (error)
                                vmm_initialized = 0;
                }
                break;
        default:
                error = 0;
                break;
        }
        return (error);
}
static moduledata_t vmm_kmod = {
        "vmm",
        vmm_handler,
        NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);
static void
vm_init(struct vm *vm, bool create)
{
        int i;

        vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
        vm->iommu = NULL;
        vm->vioapic = vioapic_init(vm);
        vm->vhpet = vhpet_init(vm);
        vm->vatpic = vatpic_init(vm);
        vm->vatpit = vatpit_init(vm);
        vm->vpmtmr = vpmtmr_init(vm);
        if (create)
                vm->vrtc = vrtc_init(vm);

        CPU_ZERO(&vm->active_cpus);

        vm->suspend = 0;
        CPU_ZERO(&vm->suspended_cpus);

        for (i = 0; i < VM_MAXCPU; i++)
                vcpu_init(vm, i, create);
}
/*
 * The default CPU topology is a single thread per package.
 */
u_int cores_per_package = 1;
u_int threads_per_core = 1;
int
vm_create(const char *name, struct vm **retvm)
{
        struct vm *vm;
        struct vmspace *vmspace;

        /*
         * If vmm.ko could not be successfully initialized then don't attempt
         * to create the virtual machine.
         */
        if (!vmm_initialized)
                return (ENXIO);

        if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
                return (EINVAL);

        vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
        if (vmspace == NULL)
                return (ENOMEM);

        vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
        strcpy(vm->name, name);
        vm->vmspace = vmspace;
        mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

        vm->sockets = 1;
        vm->cores = cores_per_package;  /* XXX backwards compatibility */
        vm->threads = threads_per_core; /* XXX backwards compatibility */
        vm->maxcpus = 0;                /* XXX not implemented */

        vm_init(vm, true);

        *retvm = vm;
        return (0);
}
void
vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus)
{
        *sockets = vm->sockets;
        *cores = vm->cores;
        *threads = vm->threads;
        *maxcpus = vm->maxcpus;
}
int
vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus)
{
        if (maxcpus != 0)
                return (EINVAL);        /* XXX remove when supported */
        if ((sockets * cores * threads) > VM_MAXCPU)
                return (EINVAL);
        /* XXX need to check sockets * cores * threads == vCPU, how? */
        vm->sockets = sockets;
        vm->cores = cores;
        vm->threads = threads;
        vm->maxcpus = maxcpus;
        return (0);
}
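/*
 * Worked example (hypothetical values): sockets=2, cores=2, threads=1
 * describes 2 * 2 * 1 = 4 vcpus, which passes the VM_MAXCPU bound above;
 * maxcpus must currently be 0 since CPU hotplug sizing is not implemented.
 */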
static void
vm_cleanup(struct vm *vm, bool destroy)
{
        struct mem_map *mm;
        int i;

        ppt_unassign_all(vm);

        if (vm->iommu != NULL)
                iommu_destroy_domain(vm->iommu);

        if (destroy)
                vrtc_cleanup(vm->vrtc);
        else
                vrtc_reset(vm->vrtc);
        vpmtmr_cleanup(vm->vpmtmr);
        vatpit_cleanup(vm->vatpit);
        vhpet_cleanup(vm->vhpet);
        vatpic_cleanup(vm->vatpic);
        vioapic_cleanup(vm->vioapic);

        for (i = 0; i < VM_MAXCPU; i++)
                vcpu_cleanup(vm, i, destroy);

        VMCLEANUP(vm->cookie);

        /*
         * System memory is removed from the guest address space only when
         * the VM is destroyed. This is because the mapping remains the same
         * across VM reset.
         *
         * Device memory can be relocated by the guest (e.g. using PCI BARs)
         * so those mappings are removed on a VM reset.
         */
        for (i = 0; i < VM_MAX_MEMMAPS; i++) {
                mm = &vm->mem_maps[i];
                if (destroy || !sysmem_mapping(vm, mm))
                        vm_free_memmap(vm, i);
        }

        if (destroy) {
                for (i = 0; i < VM_MAX_MEMSEGS; i++)
                        vm_free_memseg(vm, i);

                VMSPACE_FREE(vm->vmspace);
                vm->vmspace = NULL;
        }
}
void
vm_destroy(struct vm *vm)
{
        vm_cleanup(vm, true);
        free(vm, M_VM);
}

int
vm_reinit(struct vm *vm)
{
        int error;

        /*
         * A virtual machine can be reset only if all vcpus are suspended.
         */
        if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
                vm_cleanup(vm, false);
const char *
vm_name(struct vm *vm)
{
        return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
        vm_object_t obj;

        if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
                return (ENOMEM);
        else
                return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

        vmm_mmio_free(vm->vmspace, gpa, len);
        return (0);
}
/*
 * Return 'true' if 'gpa' is allocated in the guest address space.
 *
 * This function is called in the context of a running vcpu which acts as
 * an implicit lock on 'vm->mem_maps[]'.
 */
bool
vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
{
        struct mem_map *mm;
        int i;
#ifdef INVARIANTS
        int hostcpu, state;
        state = vcpu_get_state(vm, vcpuid, &hostcpu);
        KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
            ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
#endif

        for (i = 0; i < VM_MAX_MEMMAPS; i++) {
                mm = &vm->mem_maps[i];
                if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
                        return (true);          /* 'gpa' is sysmem or devmem */
        }

        if (ppt_is_mmio(vm, gpa))
                return (true);                  /* 'gpa' is pci passthru mmio */

        return (false);
}
int
vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
{
        struct mem_seg *seg;
        vm_object_t obj;

        if (ident < 0 || ident >= VM_MAX_MEMSEGS)
                return (EINVAL);

        if (len == 0 || (len & PAGE_MASK))
                return (EINVAL);

        seg = &vm->mem_segs[ident];
        if (seg->object != NULL) {
                if (seg->len == len && seg->sysmem == sysmem)
                        return (EEXIST);
                else
                        return (EINVAL);
        }

        obj = vm_object_allocate(OBJT_DEFAULT, len >> PAGE_SHIFT);
        if (obj == NULL)
                return (ENOMEM);

        seg->len = len;
        seg->object = obj;
        seg->sysmem = sysmem;
        return (0);
}
int
vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    vm_object_t *objptr)
{
        struct mem_seg *seg;

        if (ident < 0 || ident >= VM_MAX_MEMSEGS)
                return (EINVAL);

        seg = &vm->mem_segs[ident];
        if (len)
                *len = seg->len;
        if (sysmem)
                *sysmem = seg->sysmem;
        if (objptr)
                *objptr = seg->object;
        return (0);
}

void
vm_free_memseg(struct vm *vm, int ident)
{
        struct mem_seg *seg;

        KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
            ("%s: invalid memseg ident %d", __func__, ident));

        seg = &vm->mem_segs[ident];
        if (seg->object != NULL) {
                vm_object_deallocate(seg->object);
                bzero(seg, sizeof(struct mem_seg));
        }
}
int
vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
    size_t len, int prot, int flags)
{
        struct mem_seg *seg;
        struct mem_map *m, *map;
        vm_ooffset_t last;
        int i, error;

        if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
                return (EINVAL);

        if (flags & ~VM_MEMMAP_F_WIRED)
                return (EINVAL);

        if (segid < 0 || segid >= VM_MAX_MEMSEGS)
                return (EINVAL);

        seg = &vm->mem_segs[segid];
        if (seg->object == NULL)
                return (EINVAL);

        last = first + len;
        if (first < 0 || first >= last || last > seg->len)
                return (EINVAL);

        if ((gpa | first | last) & PAGE_MASK)
                return (EINVAL);

        map = NULL;
        for (i = 0; i < VM_MAX_MEMMAPS; i++) {
                m = &vm->mem_maps[i];
                if (m->len == 0) {
                        map = m;
                        break;
                }
        }

        if (map == NULL)
                return (ENOSPC);

        error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
            len, 0, VMFS_NO_SPACE, prot, prot, 0);
        if (error != KERN_SUCCESS)
                return (EFAULT);

        vm_object_reference(seg->object);

        if (flags & VM_MEMMAP_F_WIRED) {
                error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
                    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
                if (error != KERN_SUCCESS) {
                        vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
                        return (EFAULT);
                }
        }

        map->gpa = gpa;
        map->len = len;
        map->segoff = first;
        map->segid = segid;
        map->prot = prot;
        map->flags = flags;
        return (0);
}
int
vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
        struct mem_map *mm, *mmnext;
        int i;

        mmnext = NULL;
        for (i = 0; i < VM_MAX_MEMMAPS; i++) {
                mm = &vm->mem_maps[i];
                if (mm->len == 0 || mm->gpa < *gpa)
                        continue;
                if (mmnext == NULL || mm->gpa < mmnext->gpa)
                        mmnext = mm;
        }

        if (mmnext != NULL) {
                *gpa = mmnext->gpa;
                if (segid)
                        *segid = mmnext->segid;
                if (segoff)
                        *segoff = mmnext->segoff;
                if (len)
                        *len = mmnext->len;
                if (prot)
                        *prot = mmnext->prot;
                if (flags)
                        *flags = mmnext->flags;
                return (0);
        } else {
                return (ENOENT);
        }
}
static void
vm_free_memmap(struct vm *vm, int ident)
{
        struct mem_map *mm;
        int error;

        mm = &vm->mem_maps[ident];
        if (mm->len) {
                error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
                    mm->gpa + mm->len);
                KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
                    __func__, error));
                bzero(mm, sizeof(struct mem_map));
        }
}

static bool
sysmem_mapping(struct vm *vm, struct mem_map *mm)
{

        if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
                return (true);
        else
                return (false);
}

vm_paddr_t
vmm_sysmem_maxaddr(struct vm *vm)
{
        struct mem_map *mm;
        vm_paddr_t maxaddr;
        int i;

        maxaddr = 0;
        for (i = 0; i < VM_MAX_MEMMAPS; i++) {
                mm = &vm->mem_maps[i];
                if (sysmem_mapping(vm, mm)) {
                        if (maxaddr < mm->gpa + mm->len)
                                maxaddr = mm->gpa + mm->len;
                }
        }
        return (maxaddr);
}
static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
        int i, sz;
        vm_paddr_t gpa, hpa;
        struct mem_map *mm;
        void *vp, *cookie, *host_domain;

        sz = PAGE_SIZE;
        host_domain = iommu_host_domain();

        for (i = 0; i < VM_MAX_MEMMAPS; i++) {
                mm = &vm->mem_maps[i];
                if (!sysmem_mapping(vm, mm))
                        continue;

                if (map) {
                        KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
                            ("iommu map found invalid memmap %#lx/%#lx/%#x",
                            mm->gpa, mm->len, mm->flags));
                        if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
                                continue;
                        mm->flags |= VM_MEMMAP_F_IOMMU;
                } else {
                        if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
                                continue;
                        mm->flags &= ~VM_MEMMAP_F_IOMMU;
                        KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
                            ("iommu unmap found invalid memmap %#lx/%#lx/%#x",
                            mm->gpa, mm->len, mm->flags));
                }

                gpa = mm->gpa;
                while (gpa < mm->gpa + mm->len) {
                        vp = vm_gpa_hold(vm, -1, gpa, PAGE_SIZE, VM_PROT_WRITE,
                            &cookie);
                        KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
                            vm_name(vm), gpa));

                        vm_gpa_release(cookie);

                        hpa = DMAP_TO_PHYS((uintptr_t)vp);
                        if (map) {
                                iommu_create_mapping(vm->iommu, gpa, hpa, sz);
                                iommu_remove_mapping(host_domain, hpa, sz);
                        } else {
                                iommu_remove_mapping(vm->iommu, gpa, sz);
                                iommu_create_mapping(host_domain, hpa, hpa, sz);
                        }

                        gpa += PAGE_SIZE;
                }
        }

        /*
         * Invalidate the cached translations associated with the domain
         * from which pages were removed.
         */
        if (map)
                iommu_invalidate_tlb(host_domain);
        else
                iommu_invalidate_tlb(vm->iommu);
}

#define vm_iommu_unmap(vm)      vm_iommu_modify((vm), FALSE)
#define vm_iommu_map(vm)        vm_iommu_modify((vm), TRUE)
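/*
 * Sketch of the resulting translations (illustrative): for a wired sysmem
 * mapping covering gpa [mm->gpa, mm->gpa + mm->len), vm_iommu_map()
 * installs, page by page,
 *
 *      vm->iommu:      gpa -> hpa   (device DMA uses guest-physical addresses)
 *
 * and removes the identity mapping hpa -> hpa from the host domain, so a
 * passed-through device can no longer DMA to that host page directly;
 * vm_iommu_unmap() reverses both steps.
 */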
int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
        int error;

        error = ppt_unassign_device(vm, bus, slot, func);
        if (error)
                return (error);

        if (ppt_assigned_devices(vm) == 0)
                vm_iommu_unmap(vm);

        return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
        int error;
        vm_paddr_t maxaddr;

        /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
        if (ppt_assigned_devices(vm) == 0) {
                KASSERT(vm->iommu == NULL,
                    ("vm_assign_pptdev: iommu must be NULL"));
                maxaddr = vmm_sysmem_maxaddr(vm);
                vm->iommu = iommu_create_domain(maxaddr);
                if (vm->iommu == NULL)
                        return (ENXIO);
                vm_iommu_map(vm);
        }

        error = ppt_assign_device(vm, bus, slot, func);
        return (error);
}
void *
vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
        int i, count, pageoff;
        struct mem_map *mm;
        vm_page_t m;
#ifdef INVARIANTS
        /*
         * All vcpus are frozen by ioctls that modify the memory map
         * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->memmap[]' stability is
         * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
         */
        int state;
        KASSERT(vcpuid >= -1 && vcpuid < VM_MAXCPU, ("%s: invalid vcpuid %d",
            __func__, vcpuid));
        for (i = 0; i < VM_MAXCPU; i++) {
                if (vcpuid != -1 && vcpuid != i)
                        continue;
                state = vcpu_get_state(vm, i, NULL);
                KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
                    __func__, state));
        }
#endif
        pageoff = gpa & PAGE_MASK;
        if (len > PAGE_SIZE - pageoff)
                panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

        count = 0;
        for (i = 0; i < VM_MAX_MEMMAPS; i++) {
                mm = &vm->mem_maps[i];
                if (sysmem_mapping(vm, mm) && gpa >= mm->gpa &&
                    gpa < mm->gpa + mm->len) {
                        count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
                            trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
                        break;
                }
        }

        if (count == 1) {
                *cookie = m;
                return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
        } else {
                *cookie = NULL;
                return (NULL);
        }
}
void
vm_gpa_release(void *cookie)
{
        vm_page_t m = cookie;

        vm_page_lock(m);
        vm_page_unhold(m);
        vm_page_unlock(m);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (reg >= VM_REG_LAST)
                return (EINVAL);

        return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
{
        struct vcpu *vcpu;
        int error;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        if (reg >= VM_REG_LAST)
                return (EINVAL);

        error = VMSETREG(vm->cookie, vcpuid, reg, val);
        if (error || reg != VM_REG_GUEST_RIP)
                return (error);

        /* Set 'nextrip' to match the value of %rip */
        VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
        vcpu = &vm->vcpu[vcpuid];
        vcpu->nextrip = val;
        return (0);
}
static boolean_t
is_descriptor_table(int reg)
{

        switch (reg) {
        case VM_REG_GUEST_IDTR:
        case VM_REG_GUEST_GDTR:
                return (TRUE);
        default:
                return (FALSE);
        }
}

static boolean_t
is_segment_register(int reg)
{

        switch (reg) {
        case VM_REG_GUEST_ES:
        case VM_REG_GUEST_CS:
        case VM_REG_GUEST_SS:
        case VM_REG_GUEST_DS:
        case VM_REG_GUEST_FS:
        case VM_REG_GUEST_GS:
        case VM_REG_GUEST_TR:
        case VM_REG_GUEST_LDTR:
                return (TRUE);
        default:
                return (FALSE);
        }
}
int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (!is_segment_register(reg) && !is_descriptor_table(reg))
                return (EINVAL);

        return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (!is_segment_register(reg) && !is_descriptor_table(reg))
                return (EINVAL);

        return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

        /* flush host state to the pcb */
        fpuexit(curthread);

        /* restore guest FPU state */
        fpu_stop_emulating();
        fpurestore(vcpu->guestfpu);

        /* restore guest XCR0 if XSAVE is enabled in the host */
        if (rcr4() & CR4_XSAVE)
                load_xcr(0, vcpu->guest_xcr0);

        /*
         * The FPU is now "dirty" with the guest's state so turn on emulation
         * to trap any access to the FPU by the host.
         */
        fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

        if ((rcr0() & CR0_TS) == 0)
                panic("fpu emulation not enabled in host!");

        /* save guest XCR0 and restore host XCR0 */
        if (rcr4() & CR4_XSAVE) {
                vcpu->guest_xcr0 = rxcr(0);
                load_xcr(0, vmm_get_host_xcr0());
        }

        /* save guest FPU state */
        fpu_stop_emulating();
        fpusave(vcpu->guestfpu);
        fpu_start_emulating();
}
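/*
 * Illustrative pairing, mirroring vm_run() below: guest FPU state is live
 * in hardware only between the two calls, and CR0.TS traps any stray host
 * FPU use in that window:
 *
 *      restore_guest_fpustate(vcpu);   ... CR0.TS set, guest state loaded
 *      error = VMRUN(...);
 *      save_guest_fpustate(vcpu);      ... guest state parked in 'guestfpu'
 */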
static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
        struct vcpu *vcpu;
        int error;

        vcpu = &vm->vcpu[vcpuid];
        vcpu_assert_locked(vcpu);

        /*
         * State transitions from the vmmdev_ioctl() must always begin from
         * the VCPU_IDLE state. This guarantees that there is only a single
         * ioctl() operating on a vcpu at any point.
         */
        if (from_idle) {
                while (vcpu->state != VCPU_IDLE) {
                        vcpu->reqidle = 1;
                        vcpu_notify_event_locked(vcpu, false);
                        VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to "
                            "idle requested", vcpu_state2str(vcpu->state));
                        msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
                }
        } else {
                KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
                    "vcpu idle state"));
        }

        if (vcpu->state == VCPU_RUNNING) {
                KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
                    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
        } else {
                KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
                    "vcpu that is not running", vcpu->hostcpu));
        }

        /*
         * The following state transitions are allowed:
         * IDLE -> FROZEN -> IDLE
         * FROZEN -> RUNNING -> FROZEN
         * FROZEN -> SLEEPING -> FROZEN
         */
        switch (vcpu->state) {
        case VCPU_IDLE:
        case VCPU_RUNNING:
        case VCPU_SLEEPING:
                error = (newstate != VCPU_FROZEN);
                break;
        case VCPU_FROZEN:
                error = (newstate == VCPU_FROZEN);
                break;
        default:
                error = 1;
                break;
        }

        if (error)
                return (EBUSY);

        VCPU_CTR2(vm, vcpuid, "vcpu state changed from %s to %s",
            vcpu_state2str(vcpu->state), vcpu_state2str(newstate));

        vcpu->state = newstate;
        if (newstate == VCPU_RUNNING)
                vcpu->hostcpu = curcpu;
        else
                vcpu->hostcpu = NOCPU;

        if (newstate == VCPU_IDLE)
                wakeup(&vcpu->state);

        return (0);
}
static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
        int error;

        if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
                panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
        int error;

        if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
                panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

        KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

        /*
         * Update 'rendezvous_func' and execute a write memory barrier to
         * ensure that it is visible across all host cpus. This is not needed
         * for correctness but it does ensure that all the vcpus will notice
         * that the rendezvous is requested immediately.
         */
        vm->rendezvous_func = func;
        wmb();
}
#define RENDEZVOUS_CTR0(vm, vcpuid, fmt)                                \
        do {                                                            \
                if (vcpuid >= 0)                                        \
                        VCPU_CTR0(vm, vcpuid, fmt);                     \
                else                                                    \
                        VM_CTR0(vm, fmt);                               \
        } while (0)

static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

        KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
            ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

        mtx_lock(&vm->rendezvous_mtx);
        while (vm->rendezvous_func != NULL) {
                /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
                CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

                if (vcpuid != -1 &&
                    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
                    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
                        VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
                        (*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
                        CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
                }
                if (CPU_CMP(&vm->rendezvous_req_cpus,
                    &vm->rendezvous_done_cpus) == 0) {
                        VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
                        vm_set_rendezvous_func(vm, NULL);
                        wakeup(&vm->rendezvous_func);
                        break;
                }
                RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
                mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
                    "vmrndv", 0);
        }
        mtx_unlock(&vm->rendezvous_mtx);
}
/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
        struct vcpu *vcpu;
        const char *wmesg;
        int t, vcpu_halted, vm_halted;

        KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

        vcpu = &vm->vcpu[vcpuid];
        vcpu_halted = 0;
        vm_halted = 0;

        vcpu_lock(vcpu);
        while (1) {
                /*
                 * Do a final check for pending NMI or interrupts before
                 * really putting this thread to sleep. Also check for
                 * software events that would cause this vcpu to wakeup.
                 *
                 * These interrupts/events could have happened after the
                 * vcpu returned from VMRUN() and before it acquired the
                 * vcpu lock above.
                 */
                if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
                        break;
                if (vm_nmi_pending(vm, vcpuid))
                        break;
                if (!intr_disabled) {
                        if (vm_extint_pending(vm, vcpuid) ||
                            vlapic_pending_intr(vcpu->vlapic, NULL)) {
                                break;
                        }
                }

                /* Don't go to sleep if the vcpu thread needs to yield */
                if (vcpu_should_yield(vm, vcpuid))
                        break;

                /*
                 * Some Linux guests implement "halt" by having all vcpus
                 * execute HLT with interrupts disabled. 'halted_cpus' keeps
                 * track of the vcpus that have entered this state. When all
                 * vcpus enter the halted state the virtual machine is halted.
                 */
                if (intr_disabled) {
                        wmesg = "vmhalt";
                        VCPU_CTR0(vm, vcpuid, "Halted");
                        if (!vcpu_halted && halt_detection_enabled) {
                                vcpu_halted = 1;
                                CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
                        }
                        if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
                                vm_halted = 1;
                                break;
                        }
                } else {
                        wmesg = "vmidle";
                }

                t = ticks;
                vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
                /*
                 * XXX msleep_spin() cannot be interrupted by signals so
                 * wake up periodically to check pending signals.
                 */
                msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
                vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
                vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
        }

        if (vcpu_halted)
                CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

        vcpu_unlock(vcpu);

        if (vm_halted)
                vm_suspend(vm, VM_SUSPEND_HALT);

        return (0);
}
static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
        int rv, ftype;
        struct vm_map *map;
        struct vcpu *vcpu;
        struct vm_exit *vme;

        vcpu = &vm->vcpu[vcpuid];
        vme = &vcpu->exitinfo;

        KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
            __func__, vme->inst_length));

        ftype = vme->u.paging.fault_type;
        KASSERT(ftype == VM_PROT_READ ||
            ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
            ("vm_handle_paging: invalid fault_type %d", ftype));

        if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
                rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
                    vme->u.paging.gpa, ftype);
                if (rv == 0) {
                        VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
                            ftype == VM_PROT_READ ? "accessed" : "dirty",
                            vme->u.paging.gpa);
                        goto done;
                }
        }

        map = &vm->vmspace->vm_map;
        rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

        VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
            "ftype = %d", rv, vme->u.paging.gpa, ftype);

        if (rv != KERN_SUCCESS)
                return (EFAULT);
done:
        return (0);
}
static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
        struct vie *vie;
        struct vcpu *vcpu;
        struct vm_exit *vme;
        uint64_t gla, gpa, cs_base;
        struct vm_guest_paging *paging;
        mem_region_read_t mread;
        mem_region_write_t mwrite;
        enum vm_cpu_mode cpu_mode;
        int cs_d, error, fault;

        vcpu = &vm->vcpu[vcpuid];
        vme = &vcpu->exitinfo;

        KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
            __func__, vme->inst_length));

        gla = vme->u.inst_emul.gla;
        gpa = vme->u.inst_emul.gpa;
        cs_base = vme->u.inst_emul.cs_base;
        cs_d = vme->u.inst_emul.cs_d;
        vie = &vme->u.inst_emul.vie;
        paging = &vme->u.inst_emul.paging;
        cpu_mode = paging->cpu_mode;

        VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);

        /* Fetch, decode and emulate the faulting instruction */
        if (vie->num_valid == 0) {
                error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
                    cs_base, VIE_INST_SIZE, vie, &fault);
                if (error || fault)
                        return (error);
        } else {
                /*
                 * The instruction bytes have already been copied into 'vie'
                 */
        }

        if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
                VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
                    vme->rip + cs_base);
                *retu = true;   /* dump instruction bytes in userspace */
                return (0);
        }

        /*
         * Update 'nextrip' based on the length of the emulated instruction.
         */
        vme->inst_length = vie->num_processed;
        vcpu->nextrip += vie->num_processed;
        VCPU_CTR1(vm, vcpuid, "nextrip updated to %#lx after instruction "
            "decoding", vcpu->nextrip);

        /* return to userland unless this is an in-kernel emulated device */
        if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
                mread = lapic_mmio_read;
                mwrite = lapic_mmio_write;
        } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
                mread = vioapic_mmio_read;
                mwrite = vioapic_mmio_write;
        } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
                mread = vhpet_mmio_read;
                mwrite = vhpet_mmio_write;
        } else {
                *retu = true;
                return (0);
        }

        error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
            mread, mwrite, retu);

        return (error);
}
static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
        int i;
        struct vcpu *vcpu;

        vcpu = &vm->vcpu[vcpuid];

        CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

        /*
         * Wait until all 'active_cpus' have suspended themselves.
         *
         * Since a VM may be suspended at any time including when one or
         * more vcpus are doing a rendezvous we need to call the rendezvous
         * handler while we are waiting to prevent a deadlock.
         */
        vcpu_lock(vcpu);
        while (1) {
                if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
                        VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
                        break;
                }

                if (vm->rendezvous_func == NULL) {
                        VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
                        vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
                        msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
                        vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
                } else {
                        VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
                        vcpu_unlock(vcpu);
                        vm_handle_rendezvous(vm, vcpuid);
                        vcpu_lock(vcpu);
                }
        }
        vcpu_unlock(vcpu);

        /*
         * Wakeup the other sleeping vcpus and return to userspace.
         */
        for (i = 0; i < VM_MAXCPU; i++) {
                if (CPU_ISSET(i, &vm->suspended_cpus)) {
                        vcpu_notify_event(vm, i, false);
                }
        }

        *retu = true;
        return (0);
}
static int
vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu)
{
        struct vcpu *vcpu = &vm->vcpu[vcpuid];

        vcpu_lock(vcpu);
        KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
        vcpu->reqidle = 0;
        vcpu_unlock(vcpu);
        *retu = true;
        return (0);
}
int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
        int i;

        if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
                return (EINVAL);

        if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
                VM_CTR2(vm, "virtual machine already suspended %d/%d",
                    vm->suspend, how);
                return (EALREADY);
        }

        VM_CTR1(vm, "virtual machine successfully suspended %d", how);

        /*
         * Notify all active vcpus that they are now suspended.
         */
        for (i = 0; i < VM_MAXCPU; i++) {
                if (CPU_ISSET(i, &vm->active_cpus))
                        vcpu_notify_event(vm, i, false);
        }

        return (0);
}
void
vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
{
        struct vm_exit *vmexit;

        KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
            ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

        vmexit = vm_exitinfo(vm, vcpuid);
        vmexit->rip = rip;
        vmexit->inst_length = 0;
        vmexit->exitcode = VM_EXITCODE_SUSPENDED;
        vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
{
        struct vm_exit *vmexit;

        KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));

        vmexit = vm_exitinfo(vm, vcpuid);
        vmexit->rip = rip;
        vmexit->inst_length = 0;
        vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
        vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
}

void
vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip)
{
        struct vm_exit *vmexit;

        vmexit = vm_exitinfo(vm, vcpuid);
        vmexit->rip = rip;
        vmexit->inst_length = 0;
        vmexit->exitcode = VM_EXITCODE_REQIDLE;
        vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1);
}

void
vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
{
        struct vm_exit *vmexit;

        vmexit = vm_exitinfo(vm, vcpuid);
        vmexit->rip = rip;
        vmexit->inst_length = 0;
        vmexit->exitcode = VM_EXITCODE_BOGUS;
        vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
}
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
        struct vm_eventinfo evinfo;
        int error, vcpuid;
        struct vcpu *vcpu;
        struct pcb *pcb;
        uint64_t tscval;
        struct vm_exit *vme;
        bool retu, intr_disabled;
        pmap_t pmap;

        vcpuid = vmrun->cpuid;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        if (!CPU_ISSET(vcpuid, &vm->active_cpus))
                return (EINVAL);

        if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
                return (EINVAL);

        pmap = vmspace_pmap(vm->vmspace);
        vcpu = &vm->vcpu[vcpuid];
        vme = &vcpu->exitinfo;
        evinfo.rptr = &vm->rendezvous_func;
        evinfo.sptr = &vm->suspend;
        evinfo.iptr = &vcpu->reqidle;
restart:
        critical_enter();

        KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
            ("vm_run: absurd pm_active"));

        tscval = rdtsc();

        pcb = PCPU_GET(curpcb);
        set_pcb_flags(pcb, PCB_FULL_IRET);

        restore_guest_fpustate(vcpu);

        vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
        error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
        vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

        save_guest_fpustate(vcpu);

        vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

        critical_exit();

        if (error == 0) {
                retu = false;
                vcpu->nextrip = vme->rip + vme->inst_length;
                switch (vme->exitcode) {
                case VM_EXITCODE_REQIDLE:
                        error = vm_handle_reqidle(vm, vcpuid, &retu);
                        break;
                case VM_EXITCODE_SUSPENDED:
                        error = vm_handle_suspend(vm, vcpuid, &retu);
                        break;
                case VM_EXITCODE_IOAPIC_EOI:
                        vioapic_process_eoi(vm, vcpuid,
                            vme->u.ioapic_eoi.vector);
                        break;
                case VM_EXITCODE_RENDEZVOUS:
                        vm_handle_rendezvous(vm, vcpuid);
                        error = 0;
                        break;
                case VM_EXITCODE_HLT:
                        intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
                        error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
                        break;
                case VM_EXITCODE_PAGING:
                        error = vm_handle_paging(vm, vcpuid, &retu);
                        break;
                case VM_EXITCODE_INST_EMUL:
                        error = vm_handle_inst_emul(vm, vcpuid, &retu);
                        break;
                case VM_EXITCODE_INOUT:
                case VM_EXITCODE_INOUT_STR:
                        error = vm_handle_inout(vm, vcpuid, vme, &retu);
                        break;
                case VM_EXITCODE_MONITOR:
                case VM_EXITCODE_MWAIT:
                case VM_EXITCODE_VMINSN:
                        vm_inject_ud(vm, vcpuid);
                        break;
                default:
                        retu = true;    /* handled in userland */
                        break;
                }
        }

        if (error == 0 && retu == false)
                goto restart;

        VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);

        /* copy the exit information */
        bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));

        return (error);
}
int
vm_restart_instruction(void *arg, int vcpuid)
{
        struct vm *vm;
        struct vcpu *vcpu;
        enum vcpu_state state;
        uint64_t rip;
        int error;

        vm = arg;
        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        vcpu = &vm->vcpu[vcpuid];
        state = vcpu_get_state(vm, vcpuid, NULL);
        if (state == VCPU_RUNNING) {
                /*
                 * When a vcpu is "running" the next instruction is determined
                 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
                 * Thus setting 'inst_length' to zero will cause the current
                 * instruction to be restarted.
                 */
                vcpu->exitinfo.inst_length = 0;
                VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
                    "setting inst_length to zero", vcpu->exitinfo.rip);
        } else if (state == VCPU_FROZEN) {
                /*
                 * When a vcpu is "frozen" it is outside the critical section
                 * around VMRUN() and 'nextrip' points to the next instruction.
                 * Thus instruction restart is achieved by setting 'nextrip'
                 * to the vcpu's %rip.
                 */
                error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
                KASSERT(!error, ("%s: error %d getting rip", __func__, error));
                VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
                    "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
                vcpu->nextrip = rip;
        } else {
                panic("%s: invalid state %d", __func__, state);
        }
        return (0);
}
int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
        struct vcpu *vcpu;
        int type, vector;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        vcpu = &vm->vcpu[vcpuid];

        if (info & VM_INTINFO_VALID) {
                type = info & VM_INTINFO_TYPE;
                vector = info & 0xff;
                if (type == VM_INTINFO_NMI && vector != IDT_NMI)
                        return (EINVAL);
                if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
                        return (EINVAL);
                if (info & VM_INTINFO_RSVD)
                        return (EINVAL);
        } else {
                info = 0;
        }
        VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
        vcpu->exitintinfo = info;
        return (0);
}
enum exc_class {
        EXC_BENIGN,
        EXC_CONTRIBUTORY,
        EXC_PAGEFAULT
};

#define IDT_VE  20      /* Virtualization Exception (Intel specific) */

static enum exc_class
exception_class(uint64_t info)
{
        int type, vector;

        KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
        type = info & VM_INTINFO_TYPE;
        vector = info & 0xff;

        /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
        switch (type) {
        case VM_INTINFO_HWINTR:
        case VM_INTINFO_SWINTR:
        case VM_INTINFO_NMI:
                return (EXC_BENIGN);
        default:
                /*
                 * Hardware exception.
                 *
                 * SVM and VT-x use identical type values to represent NMI,
                 * hardware interrupt and software interrupt.
                 *
                 * SVM uses type '3' for all exceptions. VT-x uses type '3'
                 * for exceptions except #BP and #OF. #BP and #OF use a type
                 * value of '5' or '6'. Therefore we don't check for explicit
                 * values of 'type' to classify 'intinfo' into a hardware
                 * exception.
                 */
                break;
        }

        switch (vector) {
        case IDT_PF:
        case IDT_VE:
                return (EXC_PAGEFAULT);
        case IDT_DE:
        case IDT_TS:
        case IDT_NP:
        case IDT_SS:
        case IDT_GP:
                return (EXC_CONTRIBUTORY);
        default:
                return (EXC_BENIGN);
        }
}
static int
nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
    uint64_t *retinfo)
{
        enum exc_class exc1, exc2;
        int type1, vector1;

        KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
        KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));

        /*
         * If an exception occurs while attempting to call the double-fault
         * handler the processor enters shutdown mode (aka triple fault).
         */
        type1 = info1 & VM_INTINFO_TYPE;
        vector1 = info1 & 0xff;
        if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
                VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
                    info1, info2);
                vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
                *retinfo = 0;
                return (0);
        }

        /*
         * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
         */
        exc1 = exception_class(info1);
        exc2 = exception_class(info2);
        if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
            (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
                /* Convert nested fault into a double fault. */
                *retinfo = IDT_DF;
                *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
                *retinfo |= VM_INTINFO_DEL_ERRCODE;
        } else {
                /* Handle exceptions serially */
                *retinfo = info2;
        }
        return (1);
}
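/*
 * Worked example (per Table 6-5): a #GP (contributory) raised while
 * delivering a #PF (pagefault class) is folded into a #DF with error code 0
 * by the logic above; a #PF raised while delivering a benign #DB is simply
 * delivered next, serially.
 */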
static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
        uint64_t info = 0;

        if (vcpu->exception_pending) {
                info = vcpu->exc_vector & 0xff;
                info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
                if (vcpu->exc_errcode_valid) {
                        info |= VM_INTINFO_DEL_ERRCODE;
                        info |= (uint64_t)vcpu->exc_errcode << 32;
                }
        }
        return (info);
}
int
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
{
        struct vcpu *vcpu;
        uint64_t info1, info2;
        int valid;

        KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));

        vcpu = &vm->vcpu[vcpuid];

        info1 = vcpu->exitintinfo;
        vcpu->exitintinfo = 0;

        info2 = 0;
        if (vcpu->exception_pending) {
                info2 = vcpu_exception_intinfo(vcpu);
                vcpu->exception_pending = 0;
                VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
                    vcpu->exc_vector, info2);
        }

        if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
                valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
        } else if (info1 & VM_INTINFO_VALID) {
                *retinfo = info1;
                valid = 1;
        } else if (info2 & VM_INTINFO_VALID) {
                *retinfo = info2;
                valid = 1;
        } else {
                valid = 0;
        }

        if (valid) {
                VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
                    "retinfo(%#lx)", __func__, info1, info2, *retinfo);
        }

        return (valid);
}

int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        vcpu = &vm->vcpu[vcpuid];
        *info1 = vcpu->exitintinfo;
        *info2 = vcpu_exception_intinfo(vcpu);
        return (0);
}
int
vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
        struct vcpu *vcpu;
        uint64_t regval;
        int error;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        if (vector < 0 || vector >= 32)
                return (EINVAL);

        /*
         * A double fault exception should never be injected directly into
         * the guest. It is a derived exception that results from specific
         * combinations of nested faults.
         */
        if (vector == IDT_DF)
                return (EINVAL);

        vcpu = &vm->vcpu[vcpuid];

        if (vcpu->exception_pending) {
                VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
                    "pending exception %d", vector, vcpu->exc_vector);
                return (EBUSY);
        }

        if (errcode_valid) {
                /*
                 * Exceptions don't deliver an error code in real mode.
                 */
                error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
                KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
                if (!(regval & CR0_PE))
                        errcode_valid = 0;
        }

        /*
         * From section 26.6.1 "Interruptibility State" in Intel SDM:
         *
         * Event blocking by "STI" or "MOV SS" is cleared after guest executes
         * one instruction or incurs an exception.
         */
        error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
        KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
            __func__, error));

        if (restart_instruction)
                vm_restart_instruction(vm, vcpuid);

        vcpu->exception_pending = 1;
        vcpu->exc_vector = vector;
        vcpu->exc_errcode = errcode;
        vcpu->exc_errcode_valid = errcode_valid;
        VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
        return (0);
}
void
vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
    int errcode)
{
        struct vm *vm;
        int error, restart_instruction;

        vm = vmarg;
        restart_instruction = 1;

        error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
            errcode, restart_instruction);
        KASSERT(error == 0, ("vm_inject_exception error %d", error));
}

void
vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
{
        struct vm *vm;
        int error;

        vm = vmarg;
        VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
            error_code, cr2);

        error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
        KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));

        vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
}
static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        vcpu = &vm->vcpu[vcpuid];

        vcpu->nmi_pending = 1;
        vcpu_notify_event(vm, vcpuid, false);
        return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        if (vcpu->nmi_pending == 0)
                panic("vm_nmi_clear: inconsistent nmi_pending state");

        vcpu->nmi_pending = 0;
        vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}

static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");

int
vm_inject_extint(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        vcpu = &vm->vcpu[vcpuid];

        vcpu->extint_pending = 1;
        vcpu_notify_event(vm, vcpuid, false);
        return (0);
}

int
vm_extint_pending(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vm_extint_pending: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        return (vcpu->extint_pending);
}

void
vm_extint_clear(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vm_extint_clear: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        if (vcpu->extint_pending == 0)
                panic("vm_extint_clear: inconsistent extint_pending state");

        vcpu->extint_pending = 0;
        vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
}
int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (type < 0 || type >= VM_CAP_MAX)
                return (EINVAL);

        return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (type < 0 || type >= VM_CAP_MAX)
                return (EINVAL);

        return (VMSETCAP(vm->cookie, vcpu, type, val));
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
        return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

        return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

        return (vm->vhpet);
}
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
        int b, s, f;
        int i, n, found;
        char *val, *cp, *cp2;

        /*
         * XXX
         * The length of an environment variable is limited to 128 bytes which
         * puts an upper limit on the number of passthru devices that may be
         * specified using a single environment variable.
         *
         * Work around this by scanning multiple environment variable
         * names instead of a single one - yuck!
         */
        const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

        /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
        found = 0;
        for (i = 0; names[i] != NULL && !found; i++) {
                cp = val = kern_getenv(names[i]);
                while (cp != NULL && *cp != '\0') {
                        if ((cp2 = strchr(cp, ' ')) != NULL)
                                *cp2 = '\0';

                        n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
                        if (n == 3 && bus == b && slot == s && func == f) {
                                found = 1;
                                break;
                        }

                        if (cp2 != NULL)
                                *cp2++ = ' ';

                        cp = cp2;
                }
                freeenv(val);
        }
        return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

        return (vm->iommu);
}
int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
        int error;
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        vcpu_lock(vcpu);
        error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle);
        vcpu_unlock(vcpu);

        return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
        struct vcpu *vcpu;
        enum vcpu_state state;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        vcpu_lock(vcpu);
        state = vcpu->state;
        if (hostcpu != NULL)
                *hostcpu = vcpu->hostcpu;
        vcpu_unlock(vcpu);

        return (state);
}
int
vm_activate_cpu(struct vm *vm, int vcpuid)
{

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        if (CPU_ISSET(vcpuid, &vm->active_cpus))
                return (EBUSY);

        VCPU_CTR0(vm, vcpuid, "activated");
        CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
        return (0);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

        return (vm->active_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{

        return (vm->suspended_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

        return (vm->vcpu[vcpuid].stats);
}
int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        *state = vm->vcpu[vcpuid].x2apic_state;

        return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        if (state >= X2APIC_STATE_LAST)
                return (EINVAL);

        vm->vcpu[vcpuid].x2apic_state = state;

        vlapic_set_x2apic_state(vm, vcpuid, state);

        return (0);
}
/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
static void
vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
{
        int hostcpu;

        hostcpu = vcpu->hostcpu;
        if (vcpu->state == VCPU_RUNNING) {
                KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
                if (hostcpu != curcpu) {
                        if (lapic_intr) {
                                vlapic_post_intr(vcpu->vlapic, hostcpu,
                                    vmm_ipinum);
                        } else {
                                ipi_cpu(hostcpu, vmm_ipinum);
                        }
                } else {
                        /*
                         * If the 'vcpu' is running on 'curcpu' then it must
                         * be sending a notification to itself (e.g. SELF_IPI).
                         * The pending event will be picked up when the vcpu
                         * transitions back to guest context.
                         */
                }
        } else {
                KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
                    "with hostcpu %d", vcpu->state, hostcpu));
                if (vcpu->state == VCPU_SLEEPING)
                        wakeup_one(vcpu);
        }
}

void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
        struct vcpu *vcpu = &vm->vcpu[vcpuid];

        vcpu_lock(vcpu);
        vcpu_notify_event_locked(vcpu, lapic_intr);
        vcpu_unlock(vcpu);
}
struct vmspace *
vm_get_vmspace(struct vm *vm)
{

        return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
        /*
         * XXX apic id is assumed to be numerically identical to vcpu id
         */
        return (apicid);
}
void
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
        int i;

        /*
         * Enforce that this function is called without any locks
         */
        WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
        KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
            ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));

restart:
        mtx_lock(&vm->rendezvous_mtx);
        if (vm->rendezvous_func != NULL) {
                /*
                 * If a rendezvous is already in progress then we need to
                 * call the rendezvous handler in case this 'vcpuid' is one
                 * of the targets of the rendezvous.
                 */
                RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
                mtx_unlock(&vm->rendezvous_mtx);
                vm_handle_rendezvous(vm, vcpuid);
                goto restart;
        }
        KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
            "rendezvous is still in progress"));

        RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
        vm->rendezvous_req_cpus = dest;
        CPU_ZERO(&vm->rendezvous_done_cpus);
        vm->rendezvous_arg = arg;
        vm_set_rendezvous_func(vm, func);
        mtx_unlock(&vm->rendezvous_mtx);

        /*
         * Wake up any sleeping vcpus and trigger a VM-exit in any running
         * vcpus so they handle the rendezvous as soon as possible.
         */
        for (i = 0; i < VM_MAXCPU; i++) {
                if (CPU_ISSET(i, &dest))
                        vcpu_notify_event(vm, i, false);
        }

        vm_handle_rendezvous(vm, vcpuid);
}
struct vatpic *
vm_atpic(struct vm *vm)
{
        return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
        return (vm->vatpit);
}

struct vpmtmr *
vm_pmtmr(struct vm *vm)
{

        return (vm->vpmtmr);
}

struct vrtc *
vm_rtc(struct vm *vm)
{

        return (vm->vrtc);
}

enum vm_reg_name
vm_segment_name(int seg)
{
        static enum vm_reg_name seg_names[] = {
                VM_REG_GUEST_ES,
                VM_REG_GUEST_CS,
                VM_REG_GUEST_SS,
                VM_REG_GUEST_DS,
                VM_REG_GUEST_FS,
                VM_REG_GUEST_GS
        };

        KASSERT(seg >= 0 && seg < nitems(seg_names),
            ("%s: invalid segment encoding %d", __func__, seg));
        return (seg_names[seg]);
}
void
vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
        int idx;

        for (idx = 0; idx < num_copyinfo; idx++) {
                if (copyinfo[idx].cookie != NULL)
                        vm_gpa_release(copyinfo[idx].cookie);
        }
        bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
}

int
vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo, int *fault)
{
        int error, idx, nused;
        size_t n, off, remaining;
        void *hva, *cookie;
        uint64_t gpa;

        bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);

        nused = 0;
        remaining = len;
        while (remaining > 0) {
                KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
                error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
                if (error || *fault)
                        return (error);
                off = gpa & PAGE_MASK;
                n = min(remaining, PAGE_SIZE - off);
                copyinfo[nused].gpa = gpa;
                copyinfo[nused].len = n;
                remaining -= n;
                gla += n;
                nused++;
        }

        for (idx = 0; idx < nused; idx++) {
                hva = vm_gpa_hold(vm, vcpuid, copyinfo[idx].gpa,
                    copyinfo[idx].len, prot, &cookie);
                if (hva == NULL)
                        break;
                copyinfo[idx].hva = hva;
                copyinfo[idx].cookie = cookie;
        }

        if (idx != nused) {
                vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
                return (EFAULT);
        } else {
                *fault = 0;
                return (0);
        }
}
void
vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
    size_t len)
{
        char *dst;
        int idx;

        dst = kaddr;
        idx = 0;
        while (len > 0) {
                bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
                len -= copyinfo[idx].len;
                dst += copyinfo[idx].len;
                idx++;
        }
}

void
vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len)
{
        const char *src;
        int idx;

        src = kaddr;
        idx = 0;
        while (len > 0) {
                bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
                len -= copyinfo[idx].len;
                src += copyinfo[idx].len;
                idx++;
        }
}
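/*
 * Illustrative usage (assumed caller, e.g. instruction emulation): copy a
 * guest-linear buffer that may straddle a page boundary:
 *
 *      struct vm_copyinfo copyinfo[2];
 *      int error, fault;
 *
 *      error = vm_copy_setup(vm, vcpuid, paging, gla, len, VM_PROT_READ,
 *          copyinfo, nitems(copyinfo), &fault);
 *      if (error == 0 && !fault) {
 *              vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *              vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *      }
 */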
/*
 * Return the amount of in-use and wired memory for the VM. Since
 * these are global stats, only return the values for vCPU 0.
 */
VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
VMM_STAT_DECLARE(VMM_MEM_WIRED);

static void
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

        if (vcpu == 0) {
                vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
                    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
        }
}

static void
vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

        if (vcpu == 0) {
                vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
                    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
        }
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);