/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"
struct vcpu {
        enum vcpu_state state;          /* vcpu state */
        struct mtx      mtx;            /* protects 'state' */
        int             hostcpu;        /* host cpuid this vcpu last ran on */
        uint64_t        guest_msrs[VMM_MSR_NUM];
        struct vlapic   *vlapic;        /* virtual local apic */
        int             vcpuid;
        struct savefpu  *guestfpu;      /* guest fpu state */
        void            *stats;         /* per-vcpu statistics */
        struct vm_exit  exitinfo;       /* exit reason and collateral */
        enum x2apic_state x2apic_state; /* x2apic mode of this vcpu */
        int             nmi_pending;    /* NMI queued for injection */
};
#define vcpu_lock_init(v)       mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define vcpu_lock(v)            mtx_lock_spin(&((v)->mtx))
#define vcpu_unlock(v)          mtx_unlock_spin(&((v)->mtx))
#define vcpu_assert_locked(v)   mtx_assert(&((v)->mtx), MA_OWNED)
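
/*
 * The vcpu mutex is a spin mutex: vm_handle_hlt() sleeps on it with
 * msleep_spin(9), which requires one.
 */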
struct mem_seg {
        vm_paddr_t      gpa;
        size_t          len;
        boolean_t       wired;
        vm_object_t     object;
};
#define VM_MAX_MEMORY_SEGMENTS  2

struct vm {
        void            *cookie;        /* processor-specific data */
        void            *iommu;         /* iommu-specific data */
        struct vhpet    *vhpet;         /* virtual HPET */
        struct vioapic  *vioapic;       /* virtual ioapic */
        struct vmspace  *vmspace;       /* guest's address space */
        struct vcpu     vcpu[VM_MAXCPU];
        int             num_mem_segs;
        struct mem_seg  mem_segs[VM_MAX_MEMORY_SEGMENTS];
        char            name[VM_MAX_NAMELEN];

        /*
         * Set of active vcpus.
         * An active vcpu is one that has been started implicitly (BSP) or
         * explicitly (AP) by sending it a startup ipi.
         */
        cpuset_t        active_cpus;
};
static int vmm_initialized;

static struct vmm_ops *ops;
#define VMM_INIT()      (ops != NULL ? (*ops->init)() : 0)
#define VMM_CLEANUP()   (ops != NULL ? (*ops->cleanup)() : 0)

#define VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap) : NULL)
#define VMRUN(vmi, vcpu, rip, pmap) \
        (ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap) : ENXIO)
#define VMCLEANUP(vmi)  (ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define VMSPACE_ALLOC(min, max) \
        (ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define VMSPACE_FREE(vmspace) \
        (ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define VMGETREG(vmi, vcpu, num, retval) \
        (ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETREG(vmi, vcpu, num, val) \
        (ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define VMGETDESC(vmi, vcpu, num, desc) \
        (ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMSETDESC(vmi, vcpu, num, desc) \
        (ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMINJECT(vmi, vcpu, type, vec, ec, ecv) \
        (ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define VMGETCAP(vmi, vcpu, num, retval) \
        (ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETCAP(vmi, vcpu, num, val) \
        (ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
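
/*
 * Each wrapper above degrades gracefully when no hardware backend has
 * registered itself in 'ops': calls that return an error code report
 * ENXIO and calls that return a pointer report NULL.
 */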
#define fpu_start_emulating()   load_cr0(rcr0() | CR0_TS)
#define fpu_stop_emulating()    clts()
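
/*
 * Setting CR0.TS makes the next FPU instruction executed on this cpu trap
 * with a #NM exception; clts() clears the bit and allows direct FPU access
 * again.  This is what lets the host catch any stray FPU use while the
 * guest's FPU state is loaded.
 */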
static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);    /* msr_mask can keep track of up to 64 msrs */

static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
static void
vcpu_cleanup(struct vcpu *vcpu)
{

        vlapic_cleanup(vcpu->vlapic);
        vmm_stat_free(vcpu->stats);
        fpu_save_area_free(vcpu->guestfpu);
}
static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
        struct vcpu *vcpu;

        vcpu = &vm->vcpu[vcpu_id];

        vcpu_lock_init(vcpu);
        vcpu->hostcpu = NOCPU;
        vcpu->vcpuid = vcpu_id;
        vcpu->vlapic = vlapic_init(vm, vcpu_id);
        vm_set_x2apic_state(vm, vcpu_id, X2APIC_ENABLED);
        vcpu->guestfpu = fpu_save_area_alloc();
        fpu_save_area_reset(vcpu->guestfpu);
        vcpu->stats = vmm_stat_alloc();
}
struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
        struct vcpu *vcpu;

        if (cpuid < 0 || cpuid >= VM_MAXCPU)
                panic("vm_exitinfo: invalid cpuid %d", cpuid);

        vcpu = &vm->vcpu[cpuid];

        return (&vcpu->exitinfo);
}
static int
vmm_init(void)
{
        int error;

        vmm_host_state_init();

        error = vmm_mem_init();
        if (error)
                return (error);

        if (vmm_is_intel())
                ops = &vmm_ops_intel;
        else if (vmm_is_amd())
                ops = &vmm_ops_amd;
        else
                return (ENXIO);

        return (VMM_INIT());
}
static int
vmm_handler(module_t mod, int what, void *arg)
{
        int error;

        switch (what) {
        case MOD_LOAD:
                vmmdev_init();
                iommu_init();
                error = vmm_init();
                if (error == 0)
                        vmm_initialized = 1;
                break;
        case MOD_UNLOAD:
                error = vmmdev_cleanup();
                if (error == 0) {
                        iommu_cleanup();
                        error = VMM_CLEANUP();
                        /*
                         * Something bad happened - prevent new
                         * VMs from being created
                         */
                        if (error)
                                vmm_initialized = 0;
                }
                break;
        default:
                error = 0;
                break;
        }
        return (error);
}
static moduledata_t vmm_kmod = {
        "vmm",
        vmm_handler,
        NULL
};
/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);
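
/*
 * The module is normally loaded at boot by adding vmm_load="YES" to
 * loader.conf(5), or at runtime with kldload(8).
 */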
SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
int
vm_create(const char *name, struct vm **retvm)
{
        int i;
        struct vm *vm;
        struct vmspace *vmspace;
        const int BSP = 0;

        /*
         * If vmm.ko could not be successfully initialized then don't attempt
         * to create the virtual machine.
         */
        if (!vmm_initialized)
                return (ENXIO);

        if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
                return (EINVAL);

        vmspace = VMSPACE_ALLOC(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
        if (vmspace == NULL)
                return (ENOMEM);

        vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
        strcpy(vm->name, name);
        vm->cookie = VMINIT(vm, vmspace_pmap(vmspace));
        vm->vioapic = vioapic_init(vm);
        vm->vhpet = vhpet_init(vm);

        for (i = 0; i < VM_MAXCPU; i++) {
                vcpu_init(vm, i);
                guest_msrs_init(vm, i);
        }

        vm_activate_cpu(vm, BSP);
        vm->vmspace = vmspace;

        *retvm = vm;
        return (0);
}
static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

        if (seg->object != NULL)
                vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

        bzero(seg, sizeof(*seg));
}
void
vm_destroy(struct vm *vm)
{
        int i;

        ppt_unassign_all(vm);

        if (vm->iommu != NULL)
                iommu_destroy_domain(vm->iommu);

        vhpet_cleanup(vm->vhpet);
        vioapic_cleanup(vm->vioapic);

        for (i = 0; i < vm->num_mem_segs; i++)
                vm_free_mem_seg(vm, &vm->mem_segs[i]);

        vm->num_mem_segs = 0;

        for (i = 0; i < VM_MAXCPU; i++)
                vcpu_cleanup(&vm->vcpu[i]);

        VMSPACE_FREE(vm->vmspace);

        VMCLEANUP(vm->cookie);

        free(vm, M_VM);
}
const char *
vm_name(struct vm *vm)
{

        return (vm->name);
}
int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
        vm_object_t obj;

        if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
                return (ENOMEM);
        else
                return (0);
}
int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

        vmm_mmio_free(vm->vmspace, gpa, len);
        return (0);
}
boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
        int i;
        vm_paddr_t gpabase, gpalimit;

        for (i = 0; i < vm->num_mem_segs; i++) {
                gpabase = vm->mem_segs[i].gpa;
                gpalimit = gpabase + vm->mem_segs[i].len;
                if (gpa >= gpabase && gpa < gpalimit)
                        return (TRUE);          /* 'gpa' is regular memory */
        }

        if (ppt_is_mmio(vm, gpa))
                return (TRUE);                  /* 'gpa' is pci passthru mmio */

        return (FALSE);
}
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
        int available, allocated;
        struct mem_seg *seg;
        vm_object_t object;
        vm_paddr_t g;

        if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
                return (EINVAL);

        available = allocated = 0;
        g = gpa;
        while (g < gpa + len) {
                if (vm_mem_allocated(vm, g))
                        allocated++;
                else
                        available++;

                g += PAGE_SIZE;
        }

        /*
         * If there are some allocated and some available pages in the address
         * range then it is an error.
         */
        if (allocated && available)
                return (EINVAL);

        /*
         * If the entire address range being requested has already been
         * allocated then there isn't anything more to do.
         */
        if (allocated && available == 0)
                return (0);

        if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
                return (E2BIG);

        seg = &vm->mem_segs[vm->num_mem_segs];

        if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
                return (ENOMEM);

        seg->gpa = gpa;
        seg->len = len;
        seg->object = object;
        seg->wired = FALSE;

        vm->num_mem_segs++;

        return (0);
}
static void
vm_gpa_unwire(struct vm *vm)
{
        int i, rv;
        struct mem_seg *seg;

        for (i = 0; i < vm->num_mem_segs; i++) {
                seg = &vm->mem_segs[i];
                if (!seg->wired)
                        continue;

                rv = vm_map_unwire(&vm->vmspace->vm_map,
                    seg->gpa, seg->gpa + seg->len,
                    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
                KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
                    "%#lx/%ld could not be unwired: %d",
                    vm_name(vm), seg->gpa, seg->len, rv));

                seg->wired = FALSE;
        }
}
static int
vm_gpa_wire(struct vm *vm)
{
        int i, rv;
        struct mem_seg *seg;

        for (i = 0; i < vm->num_mem_segs; i++) {
                seg = &vm->mem_segs[i];
                if (seg->wired)
                        continue;

                rv = vm_map_wire(&vm->vmspace->vm_map,
                    seg->gpa, seg->gpa + seg->len,
                    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
                if (rv != KERN_SUCCESS)
                        break;

                seg->wired = TRUE;
        }

        if (i < vm->num_mem_segs) {
                /*
                 * Undo the wiring before returning an error.
                 */
                vm_gpa_unwire(vm);
                return (EAGAIN);
        }

        return (0);
}
static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
        int i, sz;
        vm_paddr_t gpa, hpa;
        struct mem_seg *seg;
        void *vp, *cookie, *host_domain;

        sz = PAGE_SIZE;
        host_domain = iommu_host_domain();

        for (i = 0; i < vm->num_mem_segs; i++) {
                seg = &vm->mem_segs[i];
                KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
                    vm_name(vm), seg->gpa, seg->len));

                gpa = seg->gpa;
                while (gpa < seg->gpa + seg->len) {
                        vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
                            &cookie);
                        KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
                            vm_name(vm), gpa));

                        vm_gpa_release(cookie);

                        hpa = DMAP_TO_PHYS((uintptr_t)vp);
                        if (map) {
                                iommu_create_mapping(vm->iommu, gpa, hpa, sz);
                                iommu_remove_mapping(host_domain, hpa, sz);
                        } else {
                                iommu_remove_mapping(vm->iommu, gpa, sz);
                                iommu_create_mapping(host_domain, hpa, hpa, sz);
                        }

                        gpa += PAGE_SIZE;
                }
        }

        /*
         * Invalidate the cached translations associated with the domain
         * from which pages were removed.
         */
        if (map)
                iommu_invalidate_tlb(host_domain);
        else
                iommu_invalidate_tlb(vm->iommu);
}
#define vm_iommu_unmap(vm)      vm_iommu_modify((vm), FALSE)
#define vm_iommu_map(vm)        vm_iommu_modify((vm), TRUE)
int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
        int error;

        error = ppt_unassign_device(vm, bus, slot, func);
        if (error)
                return (error);

        if (ppt_num_devices(vm) == 0) {
                vm_iommu_unmap(vm);
                vm_gpa_unwire(vm);
        }
        return (0);
}
int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
        int error;
        vm_paddr_t maxaddr;

        /*
         * Virtual machines with pci passthru devices get special treatment:
         * - the guest physical memory is wired
         * - the iommu is programmed to do the 'gpa' to 'hpa' translation
         *
         * We need to do this before the first pci passthru device is attached.
         */
        if (ppt_num_devices(vm) == 0) {
                KASSERT(vm->iommu == NULL,
                    ("vm_assign_pptdev: iommu must be NULL"));
                maxaddr = vmm_mem_maxaddr();
                vm->iommu = iommu_create_domain(maxaddr);

                error = vm_gpa_wire(vm);
                if (error)
                        return (error);

                vm_iommu_map(vm);
        }

        error = ppt_assign_device(vm, bus, slot, func);
        return (error);
}
void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
            void **cookie)
{
        int count, pageoff;
        vm_page_t m;

        pageoff = gpa & PAGE_MASK;
        if (len > PAGE_SIZE - pageoff)
                panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

        count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
            trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

        if (count == 1) {
                *cookie = m;
                return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
        } else {
                *cookie = NULL;
                return (NULL);
        }
}
void
vm_gpa_release(void *cookie)
{
        vm_page_t m = cookie;

        vm_page_lock(m);
        vm_page_unhold(m);
        vm_page_unlock(m);
}
int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
                  struct vm_memory_segment *seg)
{
        int i;

        for (i = 0; i < vm->num_mem_segs; i++) {
                if (gpabase == vm->mem_segs[i].gpa) {
                        seg->gpa = vm->mem_segs[i].gpa;
                        seg->len = vm->mem_segs[i].len;
                        seg->wired = vm->mem_segs[i].wired;
                        return (0);
                }
        }
        return (-1);
}
int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
              vm_offset_t *offset, struct vm_object **object)
{
        int i;
        size_t seg_len;
        vm_paddr_t seg_gpa;
        vm_object_t seg_obj;

        for (i = 0; i < vm->num_mem_segs; i++) {
                if ((seg_obj = vm->mem_segs[i].object) == NULL)
                        continue;

                seg_gpa = vm->mem_segs[i].gpa;
                seg_len = vm->mem_segs[i].len;

                if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
                        *offset = gpa - seg_gpa;
                        *object = seg_obj;
                        vm_object_reference(seg_obj);
                        return (0);
                }
        }

        return (EINVAL);
}
int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (reg >= VM_REG_LAST)
                return (EINVAL);

        return (VMGETREG(vm->cookie, vcpu, reg, retval));
}
int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (reg >= VM_REG_LAST)
                return (EINVAL);

        return (VMSETREG(vm->cookie, vcpu, reg, val));
}
static boolean_t
is_descriptor_table(int reg)
{

        switch (reg) {
        case VM_REG_GUEST_IDTR:
        case VM_REG_GUEST_GDTR:
                return (TRUE);
        default:
                return (FALSE);
        }
}
static boolean_t
is_segment_register(int reg)
{

        switch (reg) {
        case VM_REG_GUEST_ES:
        case VM_REG_GUEST_CS:
        case VM_REG_GUEST_SS:
        case VM_REG_GUEST_DS:
        case VM_REG_GUEST_FS:
        case VM_REG_GUEST_GS:
        case VM_REG_GUEST_TR:
        case VM_REG_GUEST_LDTR:
                return (TRUE);
        default:
                return (FALSE);
        }
}
int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
                struct seg_desc *desc)
{

        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (!is_segment_register(reg) && !is_descriptor_table(reg))
                return (EINVAL);

        return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}
int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
                struct seg_desc *desc)
{

        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (!is_segment_register(reg) && !is_descriptor_table(reg))
                return (EINVAL);

        return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

        /* flush host state to the pcb */
        fpuexit(curthread);

        /* restore guest FPU state */
        fpu_stop_emulating();
        fpurestore(vcpu->guestfpu);

        /*
         * The FPU is now "dirty" with the guest's state so turn on emulation
         * to trap any access to the FPU by the host.
         */
        fpu_start_emulating();
}
static void
save_guest_fpustate(struct vcpu *vcpu)
{

        if ((rcr0() & CR0_TS) == 0)
                panic("fpu emulation not enabled in host!");

        /* save guest FPU state */
        fpu_stop_emulating();
        fpusave(vcpu->guestfpu);
        fpu_start_emulating();
}
static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
        int error;

        vcpu_assert_locked(vcpu);

        /*
         * The following state transitions are allowed:
         * IDLE -> FROZEN -> IDLE
         * FROZEN -> RUNNING -> FROZEN
         * FROZEN -> SLEEPING -> FROZEN
         */
        switch (vcpu->state) {
        case VCPU_IDLE:
        case VCPU_RUNNING:
        case VCPU_SLEEPING:
                error = (newstate != VCPU_FROZEN);
                break;
        case VCPU_FROZEN:
                error = (newstate == VCPU_FROZEN);
                break;
        default:
                error = 1;
                break;
        }

        if (error)
                return (EBUSY);

        vcpu->state = newstate;
        return (0);
}
static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
        int error;

        if ((error = vcpu_set_state(vm, vcpuid, newstate)) != 0)
                panic("Error %d setting state to %d", error, newstate);
}
static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
        int error;

        if ((error = vcpu_set_state_locked(vcpu, newstate)) != 0)
                panic("Error %d setting state to %d", error, newstate);
}
/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, boolean_t *retu)
{
        struct vcpu *vcpu;
        int sleepticks, t;

        vcpu = &vm->vcpu[vcpuid];

        vcpu_lock(vcpu);

        /*
         * Figure out the number of host ticks until the next apic
         * timer interrupt in the guest.
         */
        sleepticks = lapic_timer_tick(vm, vcpuid);

        /*
         * If the guest local apic timer is disabled then sleep for
         * a long time but not forever.
         */
        if (sleepticks < 0)
                sleepticks = hz;

        /*
         * Do a final check for pending NMI or interrupts before
         * really putting this thread to sleep.
         *
         * These interrupts could have happened any time after we
         * returned from VMRUN() and before we grabbed the vcpu lock.
         */
        if (!vm_nmi_pending(vm, vcpuid) && lapic_pending_intr(vm, vcpuid) < 0) {
                if (sleepticks <= 0)
                        panic("invalid sleepticks %d", sleepticks);
                t = ticks;
                vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
                msleep_spin(vcpu, &vcpu->mtx, "vmidle", sleepticks);
                vcpu_require_state_locked(vcpu, VCPU_FROZEN);
                vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
        }
        vcpu_unlock(vcpu);

        return (0);
}
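
/*
 * The matching wakeup for the msleep_spin() above is in
 * vm_interrupt_hostcpu(), which does a wakeup_one() on the vcpu when an
 * interrupt or NMI is posted to a vcpu that is VCPU_SLEEPING.
 */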
static int
vm_handle_paging(struct vm *vm, int vcpuid, boolean_t *retu)
{
        int rv, ftype;
        struct vm_map *map;
        struct vcpu *vcpu;
        struct vm_exit *vme;

        vcpu = &vm->vcpu[vcpuid];
        vme = &vcpu->exitinfo;

        ftype = vme->u.paging.fault_type;
        KASSERT(ftype == VM_PROT_READ ||
            ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
            ("vm_handle_paging: invalid fault_type %d", ftype));

        if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
                rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
                    vme->u.paging.gpa, ftype);
                if (rv == 0)
                        goto done;
        }

        map = &vm->vmspace->vm_map;
        rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

        VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
            "ftype = %d", rv, vme->u.paging.gpa, ftype);

        if (rv != KERN_SUCCESS)
                return (EFAULT);
done:
        /* restart execution at the faulting instruction */
        vme->inst_length = 0;

        return (0);
}
static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, boolean_t *retu)
{
        struct vie *vie;
        struct vcpu *vcpu;
        struct vm_exit *vme;
        int error, inst_length;
        uint64_t rip, gla, gpa, cr3;
        mem_region_read_t mread;
        mem_region_write_t mwrite;

        vcpu = &vm->vcpu[vcpuid];
        vme = &vcpu->exitinfo;

        rip = vme->rip;
        inst_length = vme->inst_length;

        gla = vme->u.inst_emul.gla;
        gpa = vme->u.inst_emul.gpa;
        cr3 = vme->u.inst_emul.cr3;
        vie = &vme->u.inst_emul.vie;

        /* Fetch, decode and emulate the faulting instruction */
        if (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3, vie) != 0)
                return (EFAULT);

        if (vmm_decode_instruction(vm, vcpuid, gla, vie) != 0)
                return (EFAULT);

        /* return to userland unless this is an in-kernel emulated device */
        if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
                mread = lapic_mmio_read;
                mwrite = lapic_mmio_write;
        } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
                mread = vioapic_mmio_read;
                mwrite = vioapic_mmio_write;
        } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
                mread = vhpet_mmio_read;
                mwrite = vhpet_mmio_write;
        } else {
                *retu = TRUE;
                return (0);
        }

        error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, mread, mwrite, 0);

        /* return to userland to spin up the AP */
        if (error == 0 && vme->exitcode == VM_EXITCODE_SPINUP_AP)
                *retu = TRUE;

        return (error);
}
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
        int error, vcpuid;
        struct vcpu *vcpu;
        struct pcb *pcb;
        uint64_t tscval, rip;
        struct vm_exit *vme;
        boolean_t retu;
        pmap_t pmap;

        vcpuid = vmrun->cpuid;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        pmap = vmspace_pmap(vm->vmspace);
        vcpu = &vm->vcpu[vcpuid];
        vme = &vcpu->exitinfo;
        rip = vmrun->rip;
restart:
        critical_enter();

        KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
            ("vm_run: absurd pm_active"));

        tscval = rdtsc();

        pcb = PCPU_GET(curpcb);
        set_pcb_flags(pcb, PCB_FULL_IRET);

        restore_guest_msrs(vm, vcpuid);
        restore_guest_fpustate(vcpu);

        vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
        vcpu->hostcpu = curcpu;
        error = VMRUN(vm->cookie, vcpuid, rip, pmap);
        vcpu->hostcpu = NOCPU;
        vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

        save_guest_fpustate(vcpu);
        restore_host_msrs(vm, vcpuid);

        vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

        critical_exit();

        if (error == 0) {
                retu = FALSE;
                switch (vme->exitcode) {
                case VM_EXITCODE_HLT:
                        error = vm_handle_hlt(vm, vcpuid, &retu);
                        break;
                case VM_EXITCODE_PAGING:
                        error = vm_handle_paging(vm, vcpuid, &retu);
                        break;
                case VM_EXITCODE_INST_EMUL:
                        error = vm_handle_inst_emul(vm, vcpuid, &retu);
                        break;
                default:
                        retu = TRUE;    /* handled in userland */
                        break;
                }
        }

        if (error == 0 && retu == FALSE) {
                rip = vme->rip + vme->inst_length;
                goto restart;
        }

        /* copy the exit information */
        bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));

        return (error);
}
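
/*
 * Note that vm_run() loops in the kernel for as long as the exit can be
 * handled here (hlt, nested page fault, in-kernel device emulation).  Any
 * other exitcode ends the loop and is copied out to the userland process
 * (bhyve) to handle.
 */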
int
vm_inject_event(struct vm *vm, int vcpuid, int type,
                int vector, uint32_t code, int code_valid)
{
        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        if ((type > VM_EVENT_NONE && type < VM_EVENT_MAX) == 0)
                return (EINVAL);

        if (vector < 0 || vector > 255)
                return (EINVAL);

        return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}
static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        vcpu = &vm->vcpu[vcpuid];

        vcpu->nmi_pending = 1;
        vm_interrupt_hostcpu(vm, vcpuid);
        return (0);
}
int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        return (vcpu->nmi_pending);
}
void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        if (vcpu->nmi_pending == 0)
                panic("vm_nmi_clear: inconsistent nmi_pending state");

        vcpu->nmi_pending = 0;
        vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}
int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (type < 0 || type >= VM_CAP_MAX)
                return (EINVAL);

        return (VMGETCAP(vm->cookie, vcpu, type, retval));
}
int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (type < 0 || type >= VM_CAP_MAX)
                return (EINVAL);

        return (VMSETCAP(vm->cookie, vcpu, type, val));
}
uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
        return (vm->vcpu[cpu].guest_msrs);
}
struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
        return (vm->vcpu[cpu].vlapic);
}
struct vioapic *
vm_ioapic(struct vm *vm)
{

        return (vm->vioapic);
}
struct vhpet *
vm_hpet(struct vm *vm)
{

        return (vm->vhpet);
}
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
        int found, i, n;
        int b, s, f;
        char *val, *cp, *cp2;

        /*
         * The length of an environment variable is limited to 128 bytes which
         * puts an upper limit on the number of passthru devices that may be
         * specified using a single environment variable.
         *
         * Work around this by scanning multiple environment variable
         * names instead of a single one - yuck!
         */
        const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

        /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
        found = 0;
        for (i = 0; names[i] != NULL && !found; i++) {
                cp = val = getenv(names[i]);
                while (cp != NULL && *cp != '\0') {
                        if ((cp2 = strchr(cp, ' ')) != NULL)
                                *cp2 = '\0';

                        n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
                        if (n == 3 && bus == b && slot == s && func == f) {
                                found = 1;
                                break;
                        }

                        if (cp2 != NULL)
                                *cp2++ = ' ';

                        cp = cp2;
                }
                freeenv(val);
        }
        return (found);
}
void *
vm_iommu_domain(struct vm *vm)
{

        return (vm->iommu);
}
int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
        int error;
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        vcpu_lock(vcpu);
        error = vcpu_set_state_locked(vcpu, newstate);
        vcpu_unlock(vcpu);

        return (error);
}
enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
        struct vcpu *vcpu;
        enum vcpu_state state;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        vcpu_lock(vcpu);
        state = vcpu->state;
        if (hostcpu != NULL)
                *hostcpu = vcpu->hostcpu;
        vcpu_unlock(vcpu);

        return (state);
}
void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

        if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
                CPU_SET(vcpuid, &vm->active_cpus);
}
cpuset_t
vm_active_cpus(struct vm *vm)
{

        return (vm->active_cpus);
}
void *
vcpu_stats(struct vm *vm, int vcpuid)
{

        return (vm->vcpu[vcpuid].stats);
}
int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        *state = vm->vcpu[vcpuid].x2apic_state;

        return (0);
}
int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        if (state >= X2APIC_STATE_LAST)
                return (EINVAL);

        vm->vcpu[vcpuid].x2apic_state = state;

        vlapic_set_x2apic_state(vm, vcpuid, state);

        return (0);
}
void
vm_interrupt_hostcpu(struct vm *vm, int vcpuid)
{
        int hostcpu;
        struct vcpu *vcpu;

        vcpu = &vm->vcpu[vcpuid];

        vcpu_lock(vcpu);
        hostcpu = vcpu->hostcpu;
        if (hostcpu == NOCPU) {
                if (vcpu->state == VCPU_SLEEPING)
                        wakeup_one(vcpu);
        } else {
                if (vcpu->state != VCPU_RUNNING)
                        panic("invalid vcpu state %d", vcpu->state);
                if (hostcpu != curcpu)
                        ipi_cpu(hostcpu, vmm_ipinum);
        }
        vcpu_unlock(vcpu);
}
struct vmspace *
vm_get_vmspace(struct vm *vm)
{

        return (vm->vmspace);
}
int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
        /*
         * XXX apic id is assumed to be numerically identical to vcpu id
         */
        return (apicid);
}