/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vmm_ipi.h"
#include "vmm_msr.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"
struct vlapic;

struct vcpu {
	enum vcpu_state	state;
	struct mtx	mtx;
	int		hostcpu;	/* host cpuid this vcpu last ran on */
	uint64_t	guest_msrs[VMM_MSR_NUM];
	struct vlapic	*vlapic;
	int		vcpuid;
	struct savefpu	*guestfpu;	/* guest fpu state */
	void		*stats;
	struct vm_exit	exitinfo;
	enum x2apic_state x2apic_state;
	int		nmi_pending;
};

#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)
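
/*
 * The vcpu lock must be a spin mutex (MTX_SPIN): vm_handle_hlt() sleeps
 * on the vcpu with msleep_spin(), which only works with spin mutexes.
 */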
struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
#define	VM_MAX_MEMORY_SEGMENTS	2

struct vm {
	void		*cookie;	/* processor-specific data */
	void		*iommu;		/* iommu-specific data */
	struct vhpet	*vhpet;		/* virtual HPET */
	struct vioapic	*vioapic;	/* virtual ioapic */
	struct vmspace	*vmspace;	/* guest's address space */
	struct vcpu	vcpu[VM_MAXCPU];
	int		num_mem_segs;
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	char		name[VM_MAX_NAMELEN];

	/*
	 * Set of active vcpus.
	 * An active vcpu is one that has been started implicitly (BSP) or
	 * explicitly (AP) by sending it a startup ipi.
	 */
	cpuset_t	active_cpus;
};
static int vmm_initialized;

static struct vmm_ops *ops;
#define	VMM_INIT()	(ops != NULL ? (*ops->init)() : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
#define	VMRUN(vmi, vcpu, rip, pmap) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMINJECT(vmi, vcpu, type, vec, ec, ecv)	\
	(ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
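
/*
 * The backend dispatch table provided by vmm_ops_intel (VT-x) or
 * vmm_ops_amd (SVM) is declared in <machine/vmm.h>.  A sketch of its
 * shape, inferred from the macro wrappers above (consult the header for
 * the authoritative member types):
 *
 *	struct vmm_ops {
 *		int	(*init)(void);
 *		int	(*cleanup)(void);
 *		void	*(*vminit)(struct vm *, struct pmap *);
 *		int	(*vmrun)(void *, int, register_t, struct pmap *);
 *		void	(*vmcleanup)(void *);
 *		struct vmspace *(*vmspace_alloc)(vm_offset_t, vm_offset_t);
 *		void	(*vmspace_free)(struct vmspace *);
 *		int	(*vmgetreg)(void *, int, int, uint64_t *);
 *		int	(*vmsetreg)(void *, int, int, uint64_t);
 *		int	(*vmgetdesc)(void *, int, int, struct seg_desc *);
 *		int	(*vmsetdesc)(void *, int, int, struct seg_desc *);
 *		int	(*vminject)(void *, int, int, int, uint32_t, int);
 *		int	(*vmgetcap)(void *, int, int, int *);
 *		int	(*vmsetcap)(void *, int, int, int);
 *	};
 */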
#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()
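
/*
 * Setting CR0.TS makes the next FPU instruction raise #NM, so any stray
 * host FPU use is trapped while the guest's state is loaded; clts()
 * clears TS again around the fpusave()/fpurestore() calls below.
 */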
static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);	/* msr_mask can keep track of up to 64 msrs */

static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
static void
vcpu_cleanup(struct vcpu *vcpu)
{
	vlapic_cleanup(vcpu->vlapic);
	vmm_stat_free(vcpu->stats);
	fpu_save_area_free(vcpu->guestfpu);
}

static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpu_id];

	vcpu_lock_init(vcpu);
	vcpu->hostcpu = NOCPU;
	vcpu->vcpuid = vcpu_id;
	vcpu->vlapic = vlapic_init(vm, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_ENABLED);
	vcpu->guestfpu = fpu_save_area_alloc();
	fpu_save_area_reset(vcpu->guestfpu);
	vcpu->stats = vmm_stat_alloc();
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}
static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();
	vmm_ipi_init();

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_msr_init();

	return (VMM_INIT());
}
static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			iommu_cleanup();
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}
static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};
/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
int
vm_create(const char *name, struct vm **retvm)
{
	int i;
	struct vm *vm;
	struct vmspace *vmspace;

	const int BSP = 0;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->cookie = VMINIT(vm, vmspace_pmap(vmspace));
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu_init(vm, i);
		guest_msrs_init(vm, i);
	}

	vm_activate_cpu(vm, BSP);
	vm->vmspace = vmspace;

	*retvm = vm;
	return (0);
}
static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}
void
vm_destroy(struct vm *vm)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	vhpet_cleanup(vm->vhpet);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < vm->num_mem_segs; i++)
		vm_free_mem_seg(vm, &vm->mem_segs[i]);

	vm->num_mem_segs = 0;

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(&vm->vcpu[i]);

	VMSPACE_FREE(vm->vmspace);

	VMCLEANUP(vm->cookie);

	free(vm, M_VM);
}
const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}
int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}
boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;
		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}
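
/*
 * With VM_MAX_MEMORY_SEGMENTS == 2 a guest is limited to two contiguous
 * regions; in practice (a bhyve userland convention, not something this
 * function enforces) these are the ranges below and above the 4GB
 * boundary.
 */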
static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
				   seg->gpa, seg->gpa + seg->len,
				   VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}

static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		rv = vm_map_wire(&vm->vmspace->vm_map,
				 seg->gpa, seg->gpa + seg->len,
				 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}
static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
					 &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}

#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)
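
/*
 * Note the symmetry in vm_iommu_modify(): every page mapped into the
 * guest's iommu domain is removed from the host domain at the same time,
 * so a page is never reachable through both domains at once.
 */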
int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_num_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_num_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vmm_mem_maxaddr();
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}
void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
	    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}
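
/*
 * Callers bracket access to the returned host mapping between the hold
 * and the release, as vm_iommu_modify() above does:
 *
 *	vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE, &cookie);
 *	if (vp != NULL) {
 *		... use the DMAP address 'vp' ...
 *		vm_gpa_release(cookie);
 *	}
 */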
int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
		  struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}

int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
	      vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}
int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}
static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}
/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, boolean_t intr_disabled,
	      boolean_t *retu)
{
	struct vm_exit *vmexit;
	struct vcpu *vcpu;
	int t, sleepticks;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);

	/*
	 * Figure out the number of host ticks until the next apic
	 * timer interrupt in the guest.
	 */
	sleepticks = lapic_timer_tick(vm, vcpuid);

	/*
	 * If the guest local apic timer is disabled then sleep for
	 * a long time but not forever.
	 */
	if (sleepticks < 0)
		sleepticks = hz;

	/*
	 * Do a final check for pending NMI or interrupts before
	 * really putting this thread to sleep.
	 *
	 * These interrupts could have happened any time after we
	 * returned from VMRUN() and before we grabbed the vcpu lock.
	 */
	if (!vm_nmi_pending(vm, vcpuid) &&
	    (intr_disabled || vlapic_pending_intr(vcpu->vlapic) < 0)) {
		if (sleepticks <= 0)
			panic("invalid sleepticks %d", sleepticks);
		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		if (vlapic_enabled(vcpu->vlapic)) {
			msleep_spin(vcpu, &vcpu->mtx, "vmidle", sleepticks);
		} else {
			/*
			 * Spindown the vcpu if the apic is disabled and it
			 * had entered the halted state.
			 */
			*retu = TRUE;
			vmexit = vm_exitinfo(vm, vcpuid);
			vmexit->exitcode = VM_EXITCODE_SPINDOWN_CPU;
			VCPU_CTR0(vm, vcpuid, "spinning down cpu");
		}
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}
	vcpu_unlock(vcpu);

	return (0);
}
static int
vm_handle_paging(struct vm *vm, int vcpuid, boolean_t *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0)
			goto done;
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
	    "ftype = %d", rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	/* restart execution at the faulting instruction */
	vme->inst_length = 0;

	return (0);
}
static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, boolean_t *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	int error, inst_length;
	uint64_t rip, gla, gpa, cr3;
	mem_region_read_t mread;
	mem_region_write_t mwrite;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	rip = vme->rip;
	inst_length = vme->inst_length;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cr3 = vme->u.inst_emul.cr3;
	vie = &vme->u.inst_emul.vie;

	/* Fetch, decode and emulate the faulting instruction */
	if (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3, vie) != 0)
		return (EFAULT);

	if (vmm_decode_instruction(vm, vcpuid, gla, vie) != 0)
		return (EFAULT);

	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = TRUE;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, mread, mwrite, 0);

	/* return to userland to spin up the AP */
	if (error == 0 && vme->exitcode == VM_EXITCODE_SPINUP_AP)
		*retu = TRUE;

	return (error);
}
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval, rip;
	struct vm_exit *vme;
	boolean_t retu, intr_disabled;
	pmap_t pmap;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	rip = vmrun->rip;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_msrs(vm, vcpuid);
	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	vcpu->hostcpu = curcpu;
	error = VMRUN(vm->cookie, vcpuid, rip, pmap);
	vcpu->hostcpu = NOCPU;
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);
	restore_host_msrs(vm, vcpuid);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = FALSE;
		switch (vme->exitcode) {
		case VM_EXITCODE_HLT:
			if ((vme->u.hlt.rflags & PSL_I) == 0)
				intr_disabled = TRUE;
			else
				intr_disabled = FALSE;
			error = vm_handle_hlt(vm, vcpuid, intr_disabled,
			    &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		default:
			retu = TRUE;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == FALSE) {
		rip = vme->rip + vme->inst_length;
		goto restart;
	}

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));

	return (error);
}
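
/*
 * A minimal sketch of the userland side of the vm_run() loop, assuming
 * the VM_RUN ioctl in vmm_dev.c forwards its argument to vm_run() and
 * that handle_exit() is a hypothetical helper supplied by the caller:
 *
 *	struct vm_run vmrun;
 *
 *	vmrun.cpuid = 0;
 *	vmrun.rip = entry_rip;
 *	while (ioctl(vmfd, VM_RUN, &vmrun) == 0) {
 *		if (!handle_exit(&vmrun.vm_exit))
 *			break;
 *		vmrun.rip = vmrun.vm_exit.rip + vmrun.vm_exit.inst_length;
 *	}
 */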
int
vm_inject_event(struct vm *vm, int vcpuid, int type,
		int vector, uint32_t code, int code_valid)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if ((type > VM_EVENT_NONE && type < VM_EVENT_MAX) == 0)
		return (EINVAL);

	if (vector < 0 || vector > 255)
		return (EINVAL);

	return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}
static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->nmi_pending = 1;
	vcpu_notify_event(vm, vcpuid);
	return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->nmi_pending == 0)
		panic("vm_nmi_clear: inconsistent nmi_pending state");

	vcpu->nmi_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}
int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}
uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].guest_msrs);
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

	return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

	return (vm->vhpet);
}
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, i, n;
	int b, s, f;
	char *val, *cp, *cp2;

	/*
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = 0;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = 1;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}
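
/*
 * For example, in /boot/loader.conf:
 *
 *	pptdevs="2/0/0 4/5/0"
 *
 * marks the PCI devices at bus/slot/func 2/0/0 and 4/5/0 for passthru.
 */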
void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}
void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
		CPU_SET(vcpuid, &vm->active_cpus);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}
/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
void
vcpu_notify_event(struct vm *vm, int vcpuid)
{
	int hostcpu;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	hostcpu = vcpu->hostcpu;
	if (hostcpu == NOCPU) {
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	} else {
		if (vcpu->state != VCPU_RUNNING)
			panic("invalid vcpu state %d", vcpu->state);
		if (hostcpu != curcpu)
			ipi_cpu(hostcpu, vmm_ipinum);
	}
	vcpu_unlock(vcpu);
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{

	return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}