/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/apicreg.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include <machine/vmm_dev.h>
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"
struct vcpu {
	enum vcpu_state	state;
	struct mtx	mtx;
	int		hostcpu;	/* host cpuid this vcpu last ran on */
	uint64_t	guest_msrs[VMM_MSR_NUM];
	struct vlapic	*vlapic;
	int		vcpuid;
	struct savefpu	*guestfpu;	/* guest fpu state */
	void		*stats;
	struct vm_exit	exitinfo;
	enum x2apic_state x2apic_state;
	int		nmi_pending;
};
#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)
struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
#define	VM_MAX_MEMORY_SEGMENTS	2

struct vm {
	void		*cookie;	/* processor-specific data */
	void		*iommu;		/* iommu-specific data */
	struct vmspace	*vmspace;	/* guest's address space */
	struct vcpu	vcpu[VM_MAXCPU];
	int		num_mem_segs;
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	char		name[VM_MAX_NAMELEN];

	/*
	 * Set of active vcpus.
	 * An active vcpu is one that has been started implicitly (BSP) or
	 * explicitly (AP) by sending it a startup ipi.
	 */
	cpuset_t	active_cpus;
};
static int vmm_initialized;

static struct vmm_ops *ops;
#define	VMM_INIT()	(ops != NULL ? (*ops->init)() : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
#define	VMRUN(vmi, vcpu, rip, pmap) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMINJECT(vmi, vcpu, type, vec, ec, ecv) \
	(ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
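
/*
 * The wrappers above dispatch through a table of function pointers
 * supplied by the hardware-specific backend (VT-x or SVM). The sketch
 * below shows how a backend would populate such a table; it is
 * illustrative only, with the field names inferred from the macros
 * above and the authoritative definition of 'struct vmm_ops' living
 * in <machine/vmm.h>.
 */
#if 0
static struct vmm_ops vmm_ops_example = {
	.init		= ex_init,		/* module-wide setup */
	.cleanup	= ex_cleanup,
	.vminit		= ex_vminit,		/* per-VM constructor */
	.vmrun		= ex_vmrun,		/* enter guest context */
	.vmcleanup	= ex_vmcleanup,
	.vmspace_alloc	= ex_vmspace_alloc,	/* nested paging setup */
	.vmspace_free	= ex_vmspace_free,
	.vmgetreg	= ex_getreg,
	.vmsetreg	= ex_setreg,
	.vmgetdesc	= ex_getdesc,
	.vmsetdesc	= ex_setdesc,
	.vminject	= ex_inject,
	.vmgetcap	= ex_getcap,
	.vmsetcap	= ex_setcap,
};
#endif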
#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()
static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);	/* msr_mask can keep track of up to 64 msrs */

static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
static void
vcpu_cleanup(struct vcpu *vcpu)
{
	vlapic_cleanup(vcpu->vlapic);
	vmm_stat_free(vcpu->stats);
	fpu_save_area_free(vcpu->guestfpu);
}
static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpu_id];

	vcpu_lock_init(vcpu);
	vcpu->hostcpu = NOCPU;
	vcpu->vcpuid = vcpu_id;
	vcpu->vlapic = vlapic_init(vm, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_ENABLED);
	vcpu->guestfpu = fpu_save_area_alloc();
	fpu_save_area_reset(vcpu->guestfpu);
	vcpu->stats = vmm_stat_alloc();
}
struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}
static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	return (VMM_INIT());
}
static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			iommu_cleanup();
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}
static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};
/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);
SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
int
vm_create(const char *name, struct vm **retvm)
{
	int i;
	struct vm *vm;
	struct vmspace *vmspace;

	const int BSP = 0;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->cookie = VMINIT(vm, vmspace_pmap(vmspace));

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu_init(vm, i);
		guest_msrs_init(vm, i);
	}

	vm_activate_cpu(vm, BSP);
	vm->vmspace = vmspace;

	*retvm = vm;
	return (0);
}
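
/*
 * A minimal sketch of the lifecycle implemented above, as seen from a
 * caller such as the vmm device driver (the VM name and memory size
 * are made up and error handling is elided):
 */
#if 0
	struct vm *vm;
	int error;

	error = vm_create("testvm", &vm);
	if (error == 0) {
		/* back the guest with 256MB of memory at gpa 0 */
		error = vm_malloc(vm, 0, 256 * 1024 * 1024);
		vm_destroy(vm);
	}
#endif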
static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}
void
vm_destroy(struct vm *vm)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	for (i = 0; i < vm->num_mem_segs; i++)
		vm_free_mem_seg(vm, &vm->mem_segs[i]);

	vm->num_mem_segs = 0;

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(&vm->vcpu[i]);

	VMSPACE_FREE(vm->vmspace);

	VMCLEANUP(vm->cookie);

	free(vm, M_VM);
}
const char *
vm_name(struct vm *vm)
{

	return (vm->name);
}
int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}
int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}
boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}
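
/*
 * A worked example of the all-or-nothing rule above, starting from a
 * VM with no memory segments: vm_malloc(vm, 0, 4MB) creates segment
 * [0, 4MB); repeating the identical call is a no-op that returns 0
 * because the range is already fully allocated; vm_malloc(vm, 2MB, 4MB)
 * fails with EINVAL because [2MB, 6MB) is partly allocated and partly
 * free.
 */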
static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}
static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		rv = vm_map_wire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}
static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
			    &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}
#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)
int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_num_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}
int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_num_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vmm_mem_maxaddr();
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}
void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}
void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}
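
/*
 * Typical usage of the hold/release pair above: translate a guest
 * physical address into a host mapping just long enough to access it.
 * The wrapper below is a hypothetical illustration, not part of the
 * driver:
 */
#if 0
static int
write_gpa_u32(struct vm *vm, vm_paddr_t gpa, uint32_t val)
{
	void *cookie, *hva;

	/* 'hva' points into the direct map, or is NULL on failure */
	hva = vm_gpa_hold(vm, gpa, sizeof(val), VM_PROT_WRITE, &cookie);
	if (hva == NULL)
		return (EFAULT);

	*(uint32_t *)hva = val;
	vm_gpa_release(cookie);		/* drop the page hold */
	return (0);
}
#endif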
int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}
int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
    vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}
int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}
int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}
static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}
static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}
int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}
int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}
static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}
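
/*
 * The two helpers above always bracket a trip into the guest so that
 * CR0.TS stays set whenever the FPU holds guest state; vm_run() below
 * uses them in exactly this pattern:
 */
#if 0
	restore_guest_fpustate(vcpu);	/* TS cleared, guest FPU loaded */
	error = VMRUN(vm->cookie, vcpuid, rip, pmap);
	save_guest_fpustate(vcpu);	/* guest FPU saved, TS set again */
#endif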
static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;

	return (0);
}
static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}
static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}
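
/*
 * Example of the transition diagram enforced above: an ioctl path
 * freezes an idle vcpu, runs it, then idles it again. Skipping the
 * FROZEN step (e.g. IDLE -> RUNNING directly) would fail with EBUSY.
 */
#if 0
	vcpu_set_state(vm, vcpuid, VCPU_FROZEN);	/* IDLE -> FROZEN */
	vcpu_set_state(vm, vcpuid, VCPU_RUNNING);	/* FROZEN -> RUNNING */
	vcpu_set_state(vm, vcpuid, VCPU_FROZEN);	/* RUNNING -> FROZEN */
	vcpu_set_state(vm, vcpuid, VCPU_IDLE);		/* FROZEN -> IDLE */
#endif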
/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, boolean_t *retu)
{
	int t, sleepticks;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);

	/*
	 * Figure out the number of host ticks until the next apic
	 * timer interrupt in the guest.
	 */
	sleepticks = lapic_timer_tick(vm, vcpuid);

	/*
	 * If the guest local apic timer is disabled then sleep for
	 * a long time but not forever.
	 */
	if (sleepticks < 0)
		sleepticks = hz;

	/*
	 * Do a final check for pending NMI or interrupts before
	 * really putting this thread to sleep.
	 *
	 * These interrupts could have happened any time after we
	 * returned from VMRUN() and before we grabbed the vcpu lock.
	 */
	if (!vm_nmi_pending(vm, vcpuid) && lapic_pending_intr(vm, vcpuid) < 0) {
		if (sleepticks <= 0)
			panic("invalid sleepticks %d", sleepticks);
		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		msleep_spin(vcpu, &vcpu->mtx, "vmidle", sleepticks);
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}
	vcpu_unlock(vcpu);

	return (0);
}
static int
vm_handle_paging(struct vm *vm, int vcpuid, boolean_t *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0)
			goto done;
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

	VMM_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, ftype = %d",
	    rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	/* restart execution at the faulting instruction */
	vme->inst_length = 0;

	return (0);
}
static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, boolean_t *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	int error, inst_length;
	uint64_t rip, gla, gpa, cr3;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	rip = vme->rip;
	inst_length = vme->inst_length;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cr3 = vme->u.inst_emul.cr3;
	vie = &vme->u.inst_emul.vie;

	vie_init(vie);

	/* Fetch, decode and emulate the faulting instruction */
	if (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3, vie) != 0)
		return (EFAULT);

	if (vmm_decode_instruction(vm, vcpuid, gla, vie) != 0)
		return (EFAULT);

	/* return to userland unless this is a local apic access */
	if (gpa < DEFAULT_APIC_BASE || gpa >= DEFAULT_APIC_BASE + PAGE_SIZE) {
		*retu = TRUE;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie,
	    lapic_mmio_read, lapic_mmio_write, 0);

	/* return to userland to spin up the AP */
	if (error == 0 && vme->exitcode == VM_EXITCODE_SPINUP_AP)
		*retu = TRUE;

	return (error);
}
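
/*
 * The read/write callbacks handed to vmm_emulate_instruction() above
 * perform the actual MMIO access on behalf of the decoded instruction.
 * Below is a sketch of a handler pair for a hypothetical device; the
 * callback signature is an assumption inferred from the lapic_mmio_*
 * handlers used above:
 */
#if 0
static int
mydev_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *rval,
    int size, void *arg)
{
	*rval = 0;		/* value the guest load will observe */
	return (0);
}

static int
mydev_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t wval,
    int size, void *arg)
{
	return (0);		/* discard the guest store */
}
#endif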
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval, rip;
	struct vm_exit *vme;
	boolean_t retu;
	pmap_t pmap;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	rip = vmrun->rip;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_msrs(vm, vcpuid);
	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	vcpu->hostcpu = curcpu;
	error = VMRUN(vm->cookie, vcpuid, rip, pmap);
	vcpu->hostcpu = NOCPU;
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);
	restore_host_msrs(vm, vcpuid);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = FALSE;
		switch (vme->exitcode) {
		case VM_EXITCODE_HLT:
			error = vm_handle_hlt(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		default:
			retu = TRUE;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == FALSE) {
		rip = vme->rip + vme->inst_length;
		goto restart;
	}

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));

	return (error);
}
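
/*
 * Userland reaches the function above through the VM_RUN ioctl on the
 * VM's /dev/vmm node. A sketch of the resulting event loop in a
 * userland monitor ('vmfd', 'entry_rip' and 'handle_exit' are
 * hypothetical):
 */
#if 0
	struct vm_run vmrun;

	vmrun.cpuid = 0;
	vmrun.rip = entry_rip;
	for (;;) {
		if (ioctl(vmfd, VM_RUN, &vmrun) != 0)
			break;
		/* vm_run() returned with 'retu' set; exit info copied out */
		if (handle_exit(&vmrun.vm_exit) != 0)
			break;
	}
#endif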
int
vm_inject_event(struct vm *vm, int vcpuid, int type,
    int vector, uint32_t code, int code_valid)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if ((type > VM_EVENT_NONE && type < VM_EVENT_MAX) == 0)
		return (EINVAL);

	if (vector < 0 || vector > 255)
		return (EINVAL);

	return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}
static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->nmi_pending = 1;
	vm_interrupt_hostcpu(vm, vcpuid);
	return (0);
}
int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->nmi_pending);
}
void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->nmi_pending == 0)
		panic("vm_nmi_clear: inconsistent nmi_pending state");

	vcpu->nmi_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}
int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}
int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}
uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].guest_msrs);
}
struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, i, n;
	int b, s, f;
	char *val, *cp, *cp2;

	/*
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = 0;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = 1;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}
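
/*
 * For example, marking PCI devices 2/0/0 and 4/5/6 as passthru
 * candidates via a loader tunable:
 *
 *	pptdevs="2/0/0 4/5/6"
 */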
void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}
int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate);
	vcpu_unlock(vcpu);

	return (error);
}
enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}
void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
		CPU_SET(vcpuid, &vm->active_cpus);
}
cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}
void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}
int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}
int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}
void
vm_interrupt_hostcpu(struct vm *vm, int vcpuid)
{
	int hostcpu;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	hostcpu = vcpu->hostcpu;
	if (hostcpu == NOCPU) {
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	} else {
		if (vcpu->state != VCPU_RUNNING)
			panic("invalid vcpu state %d", vcpu->state);
		if (hostcpu != curcpu)
			ipi_cpu(hostcpu, vmm_ipinum);
	}
	vcpu_unlock(vcpu);
}
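
/*
 * Together with vm_handle_hlt() this completes the sleep/wake
 * handshake: a halted vcpu sleeps on its own 'vcpu' address, so an
 * interrupt producer kicks it as sketched below ('post_vector' is a
 * hypothetical helper that queues a vector in the target vlapic):
 */
#if 0
	post_vector(vm_lapic(vm, vcpuid), vector);
	vm_interrupt_hostcpu(vm, vcpuid);	/* wakeup or IPI the vcpu */
#endif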
struct vmspace *
vm_get_vmspace(struct vm *vm)
{

	return (vm->vmspace);
}