/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/apicreg.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"
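/*
 * Software state of a single virtual cpu.  The spin mutex protects the
 * run state and 'hostcpu' fields, which are also read from other host
 * cpus by vm_interrupt_hostcpu().
 */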
struct vcpu {
        int             flags;
        enum vcpu_state state;
        struct mtx      mtx;
        int             pincpu;         /* host cpuid this vcpu is bound to */
        int             hostcpu;        /* host cpuid this vcpu last ran on */
        uint64_t        guest_msrs[VMM_MSR_NUM];
        struct vlapic   *vlapic;
        int             vcpuid;
        struct savefpu  *guestfpu;      /* guest fpu state */
        void            *stats;
        struct vm_exit  exitinfo;
        enum x2apic_state x2apic_state;
        int             nmi_pending;
};
#define VCPU_F_PINNED   0x0001

#define VCPU_PINCPU(vm, vcpuid) \
    ((vm->vcpu[vcpuid].flags & VCPU_F_PINNED) ? vm->vcpu[vcpuid].pincpu : -1)

#define VCPU_UNPIN(vm, vcpuid)  (vm->vcpu[vcpuid].flags &= ~VCPU_F_PINNED)

#define VCPU_PIN(vm, vcpuid, host_cpuid)                                \
do {                                                                    \
        vm->vcpu[vcpuid].flags |= VCPU_F_PINNED;                        \
        vm->vcpu[vcpuid].pincpu = host_cpuid;                           \
} while (0)
#define vcpu_lock_init(v)       mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define vcpu_lock(v)            mtx_lock_spin(&((v)->mtx))
#define vcpu_unlock(v)          mtx_unlock_spin(&((v)->mtx))

#define VM_MAX_MEMORY_SEGMENTS  2
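/*
 * Software state of a virtual machine: the backend cookie, the iommu
 * domain that passthru devices are attached to, the per-vcpu state and
 * the guest physical memory segments backing the VM.
 */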
struct vm {
        void            *cookie;        /* processor-specific data */
        void            *iommu;         /* iommu-specific data */
        struct vcpu     vcpu[VM_MAXCPU];
        int             num_mem_segs;
        struct vm_memory_segment mem_segs[VM_MAX_MEMORY_SEGMENTS];
        char            name[VM_MAX_NAMELEN];

        /*
         * Set of active vcpus.
         * An active vcpu is one that has been started implicitly (BSP) or
         * explicitly (AP) by sending it a startup ipi.
         */
        cpuset_t        active_cpus;
};
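/*
 * 'ops' points at the hardware-specific backend (Intel VT-x or AMD SVM)
 * selected at module load time.  Each wrapper macro below degrades to a
 * harmless error value when no backend has registered itself.
 */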
static struct vmm_ops *ops;

#define VMM_INIT()      (ops != NULL ? (*ops->init)() : 0)
#define VMM_CLEANUP()   (ops != NULL ? (*ops->cleanup)() : 0)

#define VMINIT(vm)      (ops != NULL ? (*ops->vminit)(vm) : NULL)
#define VMRUN(vmi, vcpu, rip) \
        (ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip) : ENXIO)
#define VMCLEANUP(vmi)  (ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define VMMMAP_SET(vmi, gpa, hpa, len, attr, prot, spm)                 \
        (ops != NULL ?                                                  \
        (*ops->vmmmap_set)(vmi, gpa, hpa, len, attr, prot, spm) :       \
        ENXIO)
#define VMMMAP_GET(vmi, gpa) \
        (ops != NULL ? (*ops->vmmmap_get)(vmi, gpa) : ENXIO)
#define VMGETREG(vmi, vcpu, num, retval) \
        (ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETREG(vmi, vcpu, num, val) \
        (ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define VMGETDESC(vmi, vcpu, num, desc) \
        (ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMSETDESC(vmi, vcpu, num, desc) \
        (ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMINJECT(vmi, vcpu, type, vec, ec, ecv) \
        (ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define VMGETCAP(vmi, vcpu, num, retval) \
        (ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETCAP(vmi, vcpu, num, val) \
        (ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
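/*
 * Setting CR0.TS makes the next FPU access trap (#NM), which keeps the
 * host from silently clobbering guest FPU state; clts() clears the flag
 * again around the explicit save/restore done below.
 */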
#define fpu_start_emulating()   load_cr0(rcr0() | CR0_TS)
#define fpu_stop_emulating()    clts()

static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);    /* msr_mask can keep track of up to 64 msrs */

/* statistics */
static VMM_STAT_DEFINE(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
static void
vcpu_cleanup(struct vcpu *vcpu)
{
        vlapic_cleanup(vcpu->vlapic);
        vmm_stat_free(vcpu->stats);
        fpu_save_area_free(vcpu->guestfpu);
}
static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
        struct vcpu *vcpu;

        vcpu = &vm->vcpu[vcpu_id];

        vcpu_lock_init(vcpu);
        vcpu->hostcpu = NOCPU;
        vcpu->vcpuid = vcpu_id;
        vcpu->vlapic = vlapic_init(vm, vcpu_id);
        vm_set_x2apic_state(vm, vcpu_id, X2APIC_ENABLED);
        vcpu->guestfpu = fpu_save_area_alloc();
        fpu_save_area_reset(vcpu->guestfpu);
        vcpu->stats = vmm_stat_alloc();
}
struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
        struct vcpu *vcpu;

        if (cpuid < 0 || cpuid >= VM_MAXCPU)
                panic("vm_exitinfo: invalid cpuid %d", cpuid);

        vcpu = &vm->vcpu[cpuid];

        return (&vcpu->exitinfo);
}
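/*
 * One-time module initialization: set up host state and the IPI used to
 * kick vcpus, then probe the CPU and hook up the matching backend.
 */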
static int
vmm_init(void)
{
        int error;

        vmm_host_state_init();
        vmm_ipi_init();

        error = vmm_mem_init();
        if (error)
                return (error);

        if (vmm_is_intel())
                ops = &vmm_ops_intel;
        else if (vmm_is_amd())
                ops = &vmm_ops_amd;
        else
                return (ENXIO);

        vmm_msr_init();

        return (VMM_INIT());
}
static int
vmm_handler(module_t mod, int what, void *arg)
{
        int error;

        switch (what) {
        case MOD_LOAD:
                vmmdev_init();
                iommu_init();
                error = vmm_init();
                break;
        case MOD_UNLOAD:
                error = vmmdev_cleanup();
                if (error == 0) {
                        iommu_cleanup();
                        vmm_ipi_cleanup();
                        error = VMM_CLEANUP();
                }
                break;
        default:
                error = 0;
                break;
        }
        return (error);
}
static moduledata_t vmm_kmod = {
        "vmm",
        vmm_handler,
        NULL
};

/*
 * Execute the module load handler after the pci passthru driver has had
 * a chance to claim devices. We need this information at the time we do
 * iommu initialization.
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_CONFIGURE + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);
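/*
 * Allocate and initialize a new VM: per-vcpu state and MSR save areas,
 * an iommu domain spanning all of host memory, and the BSP marked active.
 */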
struct vm *
vm_create(const char *name)
{
        int i;
        struct vm *vm;
        vm_paddr_t maxaddr;

        const int BSP = 0;

        if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
                return (NULL);

        vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
        strcpy(vm->name, name);
        vm->cookie = VMINIT(vm);

        for (i = 0; i < VM_MAXCPU; i++) {
                vcpu_init(vm, i);
                guest_msrs_init(vm, i);
        }

        maxaddr = vmm_mem_maxaddr();
        vm->iommu = iommu_create_domain(maxaddr);
        vm_activate_cpu(vm, BSP);

        return (vm);
}
static void
vm_free_mem_seg(struct vm *vm, struct vm_memory_segment *seg)
{
        size_t len;
        vm_paddr_t hpa;
        void *host_domain;

        host_domain = iommu_host_domain();

        len = 0;
        while (len < seg->len) {
                hpa = vm_gpa2hpa(vm, seg->gpa + len, PAGE_SIZE);
                if (hpa == (vm_paddr_t)-1) {
                        panic("vm_free_mem_seg: cannot free hpa "
                              "associated with gpa 0x%016lx", seg->gpa + len);
                }

                /*
                 * Remove the 'gpa' to 'hpa' mapping in the VM's domain.
                 * And resurrect the 1:1 mapping for 'hpa' in 'host_domain'.
                 */
                iommu_remove_mapping(vm->iommu, seg->gpa + len, PAGE_SIZE);
                iommu_create_mapping(host_domain, hpa, hpa, PAGE_SIZE);

                vmm_mem_free(hpa, PAGE_SIZE);

                len += PAGE_SIZE;
        }

        /*
         * Invalidate cached translations associated with 'vm->iommu' since
         * we have now moved some pages from it.
         */
        iommu_invalidate_tlb(vm->iommu);

        bzero(seg, sizeof(struct vm_memory_segment));
}
void
vm_destroy(struct vm *vm)
{
        int i;

        ppt_unassign_all(vm);

        for (i = 0; i < vm->num_mem_segs; i++)
                vm_free_mem_seg(vm, &vm->mem_segs[i]);

        vm->num_mem_segs = 0;

        for (i = 0; i < VM_MAXCPU; i++)
                vcpu_cleanup(&vm->vcpu[i]);

        iommu_destroy_domain(vm->iommu);

        VMCLEANUP(vm->cookie);

        free(vm, M_VM);
}

const char *
vm_name(struct vm *vm)
{
        return (vm->name);
}
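/*
 * MMIO regions are mapped uncacheable and read/write; unmapping simply
 * replaces the range with a zero mapping and no access permissions.
 */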
int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
        const boolean_t spok = TRUE;    /* superpage mappings are ok */

        return (VMMMAP_SET(vm->cookie, gpa, hpa, len, VM_MEMATTR_UNCACHEABLE,
                           VM_PROT_RW, spok));
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
        const boolean_t spok = TRUE;    /* superpage mappings are ok */

        return (VMMMAP_SET(vm->cookie, gpa, 0, len, 0,
                           VM_PROT_NONE, spok));
}
/*
 * Returns TRUE if 'gpa' is available for allocation and FALSE otherwise
 */
static boolean_t
vm_gpa_available(struct vm *vm, vm_paddr_t gpa)
{
        int i;
        vm_paddr_t gpabase, gpalimit;

        if (gpa & PAGE_MASK)
                panic("vm_gpa_available: gpa (0x%016lx) not page aligned", gpa);

        for (i = 0; i < vm->num_mem_segs; i++) {
                gpabase = vm->mem_segs[i].gpa;
                gpalimit = gpabase + vm->mem_segs[i].len;
                if (gpa >= gpabase && gpa < gpalimit)
                        return (FALSE);         /* 'gpa' is allocated */
        }

        return (TRUE);
}
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
        int error, available, allocated;
        struct vm_memory_segment *seg;
        vm_paddr_t g, hpa;
        void *host_domain;

        const boolean_t spok = TRUE;    /* superpage mappings are ok */

        if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
                return (EINVAL);

        available = allocated = 0;
        g = gpa;
        while (g < gpa + len) {
                if (vm_gpa_available(vm, g))
                        available++;
                else
                        allocated++;
                g += PAGE_SIZE;
        }

        /*
         * If there are some allocated and some available pages in the address
         * range then it is an error.
         */
        if (allocated && available)
                return (EINVAL);

        /*
         * If the entire address range being requested has already been
         * allocated then there isn't anything more to do.
         */
        if (allocated && available == 0)
                return (0);

        if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
                return (E2BIG);

        host_domain = iommu_host_domain();

        seg = &vm->mem_segs[vm->num_mem_segs];

        error = 0;
        seg->gpa = gpa;
        seg->len = 0;
        while (seg->len < len) {
                hpa = vmm_mem_alloc(PAGE_SIZE);
                if (hpa == 0) {
                        error = ENOMEM;
                        break;
                }

                error = VMMMAP_SET(vm->cookie, gpa + seg->len, hpa, PAGE_SIZE,
                                   VM_MEMATTR_WRITE_BACK, VM_PROT_ALL, spok);
                if (error)
                        break;

                /*
                 * Remove the 1:1 mapping for 'hpa' from the 'host_domain'.
                 * Add mapping for 'gpa + seg->len' to 'hpa' in the VM's domain.
                 */
                iommu_remove_mapping(host_domain, hpa, PAGE_SIZE);
                iommu_create_mapping(vm->iommu, gpa + seg->len, hpa, PAGE_SIZE);

                seg->len += PAGE_SIZE;
        }

        if (error) {
                vm_free_mem_seg(vm, seg);
                return (error);
        }

        /*
         * Invalidate cached translations associated with 'host_domain' since
         * we have now moved some pages from it.
         */
        iommu_invalidate_tlb(host_domain);

        vm->num_mem_segs++;

        return (0);
}
vm_paddr_t
vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t len)
{
        vm_paddr_t nextpage;

        nextpage = rounddown(gpa + PAGE_SIZE, PAGE_SIZE);
        if (len > nextpage - gpa)
                panic("vm_gpa2hpa: invalid gpa/len: 0x%016lx/%lu", gpa, len);

        return (VMMMAP_GET(vm->cookie, gpa));
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
                  struct vm_memory_segment *seg)
{
        int i;

        for (i = 0; i < vm->num_mem_segs; i++) {
                if (gpabase == vm->mem_segs[i].gpa) {
                        *seg = vm->mem_segs[i];
                        return (0);
                }
        }
        return (-1);
}
int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (reg >= VM_REG_LAST)
                return (EINVAL);

        return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (reg >= VM_REG_LAST)
                return (EINVAL);

        return (VMSETREG(vm->cookie, vcpu, reg, val));
}
static boolean_t
is_descriptor_table(int reg)
{

        switch (reg) {
        case VM_REG_GUEST_IDTR:
        case VM_REG_GUEST_GDTR:
                return (TRUE);
        default:
                return (FALSE);
        }
}

static boolean_t
is_segment_register(int reg)
{

        switch (reg) {
        case VM_REG_GUEST_ES:
        case VM_REG_GUEST_CS:
        case VM_REG_GUEST_SS:
        case VM_REG_GUEST_DS:
        case VM_REG_GUEST_FS:
        case VM_REG_GUEST_GS:
        case VM_REG_GUEST_TR:
        case VM_REG_GUEST_LDTR:
                return (TRUE);
        default:
                return (FALSE);
        }
}
int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
                struct seg_desc *desc)
{

        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (!is_segment_register(reg) && !is_descriptor_table(reg))
                return (EINVAL);

        return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
                struct seg_desc *desc)
{

        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (!is_segment_register(reg) && !is_descriptor_table(reg))
                return (EINVAL);

        return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}
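/*
 * vcpu pinning binds the current host thread to a single host cpu via
 * sched_bind(); it is therefore only meaningful while that thread is
 * the one multiplexing the vcpu, as the XXXSMP note below points out.
 */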
int
vm_get_pinning(struct vm *vm, int vcpuid, int *cpuid)
{

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        *cpuid = VCPU_PINCPU(vm, vcpuid);

        return (0);
}

int
vm_set_pinning(struct vm *vm, int vcpuid, int host_cpuid)
{
        struct thread *td;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        td = curthread;         /* XXXSMP only safe when muxing vcpus */

        /* unpin */
        if (host_cpuid < 0) {
                VCPU_UNPIN(vm, vcpuid);
                thread_lock(td);
                sched_unbind(td);
                thread_unlock(td);
                return (0);
        }

        if (CPU_ABSENT(host_cpuid))
                return (EINVAL);

        /*
         * XXX we should check that 'host_cpuid' has not already been pinned
         * by another vm.
         */
        thread_lock(td);
        sched_bind(td, host_cpuid);
        thread_unlock(td);
        VCPU_PIN(vm, vcpuid, host_cpuid);

        return (0);
}
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

        /* flush host state to the pcb */
        fpuexit(curthread);

        /* restore guest FPU state */
        fpu_stop_emulating();
        fpurestore(vcpu->guestfpu);

        /*
         * The FPU is now "dirty" with the guest's state so turn on emulation
         * to trap any access to the FPU by the host.
         */
        fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

        if ((rcr0() & CR0_TS) == 0)
                panic("fpu emulation not enabled in host!");

        /* save guest FPU state */
        fpu_stop_emulating();
        fpusave(vcpu->guestfpu);
        fpu_start_emulating();
}
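/*
 * Run loop for a single vcpu: enter the guest via the backend VMRUN hook
 * and, when the guest executes 'hlt', put the host thread to sleep until
 * the next apic timer tick or a pending interrupt/NMI, then re-enter.
 */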
static VMM_STAT_DEFINE(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
        int error, vcpuid, sleepticks, t;
        struct vcpu *vcpu;
        struct pcb *pcb;
        uint64_t tscval, rip;
        struct vm_exit *vme;

        vcpuid = vmrun->cpuid;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        vcpu = &vm->vcpu[vcpuid];
        vme = &vmrun->vm_exit;
        rip = vmrun->rip;
restart:
        critical_enter();

        tscval = rdtsc();

        pcb = PCPU_GET(curpcb);
        set_pcb_flags(pcb, PCB_FULL_IRET);

        restore_guest_msrs(vm, vcpuid);
        restore_guest_fpustate(vcpu);

        vcpu->hostcpu = curcpu;
        error = VMRUN(vm->cookie, vcpuid, rip);
        vcpu->hostcpu = NOCPU;

        save_guest_fpustate(vcpu);
        restore_host_msrs(vm, vcpuid);

        vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

        /* copy the exit information */
        bcopy(&vcpu->exitinfo, vme, sizeof(struct vm_exit));

        critical_exit();

        /*
         * Oblige the guest's desire to 'hlt' by sleeping until the vcpu
         * is ready to run.
         */
        if (error == 0 && vme->exitcode == VM_EXITCODE_HLT) {
                vcpu_lock(vcpu);

                /*
                 * Figure out the number of host ticks until the next apic
                 * timer interrupt in the guest.
                 */
                sleepticks = lapic_timer_tick(vm, vcpuid);

                /*
                 * If the guest local apic timer is disabled then sleep for
                 * a long time but not forever.
                 */
                if (sleepticks < 0)
                        sleepticks = hz;

                /*
                 * Do a final check for pending NMI or interrupts before
                 * really putting this thread to sleep.
                 *
                 * These interrupts could have happened any time after we
                 * returned from VMRUN() and before we grabbed the vcpu lock.
                 */
                if (!vm_nmi_pending(vm, vcpuid) &&
                    lapic_pending_intr(vm, vcpuid) < 0) {
                        if (sleepticks <= 0)
                                panic("invalid sleepticks %d", sleepticks);
                        t = ticks;
                        msleep_spin(vcpu, &vcpu->mtx, "vmidle", sleepticks);
                        vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
                }

                vcpu_unlock(vcpu);

                rip = vme->rip + vme->inst_length;
                goto restart;
        }

        return (error);
}
int
vm_inject_event(struct vm *vm, int vcpuid, int type,
                int vector, uint32_t code, int code_valid)
{

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        if ((type > VM_EVENT_NONE && type < VM_EVENT_MAX) == 0)
                return (EINVAL);

        if (vector < 0 || vector > 255)
                return (EINVAL);

        return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}
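/*
 * NMI injection is a two step affair: vm_inject_nmi() latches the request
 * and kicks the vcpu's host cpu; the backend is then expected to consume
 * the flag via vm_nmi_pending()/vm_nmi_clear() on its next guest entry.
 */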
static VMM_STAT_DEFINE(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        vcpu = &vm->vcpu[vcpuid];

        vcpu->nmi_pending = 1;
        vm_interrupt_hostcpu(vm, vcpuid);
        return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        if (vcpu->nmi_pending == 0)
                panic("vm_nmi_clear: inconsistent nmi_pending state");

        vcpu->nmi_pending = 0;
        vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}
int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{

        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (type < 0 || type >= VM_CAP_MAX)
                return (EINVAL);

        return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{

        if (vcpu < 0 || vcpu >= VM_MAXCPU)
                return (EINVAL);

        if (type < 0 || type >= VM_CAP_MAX)
                return (EINVAL);

        return (VMSETCAP(vm->cookie, vcpu, type, val));
}
uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
        return (vm->vcpu[cpu].guest_msrs);
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
        return (vm->vcpu[cpu].vlapic);
}
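/*
 * A device is a passthru candidate if it appears in the 'pptdevs' kernel
 * environment variable, e.g. set from loader.conf as
 *
 *      pptdevs="2/0/0 4/1/0"
 *
 * where each space-separated entry is bus/slot/function.
 */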
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
        int found, b, s, f, n;
        char *val, *cp, *cp2;

        /*
         * setenv pptdevs "1/2/3 4/5/6 7/8/9 10/11/12"
         */
        found = 0;
        cp = val = getenv("pptdevs");
        while (cp != NULL && *cp != '\0') {
                if ((cp2 = strchr(cp, ' ')) != NULL)
                        *cp2 = '\0';

                n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
                if (n == 3 && bus == b && slot == s && func == f) {
                        found = 1;
                        break;
                }

                /* restore the delimiter and move to the next entry */
                if (cp2 != NULL)
                        *cp2++ = ' ';

                cp = cp2;
        }
        freeenv(val);
        return (found);
}
void *
vm_iommu_domain(struct vm *vm)
{
        return (vm->iommu);
}
int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state state)
{
        int error;
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        vcpu_lock(vcpu);

        /*
         * The following state transitions are allowed:
         * IDLE -> RUNNING -> IDLE
         * IDLE -> CANNOT_RUN -> IDLE
         */
        if ((vcpu->state == VCPU_IDLE && state != VCPU_IDLE) ||
            (vcpu->state != VCPU_IDLE && state == VCPU_IDLE)) {
                error = 0;
                vcpu->state = state;
        } else {
                error = EBUSY;
        }

        vcpu_unlock(vcpu);

        return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;
        enum vcpu_state state;

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

        vcpu = &vm->vcpu[vcpuid];

        vcpu_lock(vcpu);
        state = vcpu->state;
        vcpu_unlock(vcpu);

        return (state);
}
void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

        if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
                CPU_SET(vcpuid, &vm->active_cpus);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

        return (vm->active_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

        return (vm->vcpu[vcpuid].stats);
}
int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        *state = vm->vcpu[vcpuid].x2apic_state;

        return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{

        if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
                return (EINVAL);

        if (state < 0 || state >= X2APIC_STATE_LAST)
                return (EINVAL);

        vm->vcpu[vcpuid].x2apic_state = state;

        vlapic_set_x2apic_state(vm, vcpuid, state);

        return (0);
}
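/*
 * Force a vcpu out of guest context: either wake the host thread if it
 * is sleeping in 'hlt' emulation, or IPI the host cpu the vcpu is
 * currently executing on.
 */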
void
vm_interrupt_hostcpu(struct vm *vm, int vcpuid)
{
        int hostcpu;
        struct vcpu *vcpu;

        vcpu = &vm->vcpu[vcpuid];

        vcpu_lock(vcpu);
        hostcpu = vcpu->hostcpu;
        if (hostcpu == NOCPU) {
                /*
                 * If the vcpu is 'RUNNING' but without a valid 'hostcpu' then
                 * the host thread must be sleeping waiting for an event to
                 * kick the vcpu out of 'hlt'.
                 *
                 * XXX this is racy because the condition exists right before
                 * and after calling VMRUN() in vm_run(). The wakeup() is
                 * benign in this case.
                 */
                if (vcpu->state == VCPU_RUNNING)
                        wakeup_one(vcpu);
        } else {
                if (vcpu->state != VCPU_RUNNING)
                        panic("invalid vcpu state %d", vcpu->state);
                if (hostcpu != curcpu)
                        ipi_cpu(hostcpu, vmm_ipinum);
        }
        vcpu_unlock(vcpu);
}