/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/vnode.h>

#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/md_var.h>

#include <x86/apicreg.h>
#include <x86/ifunc.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>

#include "vmm_ioport.h"
#include "vmm_lapic.h"
/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
    struct mtx mtx;             /* (o) protects 'state' and 'hostcpu' */
    enum vcpu_state state;      /* (o) vcpu state */
    int vcpuid;                 /* (o) */
    int hostcpu;                /* (o) vcpu's host cpu */
    int reqidle;                /* (i) request vcpu to idle */
    struct vm *vm;              /* (o) */
    void *cookie;               /* (i) cpu-specific data */
    struct vlapic *vlapic;      /* (i) APIC device model */
    enum x2apic_state x2apic_state; /* (i) APIC mode */
    uint64_t exitintinfo;       /* (i) events pending at VM exit */
    int nmi_pending;            /* (i) NMI pending */
    int extint_pending;         /* (i) INTR pending */
    int exception_pending;      /* (i) exception pending */
    int exc_vector;             /* (x) exception collateral */
    int exc_errcode_valid;
    uint32_t exc_errcode;
    struct savefpu *guestfpu;   /* (a,i) guest fpu state */
    uint64_t guest_xcr0;        /* (i) guest %xcr0 register */
    void *stats;                /* (a,i) statistics */
    struct vm_exit exitinfo;    /* (x) exit reason and collateral */
    uint64_t nextrip;           /* (x) next instruction to execute */
    uint64_t tsc_offset;        /* (o) TSC offsetting */
};
#define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define vcpu_lock_init(v)       mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define vcpu_lock(v)            mtx_lock_spin(&((v)->mtx))
#define vcpu_unlock(v)          mtx_unlock_spin(&((v)->mtx))
#define vcpu_assert_locked(v)   mtx_assert(&((v)->mtx), MA_OWNED)
struct mem_seg {
    size_t len;
    bool sysmem;
    struct vm_object *object;
};
#define VM_MAX_MEMSEGS  4

struct mem_map {
    vm_paddr_t gpa;
    size_t len;
    vm_ooffset_t segoff;
    int segid;
    int prot;
    int flags;
};
#define VM_MAX_MEMMAPS  8
/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
    void *cookie;               /* (i) cpu-specific data */
    void *iommu;                /* (x) iommu-specific data */
    struct vhpet *vhpet;        /* (i) virtual HPET */
    struct vioapic *vioapic;    /* (i) virtual ioapic */
    struct vatpic *vatpic;      /* (i) virtual atpic */
    struct vatpit *vatpit;      /* (i) virtual atpit */
    struct vpmtmr *vpmtmr;      /* (i) virtual ACPI PM timer */
    struct vrtc *vrtc;          /* (o) virtual RTC */
    volatile cpuset_t active_cpus;      /* (i) active vcpus */
    volatile cpuset_t debug_cpus;       /* (i) vcpus stopped for debug */
    int suspend;                /* (i) stop VM execution */
    volatile cpuset_t suspended_cpus;   /* (i) suspended vcpus */
    volatile cpuset_t halted_cpus;      /* (x) cpus in a hard halt */
    cpuset_t rendezvous_req_cpus;       /* (x) rendezvous requested */
    cpuset_t rendezvous_done_cpus;      /* (x) rendezvous finished */
    void *rendezvous_arg;       /* (x) rendezvous func/arg */
    vm_rendezvous_func_t rendezvous_func;
    struct mtx rendezvous_mtx;  /* (o) rendezvous lock */
    struct mem_map mem_maps[VM_MAX_MEMMAPS];    /* (i) guest address space */
    struct mem_seg mem_segs[VM_MAX_MEMSEGS];    /* (o) guest memory regions */
    struct vmspace *vmspace;    /* (o) guest's address space */
    char name[VM_MAX_NAMELEN+1]; /* (o) virtual machine name */
    struct vcpu vcpu[VM_MAXCPU]; /* (i) guest vcpus */
    /* The following describe the vm cpu topology */
    uint16_t sockets;           /* (o) num of sockets */
    uint16_t cores;             /* (o) num of cores/socket */
    uint16_t threads;           /* (o) num of threads/core */
    uint16_t maxcpus;           /* (o) max pluggable cpus */
};
#define VMM_CTR0(vcpu, format)                                          \
    VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format)

#define VMM_CTR1(vcpu, format, p1)                                      \
    VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)

#define VMM_CTR2(vcpu, format, p1, p2)                                  \
    VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)

#define VMM_CTR3(vcpu, format, p1, p2, p3)                              \
    VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)

#define VMM_CTR4(vcpu, format, p1, p2, p3, p4)                          \
    VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
static int vmm_initialized;

static void vmmops_panic(void);

static void
vmmops_panic(void)
{
    panic("vmm_ops func called when !vmm_is_intel() && !vmm_is_svm()");
}
#define DEFINE_VMMOPS_IFUNC(ret_type, opname, args)                     \
    DEFINE_IFUNC(static, ret_type, vmmops_##opname, args)               \
    {                                                                   \
        if (vmm_is_intel())                                             \
            return (vmm_ops_intel.opname);                              \
        else if (vmm_is_svm())                                          \
            return (vmm_ops_amd.opname);                                \
        else                                                            \
            return ((ret_type (*)args)vmmops_panic);                    \
    }
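/*
 * Dispatch note: DEFINE_IFUNC makes each vmmops_<opname>() below a static
 * ifunc whose resolver (the macro body above) runs once, when vmm.ko is
 * linked, binding the symbol directly to the VT-x or SVM implementation.
 * A call such as vmmops_modinit(vmm_ipinum) therefore costs the same as a
 * direct call; if neither vendor backend matches, the call lands in
 * vmmops_panic().
 */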
DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
DEFINE_VMMOPS_IFUNC(void, modresume, (void))
DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t rip, struct pmap *pmap,
    struct vm_eventinfo *info))
DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
    int vcpu_id))
DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
DEFINE_VMMOPS_IFUNC(int, getdesc, (void *vcpui, int num, struct seg_desc *desc))
DEFINE_VMMOPS_IFUNC(int, setdesc, (void *vcpui, int num, struct seg_desc *desc))
DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
    vm_offset_t max))
DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
DEFINE_VMMOPS_IFUNC(struct vlapic *, vlapic_init, (void *vcpui))
DEFINE_VMMOPS_IFUNC(void, vlapic_cleanup, (struct vlapic *vlapic))
#ifdef BHYVE_SNAPSHOT
DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta *meta))
DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui,
    struct vm_snapshot_meta *meta))
DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now))
#endif
#define fpu_start_emulating()   load_cr0(rcr0() | CR0_TS)
#define fpu_stop_emulating()    clts()
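/*
 * Background: setting CR0.TS makes the next FPU/SIMD instruction executed on
 * the host raise #NM, so any stray host use of the FPU while it still holds
 * guest state traps instead of silently corrupting that state. clts() clears
 * TS again around the explicit fpusave()/fpurestore() calls in
 * save_guest_fpustate() and restore_guest_fpustate() below.
 */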
SDT_PROVIDER_DEFINE(vmm);

static MALLOC_DEFINE(M_VM, "vm", "vm");

static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");

static int trap_wbinvd;
SYSCTL_INT(_hw_vmm, OID_AUTO, trap_wbinvd, CTLFLAG_RDTUN, &trap_wbinvd, 0,
    "WBINVD triggers a VM-exit");

static void vm_free_memmap(struct vm *vm, int ident);
static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
static const char *
vcpu_state2str(enum vcpu_state state)
{
    switch (state) {
    case VCPU_IDLE:
        return ("idle");
    case VCPU_FROZEN:
        return ("frozen");
    case VCPU_RUNNING:
        return ("running");
    case VCPU_SLEEPING:
        return ("sleeping");
    default:
        return ("unknown");
    }
}
static __inline void *
vcpu_cookie(struct vm *vm, int i)
{
    return (vm->vcpu[i].cookie);
}

static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
    struct vcpu *vcpu = &vm->vcpu[i];

    vmmops_vlapic_cleanup(vcpu->vlapic);
    vmmops_vcpu_cleanup(vcpu->cookie);
    if (destroy) {
        vmm_stat_free(vcpu->stats);
        fpu_save_area_free(vcpu->guestfpu);
    }
}
static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
    struct vcpu *vcpu;

    KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
        ("vcpu_init: invalid vcpu %d", vcpu_id));

    vcpu = &vm->vcpu[vcpu_id];

    if (create) {
        KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
            "initialized", vcpu_id));
        vcpu_lock_init(vcpu);
        vcpu->state = VCPU_IDLE;
        vcpu->hostcpu = NOCPU;
        vcpu->vcpuid = vcpu_id;
        vcpu->vm = vm;
        vcpu->guestfpu = fpu_save_area_alloc();
        vcpu->stats = vmm_stat_alloc();
        vcpu->tsc_offset = 0;
    }

    vcpu->cookie = vmmops_vcpu_init(vm->cookie, vcpu, vcpu_id);
    vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie);
    vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
    vcpu->reqidle = 0;
    vcpu->exitintinfo = 0;
    vcpu->nmi_pending = 0;
    vcpu->extint_pending = 0;
    vcpu->exception_pending = 0;
    vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
    fpu_save_area_reset(vcpu->guestfpu);
    vmm_stat_init(vcpu->stats);
}
int
vcpu_trace_exceptions(struct vcpu *vcpu)
{
    return (trace_guest_exceptions);
}

int
vcpu_trap_wbinvd(struct vcpu *vcpu)
{
    return (trap_wbinvd);
}

struct vm_exit *
vm_exitinfo(struct vcpu *vcpu)
{
    return (&vcpu->exitinfo);
}
static int
vmm_init(void)
{
    int error;

    if (!vmm_is_hw_supported())
        return (ENXIO);

    vmm_host_state_init();

    vmm_ipinum = lapic_ipi_alloc(pti ? &IDTVEC(justreturn1_pti) :
        &IDTVEC(justreturn));
    if (vmm_ipinum < 0)
        vmm_ipinum = IPI_AST;

    error = vmm_mem_init();
    if (error)
        return (error);

    vmm_resume_p = vmmops_modresume;

    return (vmmops_modinit(vmm_ipinum));
}
static int
vmm_handler(module_t mod, int what, void *arg)
{
    int error;

    switch (what) {
    case MOD_LOAD:
        if (vmm_is_hw_supported()) {
            vmmdev_init();
            error = vmm_init();
            if (error == 0)
                vmm_initialized = 1;
        } else {
            error = ENXIO;
        }
        break;
    case MOD_UNLOAD:
        if (vmm_is_hw_supported()) {
            error = vmmdev_cleanup();
            if (error == 0) {
                vmm_resume_p = NULL;
                iommu_cleanup();
                if (vmm_ipinum != IPI_AST)
                    lapic_ipi_free(vmm_ipinum);
                error = vmmops_modcleanup();
                /*
                 * Something bad happened - prevent new
                 * VMs from being created
                 */
                if (error)
                    vmm_initialized = 0;
            }
        } else {
            error = 0;
        }
        break;
    default:
        error = 0;
        break;
    }
    return (error);
}
static moduledata_t vmm_kmod = {
    "vmm",
    vmm_handler,
    NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);
static void
vm_init(struct vm *vm, bool create)
{
    int i;

    vm->cookie = vmmops_init(vm, vmspace_pmap(vm->vmspace));
    vm->vioapic = vioapic_init(vm);
    vm->vhpet = vhpet_init(vm);
    vm->vatpic = vatpic_init(vm);
    vm->vatpit = vatpit_init(vm);
    vm->vpmtmr = vpmtmr_init(vm);
    if (create)
        vm->vrtc = vrtc_init(vm);

    CPU_ZERO(&vm->active_cpus);
    CPU_ZERO(&vm->debug_cpus);

    vm->suspend = 0;
    CPU_ZERO(&vm->suspended_cpus);

    for (i = 0; i < vm->maxcpus; i++)
        vcpu_init(vm, i, create);
}

/*
 * The default CPU topology is a single thread per package.
 */
u_int cores_per_package = 1;
u_int threads_per_core = 1;
int
vm_create(const char *name, struct vm **retvm)
{
    struct vm *vm;
    struct vmspace *vmspace;

    /*
     * If vmm.ko could not be successfully initialized then don't attempt
     * to create the virtual machine.
     */
    if (!vmm_initialized)
        return (ENXIO);

    if (name == NULL || strnlen(name, VM_MAX_NAMELEN + 1) ==
        VM_MAX_NAMELEN + 1)
        return (EINVAL);

    vmspace = vmmops_vmspace_alloc(0, VM_MAXUSER_ADDRESS_LA48);
    if (vmspace == NULL)
        return (ENOMEM);

    vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
    strcpy(vm->name, name);
    vm->vmspace = vmspace;
    mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

    vm->sockets = 1;
    vm->cores = cores_per_package;  /* XXX backwards compatibility */
    vm->threads = threads_per_core; /* XXX backwards compatibility */
    vm->maxcpus = VM_MAXCPU;        /* XXX temp to keep code working */

    vm_init(vm, true);

    *retvm = vm;
    return (0);
}

void
vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
    uint16_t *threads, uint16_t *maxcpus)
{
    *sockets = vm->sockets;
    *cores = vm->cores;
    *threads = vm->threads;
    *maxcpus = vm->maxcpus;
}

uint16_t
vm_get_maxcpus(struct vm *vm)
{
    return (vm->maxcpus);
}

int
vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
    uint16_t threads, uint16_t maxcpus)
{
    if (maxcpus != 0)
        return (EINVAL);        /* XXX remove when supported */
    if ((sockets * cores * threads) > vm->maxcpus)
        return (EINVAL);
    /* XXX need to check sockets * cores * threads == vCPU, how? */
    vm->sockets = sockets;
    vm->cores = cores;
    vm->threads = threads;
    vm->maxcpus = VM_MAXCPU;        /* XXX temp to keep code working */
    return (0);
}

static void
vm_cleanup(struct vm *vm, bool destroy)
{
    struct mem_map *mm;
    int i;

    ppt_unassign_all(vm);

    if (vm->iommu != NULL)
        iommu_destroy_domain(vm->iommu);

    if (destroy)
        vrtc_cleanup(vm->vrtc);
    else
        vrtc_reset(vm->vrtc);
    vpmtmr_cleanup(vm->vpmtmr);
    vatpit_cleanup(vm->vatpit);
    vhpet_cleanup(vm->vhpet);
    vatpic_cleanup(vm->vatpic);
    vioapic_cleanup(vm->vioapic);

    for (i = 0; i < vm->maxcpus; i++)
        vcpu_cleanup(vm, i, destroy);

    vmmops_cleanup(vm->cookie);

    /*
     * System memory is removed from the guest address space only when
     * the VM is destroyed. This is because the mapping remains the same
     * across VM reset.
     *
     * Device memory can be relocated by the guest (e.g. using PCI BARs)
     * so those mappings are removed on a VM reset.
     */
    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (destroy || !sysmem_mapping(vm, mm))
            vm_free_memmap(vm, i);
    }

    if (destroy) {
        for (i = 0; i < VM_MAX_MEMSEGS; i++)
            vm_free_memseg(vm, i);

        vmmops_vmspace_free(vm->vmspace);
        vm->vmspace = NULL;
    }
}

void
vm_destroy(struct vm *vm)
{
    vm_cleanup(vm, true);
    free(vm, M_VM);
}

int
vm_reinit(struct vm *vm)
{
    int error;

    /*
     * A virtual machine can be reset only if all vcpus are suspended.
     */
    if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
        vm_cleanup(vm, false);
        vm_init(vm, false);
        error = 0;
    } else {
        error = EINVAL;
    }

    return (error);
}

const char *
vm_name(struct vm *vm)
{
    return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
    vm_object_t obj;

    if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
        return (ENOMEM);
    else
        return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
    vmm_mmio_free(vm->vmspace, gpa, len);
    return (0);
}
/*
 * Return 'true' if 'gpa' is allocated in the guest address space.
 *
 * This function is called in the context of a running vcpu which acts as
 * an implicit lock on 'vm->mem_maps[]'.
 */
bool
vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
{
    struct vm *vm = vcpu->vm;
    struct mem_map *mm;
    int i;

#ifdef INVARIANTS
    int hostcpu, state;
    state = vcpu_get_state(vcpu, &hostcpu);
    KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
        ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
#endif

    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
            return (true);          /* 'gpa' is sysmem or devmem */
    }

    if (ppt_is_mmio(vm, gpa))
        return (true);              /* 'gpa' is pci passthru mmio */

    return (false);
}

int
vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
{
    struct mem_seg *seg;
    vm_object_t obj;

    if (ident < 0 || ident >= VM_MAX_MEMSEGS)
        return (EINVAL);

    if (len == 0 || (len & PAGE_MASK))
        return (EINVAL);

    seg = &vm->mem_segs[ident];
    if (seg->object != NULL) {
        if (seg->len == len && seg->sysmem == sysmem)
            return (EEXIST);
        else
            return (EINVAL);
    }

    obj = vm_object_allocate(OBJT_SWAP, len >> PAGE_SHIFT);
    if (obj == NULL)
        return (ENOMEM);

    seg->len = len;
    seg->object = obj;
    seg->sysmem = sysmem;
    return (0);
}

int
vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    vm_object_t *objptr)
{
    struct mem_seg *seg;

    if (ident < 0 || ident >= VM_MAX_MEMSEGS)
        return (EINVAL);

    seg = &vm->mem_segs[ident];
    if (len)
        *len = seg->len;
    if (sysmem)
        *sysmem = seg->sysmem;
    if (objptr)
        *objptr = seg->object;
    return (0);
}

void
vm_free_memseg(struct vm *vm, int ident)
{
    struct mem_seg *seg;

    KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
        ("%s: invalid memseg ident %d", __func__, ident));

    seg = &vm->mem_segs[ident];
    if (seg->object != NULL) {
        vm_object_deallocate(seg->object);
        bzero(seg, sizeof(struct mem_seg));
    }
}
int
vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
    size_t len, int prot, int flags)
{
    struct mem_seg *seg;
    struct mem_map *m, *map;
    vm_ooffset_t last;
    int i, error;

    if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
        return (EINVAL);

    if (flags & ~VM_MEMMAP_F_WIRED)
        return (EINVAL);

    if (segid < 0 || segid >= VM_MAX_MEMSEGS)
        return (EINVAL);

    seg = &vm->mem_segs[segid];
    if (seg->object == NULL)
        return (EINVAL);

    last = first + len;
    if (first < 0 || first >= last || last > seg->len)
        return (EINVAL);

    if ((gpa | first | last) & PAGE_MASK)
        return (EINVAL);

    map = NULL;
    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        m = &vm->mem_maps[i];
        if (m->len == 0) {
            map = m;
            break;
        }
    }

    if (map == NULL)
        return (ENOSPC);

    error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
        len, 0, VMFS_NO_SPACE, prot, prot, 0);
    if (error != KERN_SUCCESS)
        return (EFAULT);

    vm_object_reference(seg->object);

    if (flags & VM_MEMMAP_F_WIRED) {
        error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
        if (error != KERN_SUCCESS) {
            vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
            return (error == KERN_RESOURCE_SHORTAGE ? ENOMEM :
                EFAULT);
        }
    }

    map->gpa = gpa;
    map->len = len;
    map->segoff = first;
    map->segid = segid;
    map->prot = prot;
    map->flags = flags;
    return (0);
}
int
vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len)
{
    struct mem_map *m;
    int i;

    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        m = &vm->mem_maps[i];
        if (m->gpa == gpa && m->len == len &&
            (m->flags & VM_MEMMAP_F_IOMMU) == 0) {
            vm_free_memmap(vm, i);
            return (0);
        }
    }

    return (EINVAL);
}

int
vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
    struct mem_map *mm, *mmnext;
    int i;

    mmnext = NULL;
    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (mm->len == 0 || mm->gpa < *gpa)
            continue;
        if (mmnext == NULL || mm->gpa < mmnext->gpa)
            mmnext = mm;
    }

    if (mmnext != NULL) {
        *gpa = mmnext->gpa;
        if (segid)
            *segid = mmnext->segid;
        if (segoff)
            *segoff = mmnext->segoff;
        if (len)
            *len = mmnext->len;
        if (prot)
            *prot = mmnext->prot;
        if (flags)
            *flags = mmnext->flags;
        return (0);
    } else {
        return (ENOENT);
    }
}

static void
vm_free_memmap(struct vm *vm, int ident)
{
    struct mem_map *mm;
    int error __diagused;

    mm = &vm->mem_maps[ident];
    if (mm->len) {
        error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
            mm->gpa + mm->len);
        KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
            __func__, error));
        bzero(mm, sizeof(struct mem_map));
    }
}

static bool
sysmem_mapping(struct vm *vm, struct mem_map *mm)
{
    if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
        return (true);
    else
        return (false);
}

vm_paddr_t
vmm_sysmem_maxaddr(struct vm *vm)
{
    struct mem_map *mm;
    vm_paddr_t maxaddr;
    int i;

    maxaddr = 0;
    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (sysmem_mapping(vm, mm)) {
            if (maxaddr < mm->gpa + mm->len)
                maxaddr = mm->gpa + mm->len;
        }
    }
    return (maxaddr);
}
static void
vm_iommu_modify(struct vm *vm, bool map)
{
    int i, sz;
    vm_paddr_t gpa, hpa;
    struct mem_map *mm;
    void *vp, *cookie, *host_domain;

    sz = PAGE_SIZE;
    host_domain = iommu_host_domain();

    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (!sysmem_mapping(vm, mm))
            continue;

        if (map) {
            KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
                ("iommu map found invalid memmap %#lx/%#lx/%#x",
                mm->gpa, mm->len, mm->flags));
            if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
                continue;
            mm->flags |= VM_MEMMAP_F_IOMMU;
        } else {
            if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
                continue;
            mm->flags &= ~VM_MEMMAP_F_IOMMU;
            KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
                ("iommu unmap found invalid memmap %#lx/%#lx/%#x",
                mm->gpa, mm->len, mm->flags));
        }

        gpa = mm->gpa;
        while (gpa < mm->gpa + mm->len) {
            vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE,
                VM_PROT_WRITE, &cookie);
            KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
                vm_name(vm), gpa));

            vm_gpa_release(cookie);

            hpa = DMAP_TO_PHYS((uintptr_t)vp);
            if (map) {
                iommu_create_mapping(vm->iommu, gpa, hpa, sz);
            } else {
                iommu_remove_mapping(vm->iommu, gpa, sz);
            }

            gpa += PAGE_SIZE;
        }
    }

    /*
     * Invalidate the cached translations associated with the domain
     * from which pages were removed.
     */
    if (map)
        iommu_invalidate_tlb(host_domain);
    else
        iommu_invalidate_tlb(vm->iommu);
}
#define vm_iommu_unmap(vm)      vm_iommu_modify((vm), false)
#define vm_iommu_map(vm)        vm_iommu_modify((vm), true)
int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
    int error;

    error = ppt_unassign_device(vm, bus, slot, func);
    if (error)
        return (error);

    if (ppt_assigned_devices(vm) == 0)
        vm_iommu_unmap(vm);

    return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
    int error;
    vm_paddr_t maxaddr;

    /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
    if (ppt_assigned_devices(vm) == 0) {
        KASSERT(vm->iommu == NULL,
            ("vm_assign_pptdev: iommu must be NULL"));
        maxaddr = vmm_sysmem_maxaddr(vm);
        vm->iommu = iommu_create_domain(maxaddr);
        if (vm->iommu == NULL)
            return (ENXIO);
        vm_iommu_map(vm);
    }

    error = ppt_assign_device(vm, bus, slot, func);
    return (error);
}

static void *
_vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
    int i, count, pageoff;
    struct mem_map *mm;
    vm_page_t m;

    pageoff = gpa & PAGE_MASK;
    if (len > PAGE_SIZE - pageoff)
        panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

    count = 0;
    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) {
            count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
                trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
            break;
        }
    }

    if (count == 1) {
        *cookie = m;
        return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
    } else {
        *cookie = NULL;
        return (NULL);
    }
}

void *
vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
#ifdef INVARIANTS
    /*
     * The current vcpu should be frozen to ensure 'vm_memmap[]'
     * stability.
     */
    int state = vcpu_get_state(vcpu, NULL);
    KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
        __func__, state));
#endif
    return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie));
}

void *
vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
#ifdef INVARIANTS
    /*
     * All vcpus are frozen by ioctls that modify the memory map
     * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->memmap[]' stability is
     * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
     */
    int state;
    for (int i = 0; i < vm->maxcpus; i++) {
        state = vcpu_get_state(vm_vcpu(vm, i), NULL);
        KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
            __func__, state));
    }
#endif
    return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));
}

void
vm_gpa_release(void *cookie)
{
    vm_page_t m = cookie;

    vm_page_unwire(m, PQ_ACTIVE);
}

int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
{
    if (reg >= VM_REG_LAST)
        return (EINVAL);

    return (vmmops_getreg(vcpu->cookie, reg, retval));
}

int
vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
{
    int error;

    if (reg >= VM_REG_LAST)
        return (EINVAL);

    error = vmmops_setreg(vcpu->cookie, reg, val);
    if (error || reg != VM_REG_GUEST_RIP)
        return (error);

    /* Set 'nextrip' to match the value of %rip */
    VMM_CTR1(vcpu, "Setting nextrip to %#lx", val);
    vcpu->nextrip = val;
    return (0);
}

static bool
is_descriptor_table(int reg)
{
    switch (reg) {
    case VM_REG_GUEST_IDTR:
    case VM_REG_GUEST_GDTR:
        return (true);
    default:
        return (false);
    }
}

static bool
is_segment_register(int reg)
{
    switch (reg) {
    case VM_REG_GUEST_ES:
    case VM_REG_GUEST_CS:
    case VM_REG_GUEST_SS:
    case VM_REG_GUEST_DS:
    case VM_REG_GUEST_FS:
    case VM_REG_GUEST_GS:
    case VM_REG_GUEST_TR:
    case VM_REG_GUEST_LDTR:
        return (true);
    default:
        return (false);
    }
}

int
vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
{
    if (!is_segment_register(reg) && !is_descriptor_table(reg))
        return (EINVAL);

    return (vmmops_getdesc(vcpu->cookie, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
    if (vcpu < 0 || vcpu >= vm->maxcpus)
        return (EINVAL);

    if (!is_segment_register(reg) && !is_descriptor_table(reg))
        return (EINVAL);

    return (vmmops_setdesc(vcpu_cookie(vm, vcpu), reg, desc));
}
static void
restore_guest_fpustate(struct vcpu *vcpu)
{
    /* flush host state to the pcb */
    fpuexit(curthread);

    /* restore guest FPU state */
    fpu_stop_emulating();
    fpurestore(vcpu->guestfpu);

    /* restore guest XCR0 if XSAVE is enabled in the host */
    if (rcr4() & CR4_XSAVE)
        load_xcr(0, vcpu->guest_xcr0);

    /*
     * The FPU is now "dirty" with the guest's state so turn on emulation
     * to trap any access to the FPU by the host.
     */
    fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{
    if ((rcr0() & CR0_TS) == 0)
        panic("fpu emulation not enabled in host!");

    /* save guest XCR0 and restore host XCR0 */
    if (rcr4() & CR4_XSAVE) {
        vcpu->guest_xcr0 = rxcr(0);
        load_xcr(0, vmm_get_host_xcr0());
    }

    /* save guest FPU state */
    fpu_stop_emulating();
    fpusave(vcpu->guestfpu);
    fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
static int
vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
    struct vcpu *vcpu;
    int error;

    vcpu = &vm->vcpu[vcpuid];
    vcpu_assert_locked(vcpu);

    /*
     * State transitions from the vmmdev_ioctl() must always begin from
     * the VCPU_IDLE state. This guarantees that there is only a single
     * ioctl() operating on a vcpu at any point.
     */
    if (from_idle) {
        while (vcpu->state != VCPU_IDLE) {
            vcpu->reqidle = 1;
            vcpu_notify_event_locked(vcpu, false);
            VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to "
                "idle requested", vcpu_state2str(vcpu->state));
            msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
        }
    } else {
        KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
            "vcpu idle state"));
    }

    if (vcpu->state == VCPU_RUNNING) {
        KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
            "mismatch for running vcpu", curcpu, vcpu->hostcpu));
    } else {
        KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
            "vcpu that is not running", vcpu->hostcpu));
    }

    /*
     * The following state transitions are allowed:
     * IDLE -> FROZEN -> IDLE
     * FROZEN -> RUNNING -> FROZEN
     * FROZEN -> SLEEPING -> FROZEN
     */
    switch (vcpu->state) {
    case VCPU_IDLE:
    case VCPU_RUNNING:
    case VCPU_SLEEPING:
        error = (newstate != VCPU_FROZEN);
        break;
    case VCPU_FROZEN:
        error = (newstate == VCPU_FROZEN);
        break;
    default:
        error = 1;
        break;
    }

    if (error)
        return (EBUSY);

    VCPU_CTR2(vm, vcpuid, "vcpu state changed from %s to %s",
        vcpu_state2str(vcpu->state), vcpu_state2str(newstate));

    vcpu->state = newstate;
    if (newstate == VCPU_RUNNING)
        vcpu->hostcpu = curcpu;
    else
        vcpu->hostcpu = NOCPU;

    if (newstate == VCPU_IDLE)
        wakeup(&vcpu->state);

    return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
    int error;

    if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
        panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
    int error;

    if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
        panic("Error %d setting state to %d", error, newstate);
}

static int
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{
    struct thread *td;
    int error;

    KASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus,
        ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

    error = 0;
    td = curthread;
    mtx_lock(&vm->rendezvous_mtx);
    while (vm->rendezvous_func != NULL) {
        /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
        CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus,
            &vm->active_cpus);

        if (CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
            !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
            VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
            (*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
            CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
        }
        if (CPU_CMP(&vm->rendezvous_req_cpus,
            &vm->rendezvous_done_cpus) == 0) {
            VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
            vm->rendezvous_func = NULL;
            wakeup(&vm->rendezvous_func);
            break;
        }
        VCPU_CTR0(vm, vcpuid, "Wait for rendezvous completion");
        mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
            "vmrndv", hz);
        if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
            mtx_unlock(&vm->rendezvous_mtx);
            error = thread_check_susp(td, true);
            if (error != 0)
                return (error);
            mtx_lock(&vm->rendezvous_mtx);
        }
    }
    mtx_unlock(&vm->rendezvous_mtx);
    return (error);
}
/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
    struct vcpu *vcpu;
    struct thread *td;
    const char *wmesg;
    int error, t, vcpu_halted, vm_halted;

    KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

    vcpu = &vm->vcpu[vcpuid];
    vcpu_halted = 0;
    vm_halted = 0;
    error = 0;
    td = curthread;

    vcpu_lock(vcpu);
    while (1) {
        /*
         * Do a final check for pending NMI or interrupts before
         * really putting this thread to sleep. Also check for
         * software events that would cause this vcpu to wakeup.
         *
         * These interrupts/events could have happened after the
         * vcpu returned from vmmops_run() and before it acquired the
         * vcpu lock above.
         */
        if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
            break;
        if (vm_nmi_pending(vcpu))
            break;
        if (!intr_disabled) {
            if (vm_extint_pending(vcpu) ||
                vlapic_pending_intr(vcpu->vlapic, NULL)) {
                break;
            }
        }

        /* Don't go to sleep if the vcpu thread needs to yield */
        if (vcpu_should_yield(vcpu))
            break;

        if (vcpu_debugged(vcpu))
            break;

        /*
         * Some Linux guests implement "halt" by having all vcpus
         * execute HLT with interrupts disabled. 'halted_cpus' keeps
         * track of the vcpus that have entered this state. When all
         * vcpus enter the halted state the virtual machine is halted.
         */
        if (intr_disabled) {
            wmesg = "vmhalt";
            VCPU_CTR0(vm, vcpuid, "Halted");
            if (!vcpu_halted && halt_detection_enabled) {
                vcpu_halted = 1;
                CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
            }
            if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
                vm_halted = 1;
                break;
            }
        } else {
            wmesg = "vmidle";
        }

        t = ticks;
        vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
        /*
         * XXX msleep_spin() cannot be interrupted by signals so
         * wake up periodically to check pending signals.
         */
        msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
        vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
        vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
        if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
            vcpu_unlock(vcpu);
            error = thread_check_susp(td, false);
            if (error != 0) {
                if (vcpu_halted) {
                    CPU_CLR_ATOMIC(vcpuid,
                        &vm->halted_cpus);
                }
                return (error);
            }
            vcpu_lock(vcpu);
        }
    }

    if (vcpu_halted)
        CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

    vcpu_unlock(vcpu);

    if (vm_halted)
        vm_suspend(vm, VM_SUSPEND_HALT);

    return (error);
}
static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
    struct vcpu *vcpu;
    vm_map_t map;
    int rv, ftype;
    struct vm_exit *vme;

    vcpu = &vm->vcpu[vcpuid];
    vme = &vcpu->exitinfo;

    KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
        __func__, vme->inst_length));

    ftype = vme->u.paging.fault_type;
    KASSERT(ftype == VM_PROT_READ ||
        ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
        ("vm_handle_paging: invalid fault_type %d", ftype));

    if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
        rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
            vme->u.paging.gpa, ftype);
        if (rv == 0) {
            VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
                ftype == VM_PROT_READ ? "accessed" : "dirty",
                vme->u.paging.gpa);
            goto done;
        }
    }

    map = &vm->vmspace->vm_map;
    rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL, NULL);

    VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
        "ftype = %d", rv, vme->u.paging.gpa, ftype);

    if (rv != KERN_SUCCESS)
        return (EFAULT);
done:
    return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
    struct vie *vie;
    struct vcpu *vcpu;
    struct vm_exit *vme;
    uint64_t gla, gpa, cs_base;
    struct vm_guest_paging *paging;
    mem_region_read_t mread;
    mem_region_write_t mwrite;
    enum vm_cpu_mode cpu_mode;
    int cs_d, error, fault;

    vcpu = &vm->vcpu[vcpuid];
    vme = &vcpu->exitinfo;

    KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
        __func__, vme->inst_length));

    gla = vme->u.inst_emul.gla;
    gpa = vme->u.inst_emul.gpa;
    cs_base = vme->u.inst_emul.cs_base;
    cs_d = vme->u.inst_emul.cs_d;
    vie = &vme->u.inst_emul.vie;
    paging = &vme->u.inst_emul.paging;
    cpu_mode = paging->cpu_mode;

    VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);

    /* Fetch, decode and emulate the faulting instruction */
    if (vie->num_valid == 0) {
        error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base,
            VIE_INST_SIZE, vie, &fault);
    } else {
        /*
         * The instruction bytes have already been copied into 'vie'
         */
        error = fault = 0;
    }
    if (error || fault)
        return (error);

    if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) {
        VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
            vme->rip + cs_base);
        *retu = true;   /* dump instruction bytes in userspace */
        return (0);
    }

    /*
     * Update 'nextrip' based on the length of the emulated instruction.
     */
    vme->inst_length = vie->num_processed;
    vcpu->nextrip += vie->num_processed;
    VCPU_CTR1(vm, vcpuid, "nextrip updated to %#lx after instruction "
        "decoding", vcpu->nextrip);

    /* return to userland unless this is an in-kernel emulated device */
    if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
        mread = lapic_mmio_read;
        mwrite = lapic_mmio_write;
    } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
        mread = vioapic_mmio_read;
        mwrite = vioapic_mmio_write;
    } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
        mread = vhpet_mmio_read;
        mwrite = vhpet_mmio_write;
    } else {
        *retu = true;
        return (0);
    }

    error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite,
        retu);

    return (error);
}

static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
    struct vcpu *vcpu;
    struct thread *td;
    int error, i;

    vcpu = &vm->vcpu[vcpuid];
    error = 0;
    td = curthread;

    CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

    /*
     * Wait until all 'active_cpus' have suspended themselves.
     *
     * Since a VM may be suspended at any time including when one or
     * more vcpus are doing a rendezvous we need to call the rendezvous
     * handler while we are waiting to prevent a deadlock.
     */
    vcpu_lock(vcpu);
    while (error == 0) {
        if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
            VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
            break;
        }

        if (vm->rendezvous_func == NULL) {
            VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
            vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
            msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
            vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
            if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
                vcpu_unlock(vcpu);
                error = thread_check_susp(td, false);
                vcpu_lock(vcpu);
            }
        } else {
            VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
            vcpu_unlock(vcpu);
            error = vm_handle_rendezvous(vm, vcpuid);
            vcpu_lock(vcpu);
        }
    }
    vcpu_unlock(vcpu);

    /*
     * Wakeup the other sleeping vcpus and return to userspace.
     */
    for (i = 0; i < vm->maxcpus; i++) {
        if (CPU_ISSET(i, &vm->suspended_cpus)) {
            vcpu_notify_event(vm, i, false);
        }
    }

    *retu = true;
    return (error);
}

static int
vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu)
{
    struct vcpu *vcpu = &vm->vcpu[vcpuid];

    vcpu_lock(vcpu);
    KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
    vcpu->reqidle = 0;
    vcpu_unlock(vcpu);
    *retu = true;
    return (0);
}

int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
    int i;

    if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
        return (EINVAL);

    if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
        VM_CTR2(vm, "virtual machine already suspended %d/%d",
            vm->suspend, how);
        return (EALREADY);
    }

    VM_CTR1(vm, "virtual machine successfully suspended %d", how);

    /*
     * Notify all active vcpus that they are now suspended.
     */
    for (i = 0; i < vm->maxcpus; i++) {
        if (CPU_ISSET(i, &vm->active_cpus))
            vcpu_notify_event(vm, i, false);
    }

    return (0);
}
void
vm_exit_suspended(struct vcpu *vcpu, uint64_t rip)
{
    struct vm *vm = vcpu->vm;
    struct vm_exit *vmexit;

    KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
        ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

    vmexit = vm_exitinfo(vcpu);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_SUSPENDED;
    vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_debug(struct vcpu *vcpu, uint64_t rip)
{
    struct vm_exit *vmexit;

    vmexit = vm_exitinfo(vcpu);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_DEBUG;
}

void
vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip)
{
    struct vm_exit *vmexit;

    vmexit = vm_exitinfo(vcpu);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
    vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1);
}

void
vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip)
{
    struct vm_exit *vmexit;

    vmexit = vm_exitinfo(vcpu);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_REQIDLE;
    vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1);
}

void
vm_exit_astpending(struct vcpu *vcpu, uint64_t rip)
{
    struct vm_exit *vmexit;

    vmexit = vm_exitinfo(vcpu);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_BOGUS;
    vmm_stat_incr(vcpu, VMEXIT_ASTPENDING, 1);
}
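/*
 * Note: each vm_exit_*() helper above reports the exit at the current %rip
 * with inst_length set to zero, so no guest instruction is skipped; the vcpu
 * resumes at the same instruction once the condition has been serviced.
 */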
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
    struct vm_eventinfo evinfo;
    int error, vcpuid;
    struct vcpu *vcpu;
    struct pcb *pcb;
    uint64_t tscval;
    struct vm_exit *vme;
    bool retu, intr_disabled;
    pmap_t pmap;

    vcpuid = vmrun->cpuid;

    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
        return (EINVAL);

    if (!CPU_ISSET(vcpuid, &vm->active_cpus))
        return (EINVAL);

    if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
        return (EINVAL);

    pmap = vmspace_pmap(vm->vmspace);
    vcpu = &vm->vcpu[vcpuid];
    vme = &vcpu->exitinfo;
    evinfo.rptr = &vm->rendezvous_func;
    evinfo.sptr = &vm->suspend;
    evinfo.iptr = &vcpu->reqidle;
restart:
    critical_enter();

    KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
        ("vm_run: absurd pm_active"));

    tscval = rdtsc();

    pcb = PCPU_GET(curpcb);
    set_pcb_flags(pcb, PCB_FULL_IRET);

    restore_guest_fpustate(vcpu);

    vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
    error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
    vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

    save_guest_fpustate(vcpu);

    vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

    critical_exit();

    if (error == 0) {
        retu = false;
        vcpu->nextrip = vme->rip + vme->inst_length;
        switch (vme->exitcode) {
        case VM_EXITCODE_REQIDLE:
            error = vm_handle_reqidle(vm, vcpuid, &retu);
            break;
        case VM_EXITCODE_SUSPENDED:
            error = vm_handle_suspend(vm, vcpuid, &retu);
            break;
        case VM_EXITCODE_IOAPIC_EOI:
            vioapic_process_eoi(vm, vcpuid,
                vme->u.ioapic_eoi.vector);
            break;
        case VM_EXITCODE_RENDEZVOUS:
            error = vm_handle_rendezvous(vm, vcpuid);
            break;
        case VM_EXITCODE_HLT:
            intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
            error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
            break;
        case VM_EXITCODE_PAGING:
            error = vm_handle_paging(vm, vcpuid, &retu);
            break;
        case VM_EXITCODE_INST_EMUL:
            error = vm_handle_inst_emul(vm, vcpuid, &retu);
            break;
        case VM_EXITCODE_INOUT:
        case VM_EXITCODE_INOUT_STR:
            error = vm_handle_inout(vm, vcpuid, vme, &retu);
            break;
        case VM_EXITCODE_MONITOR:
        case VM_EXITCODE_MWAIT:
        case VM_EXITCODE_VMINSN:
            vm_inject_ud(vcpu);
            break;
        default:
            retu = true;    /* handled in userland */
            break;
        }
    }

    /*
     * An instruction emulated via VM_EXITCODE_INST_EMUL may access the
     * APIC, which can transform the exit code into VM_EXITCODE_IPI.
     */
    if (error == 0 && vme->exitcode == VM_EXITCODE_IPI) {
        retu = false;
        error = vm_handle_ipi(vm, vcpuid, vme, &retu);
    }

    if (error == 0 && retu == false)
        goto restart;

    vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1);
    VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);

    /* copy the exit information */
    bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
    return (error);
}
int
vm_restart_instruction(struct vcpu *vcpu)
{
    enum vcpu_state state;
    uint64_t rip;
    int error __diagused;

    state = vcpu_get_state(vcpu, NULL);
    if (state == VCPU_RUNNING) {
        /*
         * When a vcpu is "running" the next instruction is determined
         * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
         * Thus setting 'inst_length' to zero will cause the current
         * instruction to be restarted.
         */
        vcpu->exitinfo.inst_length = 0;
        VMM_CTR1(vcpu, "restarting instruction at %#lx by "
            "setting inst_length to zero", vcpu->exitinfo.rip);
    } else if (state == VCPU_FROZEN) {
        /*
         * When a vcpu is "frozen" it is outside the critical section
         * around vmmops_run() and 'nextrip' points to the next
         * instruction. Thus instruction restart is achieved by setting
         * 'nextrip' to the vcpu's %rip.
         */
        error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
        KASSERT(!error, ("%s: error %d getting rip", __func__, error));
        VMM_CTR2(vcpu, "restarting instruction by updating "
            "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
        vcpu->nextrip = rip;
    } else {
        panic("%s: invalid state %d", __func__, state);
    }
    return (0);
}

int
vm_exit_intinfo(struct vcpu *vcpu, uint64_t info)
{
    int type, vector;

    if (info & VM_INTINFO_VALID) {
        type = info & VM_INTINFO_TYPE;
        vector = info & 0xff;
        if (type == VM_INTINFO_NMI && vector != IDT_NMI)
            return (EINVAL);
        if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
            return (EINVAL);
        if (info & VM_INTINFO_RSVD)
            return (EINVAL);
    } else {
        info = 0;
    }
    VMM_CTR2(vcpu, "%s: info1(%#lx)", __func__, info);
    vcpu->exitintinfo = info;
    return (0);
}
enum exc_class {
    EXC_BENIGN,
    EXC_CONTRIBUTORY,
    EXC_PAGEFAULT
};

#define IDT_VE  20      /* Virtualization Exception (Intel specific) */

static enum exc_class
exception_class(uint64_t info)
{
    int type, vector;

    KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
    type = info & VM_INTINFO_TYPE;
    vector = info & 0xff;

    /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
    switch (type) {
    case VM_INTINFO_HWINTR:
    case VM_INTINFO_SWINTR:
    case VM_INTINFO_NMI:
        return (EXC_BENIGN);
    default:
        /*
         * Hardware exception.
         *
         * SVM and VT-x use identical type values to represent NMI,
         * hardware interrupt and software interrupt.
         *
         * SVM uses type '3' for all exceptions. VT-x uses type '3'
         * for exceptions except #BP and #OF. #BP and #OF use a type
         * value of '5' or '6'. Therefore we don't check for explicit
         * values of 'type' to classify 'intinfo' into a hardware
         * exception.
         */
        break;
    }

    switch (vector) {
    case IDT_PF:
    case IDT_VE:
        return (EXC_PAGEFAULT);
    case IDT_DE:
    case IDT_TS:
    case IDT_NP:
    case IDT_SS:
    case IDT_GP:
        return (EXC_CONTRIBUTORY);
    default:
        return (EXC_BENIGN);
    }
}
static int
nested_fault(struct vcpu *vcpu, uint64_t info1, uint64_t info2,
    uint64_t *retinfo)
{
    enum exc_class exc1, exc2;
    int type1, vector1;

    KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
    KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));

    /*
     * If an exception occurs while attempting to call the double-fault
     * handler the processor enters shutdown mode (aka triple fault).
     */
    type1 = info1 & VM_INTINFO_TYPE;
    vector1 = info1 & 0xff;
    if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
        VMM_CTR2(vcpu, "triple fault: info1(%#lx), info2(%#lx)",
            info1, info2);
        vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT);
        *retinfo = 0;
        return (0);
    }

    /*
     * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
     */
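    /*
     * For example, a #GP (contributory) raised while delivering a #PF,
     * or a #PF raised while delivering anything other than a benign
     * event, escalates into a double fault below; unrelated (benign)
     * events are simply delivered one after the other.
     */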
    exc1 = exception_class(info1);
    exc2 = exception_class(info2);
    if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
        (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
        /* Convert nested fault into a double fault. */
        *retinfo = IDT_DF;
        *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
        *retinfo |= VM_INTINFO_DEL_ERRCODE;
    } else {
        /* Handle exceptions serially */
        *retinfo = info2;
    }
    return (1);
}

static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
    uint64_t info = 0;

    if (vcpu->exception_pending) {
        info = vcpu->exc_vector & 0xff;
        info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
        if (vcpu->exc_errcode_valid) {
            info |= VM_INTINFO_DEL_ERRCODE;
            info |= (uint64_t)vcpu->exc_errcode << 32;
        }
    }
    return (info);
}

int
vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo)
{
    uint64_t info1, info2;
    int valid;

    info1 = vcpu->exitintinfo;
    vcpu->exitintinfo = 0;

    info2 = 0;
    if (vcpu->exception_pending) {
        info2 = vcpu_exception_intinfo(vcpu);
        vcpu->exception_pending = 0;
        VMM_CTR2(vcpu, "Exception %d delivered: %#lx",
            vcpu->exc_vector, info2);
    }

    if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
        valid = nested_fault(vcpu, info1, info2, retinfo);
    } else if (info1 & VM_INTINFO_VALID) {
        *retinfo = info1;
        valid = 1;
    } else if (info2 & VM_INTINFO_VALID) {
        *retinfo = info2;
        valid = 1;
    } else {
        valid = 0;
    }

    if (valid) {
        VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), "
            "retinfo(%#lx)", __func__, info1, info2, *retinfo);
    }

    return (valid);
}

int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];
    *info1 = vcpu->exitintinfo;
    *info2 = vcpu_exception_intinfo(vcpu);
    return (0);
}
int
vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
    uint64_t regval;
    int error __diagused;

    if (vector < 0 || vector >= 32)
        return (EINVAL);

    /*
     * A double fault exception should never be injected directly into
     * the guest. It is a derived exception that results from specific
     * combinations of nested faults.
     */
    if (vector == IDT_DF)
        return (EINVAL);

    if (vcpu->exception_pending) {
        VMM_CTR2(vcpu, "Unable to inject exception %d due to "
            "pending exception %d", vector, vcpu->exc_vector);
        return (EBUSY);
    }

    if (errcode_valid) {
        /*
         * Exceptions don't deliver an error code in real mode.
         */
        error = vm_get_register(vcpu, VM_REG_GUEST_CR0, &regval);
        KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
        if (!(regval & CR0_PE))
            errcode_valid = 0;
    }

    /*
     * From section 26.6.1 "Interruptibility State" in Intel SDM:
     *
     * Event blocking by "STI" or "MOV SS" is cleared after guest executes
     * one instruction or incurs an exception.
     */
    error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0);
    KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
        __func__, error));

    if (restart_instruction)
        vm_restart_instruction(vcpu);

    vcpu->exception_pending = 1;
    vcpu->exc_vector = vector;
    vcpu->exc_errcode = errcode;
    vcpu->exc_errcode_valid = errcode_valid;
    VMM_CTR1(vcpu, "Exception %d pending", vector);
    return (0);
}

void
vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode)
{
    int error __diagused, restart_instruction;

    restart_instruction = 1;

    error = vm_inject_exception(vcpu, vector, errcode_valid,
        errcode, restart_instruction);
    KASSERT(error == 0, ("vm_inject_exception error %d", error));
}

void
vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2)
{
    int error __diagused;

    VMM_CTR2(vcpu, "Injecting page fault: error_code %#x, cr2 %#lx",
        error_code, cr2);

    error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2);
    KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));

    vm_inject_fault(vcpu, IDT_PF, 1, error_code);
}

static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];

    vcpu->nmi_pending = 1;
    vcpu_notify_event(vm, vcpuid, false);
    return (0);
}

int
vm_nmi_pending(struct vcpu *vcpu)
{
    return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vcpu *vcpu)
{
    if (vcpu->nmi_pending == 0)
        panic("vm_nmi_clear: inconsistent nmi_pending state");

    vcpu->nmi_pending = 0;
    vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1);
}

static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");

int
vm_inject_extint(struct vm *vm, int vcpuid)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];

    vcpu->extint_pending = 1;
    vcpu_notify_event(vm, vcpuid, false);
    return (0);
}

int
vm_extint_pending(struct vcpu *vcpu)
{
    return (vcpu->extint_pending);
}

void
vm_extint_clear(struct vcpu *vcpu)
{
    if (vcpu->extint_pending == 0)
        panic("vm_extint_clear: inconsistent extint_pending state");

    vcpu->extint_pending = 0;
    vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
    if (vcpu < 0 || vcpu >= vm->maxcpus)
        return (EINVAL);

    if (type < 0 || type >= VM_CAP_MAX)
        return (EINVAL);

    return (vmmops_getcap(vcpu_cookie(vm, vcpu), type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
    if (vcpu < 0 || vcpu >= vm->maxcpus)
        return (EINVAL);

    if (type < 0 || type >= VM_CAP_MAX)
        return (EINVAL);

    return (vmmops_setcap(vcpu_cookie(vm, vcpu), type, val));
}

struct vm *
vcpu_vm(struct vcpu *vcpu)
{
    return (vcpu->vm);
}

int
vcpu_vcpuid(struct vcpu *vcpu)
{
    return (vcpu->vcpuid);
}

struct vcpu *
vm_vcpu(struct vm *vm, int vcpuid)
{
    return (&vm->vcpu[vcpuid]);
}

struct vlapic *
vm_lapic(struct vcpu *vcpu)
{
    return (vcpu->vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{
    return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{
    return (vm->vhpet);
}
bool
vmm_is_pptdev(int bus, int slot, int func)
{
    bool found;
    int i, n, b, s, f;
    char *val, *cp, *cp2;

    /*
     * The length of an environment variable is limited to 128 bytes which
     * puts an upper limit on the number of passthru devices that may be
     * specified using a single environment variable.
     *
     * Work around this by scanning multiple environment variable
     * names instead of a single one - yuck!
     */
    const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

    /* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
    found = false;
    for (i = 0; names[i] != NULL && !found; i++) {
        cp = val = kern_getenv(names[i]);
        while (cp != NULL && *cp != '\0') {
            if ((cp2 = strchr(cp, ' ')) != NULL)
                *cp2 = '\0';

            n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
            if (n == 3 && bus == b && slot == s && func == f) {
                found = true;
                break;
            }

            if (cp2 != NULL)
                *cp2++ = ' ';

            cp = cp2;
        }
        freeenv(val);
    }
    return (found);
}
void *
vm_iommu_domain(struct vm *vm)
{
    return (vm->iommu);
}

int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
    int error;
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
        panic("vm_set_run_state: invalid vcpuid %d", vcpuid);

    vcpu = &vm->vcpu[vcpuid];

    vcpu_lock(vcpu);
    error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle);
    vcpu_unlock(vcpu);

    return (error);
}

enum vcpu_state
vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
{
    enum vcpu_state state;

    vcpu_lock(vcpu);
    state = vcpu->state;
    if (hostcpu != NULL)
        *hostcpu = vcpu->hostcpu;
    vcpu_unlock(vcpu);

    return (state);
}

int
vm_activate_cpu(struct vm *vm, int vcpuid)
{
    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
        return (EINVAL);

    if (CPU_ISSET(vcpuid, &vm->active_cpus))
        return (EBUSY);

    VCPU_CTR0(vm, vcpuid, "activated");
    CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
    return (0);
}

int
vm_suspend_cpu(struct vm *vm, int vcpuid)
{
    int i;

    if (vcpuid < -1 || vcpuid >= vm->maxcpus)
        return (EINVAL);

    if (vcpuid == -1) {
        vm->debug_cpus = vm->active_cpus;
        for (i = 0; i < vm->maxcpus; i++) {
            if (CPU_ISSET(i, &vm->active_cpus))
                vcpu_notify_event(vm, i, false);
        }
    } else {
        if (!CPU_ISSET(vcpuid, &vm->active_cpus))
            return (EINVAL);

        CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus);
        vcpu_notify_event(vm, vcpuid, false);
    }
    return (0);
}

int
vm_resume_cpu(struct vm *vm, int vcpuid)
{
    if (vcpuid < -1 || vcpuid >= vm->maxcpus)
        return (EINVAL);

    if (vcpuid == -1) {
        CPU_ZERO(&vm->debug_cpus);
    } else {
        if (!CPU_ISSET(vcpuid, &vm->debug_cpus))
            return (EINVAL);

        CPU_CLR_ATOMIC(vcpuid, &vm->debug_cpus);
    }
    return (0);
}

int
vcpu_debugged(struct vcpu *vcpu)
{
    return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
}

cpuset_t
vm_active_cpus(struct vm *vm)
{
    return (vm->active_cpus);
}

cpuset_t
vm_debug_cpus(struct vm *vm)
{
    return (vm->debug_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{
    return (vm->suspended_cpus);
}

void *
vcpu_stats(struct vcpu *vcpu)
{
    return (vcpu->stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
        return (EINVAL);

    *state = vm->vcpu[vcpuid].x2apic_state;

    return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= vm->maxcpus)
        return (EINVAL);

    if (state >= X2APIC_STATE_LAST)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];
    vcpu->x2apic_state = state;

    vlapic_set_x2apic_state(vcpu, state);

    return (0);
}

/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
static void
vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
{
    int hostcpu;

    hostcpu = vcpu->hostcpu;
    if (vcpu->state == VCPU_RUNNING) {
        KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
        if (hostcpu != curcpu) {
            if (lapic_intr) {
                vlapic_post_intr(vcpu->vlapic, hostcpu,
                    vmm_ipinum);
            } else {
                ipi_cpu(hostcpu, vmm_ipinum);
            }
        } else {
            /*
             * If the 'vcpu' is running on 'curcpu' then it must
             * be sending a notification to itself (e.g. SELF_IPI).
             * The pending event will be picked up when the vcpu
             * transitions back to guest context.
             */
        }
    } else {
        KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
            "with hostcpu %d", vcpu->state, hostcpu));
        if (vcpu->state == VCPU_SLEEPING)
            wakeup_one(vcpu);
    }
}

void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
    struct vcpu *vcpu = &vm->vcpu[vcpuid];

    vcpu_lock(vcpu);
    vcpu_notify_event_locked(vcpu, lapic_intr);
    vcpu_unlock(vcpu);
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{
    return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
    /*
     * XXX apic id is assumed to be numerically identical to vcpu id
     */
    return (apicid);
}
int
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
    int error, i;

    /*
     * Enforce that this function is called without any locks
     */
    WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
    KASSERT(vcpuid >= 0 && vcpuid < vm->maxcpus,
        ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));

restart:
    mtx_lock(&vm->rendezvous_mtx);
    if (vm->rendezvous_func != NULL) {
        /*
         * If a rendezvous is already in progress then we need to
         * call the rendezvous handler in case this 'vcpuid' is one
         * of the targets of the rendezvous.
         */
        VCPU_CTR0(vm, vcpuid, "Rendezvous already in progress");
        mtx_unlock(&vm->rendezvous_mtx);
        error = vm_handle_rendezvous(vm, vcpuid);
        if (error != 0)
            return (error);
        goto restart;
    }
    KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
        "rendezvous is still in progress"));

    VCPU_CTR0(vm, vcpuid, "Initiating rendezvous");
    vm->rendezvous_req_cpus = dest;
    CPU_ZERO(&vm->rendezvous_done_cpus);
    vm->rendezvous_arg = arg;
    vm->rendezvous_func = func;
    mtx_unlock(&vm->rendezvous_mtx);

    /*
     * Wake up any sleeping vcpus and trigger a VM-exit in any running
     * vcpus so they handle the rendezvous as soon as possible.
     */
    for (i = 0; i < vm->maxcpus; i++) {
        if (CPU_ISSET(i, &dest))
            vcpu_notify_event(vm, i, false);
    }

    return (vm_handle_rendezvous(vm, vcpuid));
}
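/*
 * Usage sketch (hypothetical caller): run 'func' once on every active vcpu,
 * with the initiating vcpu driving the rendezvous to completion:
 *
 *	cpuset_t dest = vm_active_cpus(vm);
 *	error = vm_smp_rendezvous(vm, vcpuid, dest, func, arg);
 *
 * In-kernel device emulation uses this when a change must be observed by all
 * vcpus at once.
 */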
struct vatpic *
vm_atpic(struct vm *vm)
{
    return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
    return (vm->vatpit);
}

struct vpmtmr *
vm_pmtmr(struct vm *vm)
{
    return (vm->vpmtmr);
}

struct vrtc *
vm_rtc(struct vm *vm)
{
    return (vm->vrtc);
}

enum vm_reg_name
vm_segment_name(int seg)
{
    static enum vm_reg_name seg_names[] = {
        VM_REG_GUEST_ES,
        VM_REG_GUEST_CS,
        VM_REG_GUEST_SS,
        VM_REG_GUEST_DS,
        VM_REG_GUEST_FS,
        VM_REG_GUEST_GS
    };

    KASSERT(seg >= 0 && seg < nitems(seg_names),
        ("%s: invalid segment encoding %d", __func__, seg));
    return (seg_names[seg]);
}

void
vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo)
{
    int idx;

    for (idx = 0; idx < num_copyinfo; idx++) {
        if (copyinfo[idx].cookie != NULL)
            vm_gpa_release(copyinfo[idx].cookie);
    }
    bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
}

int
vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo, int *fault)
{
    int error, idx, nused;
    size_t n, off, remaining;
    void *hva, *cookie;
    uint64_t gpa;

    bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);

    nused = 0;
    remaining = len;
    while (remaining > 0) {
        KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
        error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
        if (error || *fault)
            return (error);
        off = gpa & PAGE_MASK;
        n = min(remaining, PAGE_SIZE - off);
        copyinfo[nused].gpa = gpa;
        copyinfo[nused].len = n;
        remaining -= n;
        gla += n;
        nused++;
    }

    for (idx = 0; idx < nused; idx++) {
        hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa,
            copyinfo[idx].len, prot, &cookie);
        if (hva == NULL)
            break;
        copyinfo[idx].hva = hva;
        copyinfo[idx].cookie = cookie;
    }

    if (idx != nused) {
        vm_copy_teardown(copyinfo, num_copyinfo);
        return (EFAULT);
    } else {
        *fault = 0;
        return (0);
    }
}
void
vm_copyin(struct vm_copyinfo *copyinfo, void *kaddr, size_t len)
{
    char *dst;
    int idx;

    dst = kaddr;
    idx = 0;
    while (len > 0) {
        bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
        len -= copyinfo[idx].len;
        dst += copyinfo[idx].len;
        idx++;
    }
}

void
vm_copyout(const void *kaddr, struct vm_copyinfo *copyinfo, size_t len)
{
    const char *src;
    int idx;

    src = kaddr;
    idx = 0;
    while (len > 0) {
        bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
        len -= copyinfo[idx].len;
        src += copyinfo[idx].len;
        idx++;
    }
}
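/*
 * Typical sequence (sketch): translate a guest linear address range, copy the
 * bytes, then release the held pages. 'buf', 'gla', 'len' and 'fault' are
 * caller locals here, not names from this file:
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int error, fault;
 *
 *	error = vm_copy_setup(vcpu, paging, gla, len, VM_PROT_READ,
 *	    copyinfo, nitems(copyinfo), &fault);
 *	if (error == 0 && !fault) {
 *		vm_copyin(copyinfo, buf, len);
 *		vm_copy_teardown(copyinfo, nitems(copyinfo));
 *	}
 */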
/*
 * Return the amount of in-use and wired memory for the VM. Since
 * these are global stats, only return the values for vCPU 0.
 */
VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
VMM_STAT_DECLARE(VMM_MEM_WIRED);

static void
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{
    if (vcpu == 0) {
        vmm_stat_set(vm_vcpu(vm, vcpu), VMM_MEM_RESIDENT,
            PAGE_SIZE * vmspace_resident_count(vm->vmspace));
    }
}

static void
vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{
    if (vcpu == 0) {
        vmm_stat_set(vm_vcpu(vm, vcpu), VMM_MEM_WIRED,
            PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
    }
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);

#ifdef BHYVE_SNAPSHOT
static int
vm_snapshot_vcpus(struct vm *vm, struct vm_snapshot_meta *meta)
{
    uint64_t tsc, now;
    int ret;
    struct vcpu *vcpu;
    uint16_t i, maxcpus;

    now = rdtsc();
    maxcpus = vm_get_maxcpus(vm);
    for (i = 0; i < maxcpus; i++) {
        vcpu = &vm->vcpu[i];

        SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done);
        SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done);

        /*
         * Save the absolute TSC value by adding now to tsc_offset.
         *
         * It will be turned back into an actual offset when the
         * TSC restore function is called.
         */
        tsc = now + vcpu->tsc_offset;
        SNAPSHOT_VAR_OR_LEAVE(tsc, meta, ret, done);
    }

done:
    return (ret);
}

static int
vm_snapshot_vm(struct vm *vm, struct vm_snapshot_meta *meta)
{
    int ret;

    ret = vm_snapshot_vcpus(vm, meta);
    if (ret != 0)
        goto done;

done:
    return (ret);
}

static int
vm_snapshot_vcpu(struct vm *vm, struct vm_snapshot_meta *meta)
{
    int error;
    struct vcpu *vcpu;
    uint16_t i, maxcpus;

    error = 0;

    maxcpus = vm_get_maxcpus(vm);
    for (i = 0; i < maxcpus; i++) {
        vcpu = &vm->vcpu[i];

        error = vmmops_vcpu_snapshot(vcpu->cookie, meta);
        if (error != 0) {
            printf("%s: failed to snapshot vmcs/vmcb data for "
                "vCPU: %d; error: %d\n", __func__, i, error);
            goto done;
        }
    }

done:
    return (error);
}

/*
 * Save kernel-side structures to user-space for snapshotting.
 */
int
vm_snapshot_req(struct vm *vm, struct vm_snapshot_meta *meta)
{
    int ret = 0;

    switch (meta->dev_req) {
    case STRUCT_VMX:
        ret = vmmops_snapshot(vm->cookie, meta);
        break;
    case STRUCT_VMCX:
        ret = vm_snapshot_vcpu(vm, meta);
        break;
    case STRUCT_VM:
        ret = vm_snapshot_vm(vm, meta);
        break;
    case STRUCT_VIOAPIC:
        ret = vioapic_snapshot(vm_ioapic(vm), meta);
        break;
    case STRUCT_VLAPIC:
        ret = vlapic_snapshot(vm, meta);
        break;
    case STRUCT_VHPET:
        ret = vhpet_snapshot(vm_hpet(vm), meta);
        break;
    case STRUCT_VATPIC:
        ret = vatpic_snapshot(vm_atpic(vm), meta);
        break;
    case STRUCT_VATPIT:
        ret = vatpit_snapshot(vm_atpit(vm), meta);
        break;
    case STRUCT_VPMTMR:
        ret = vpmtmr_snapshot(vm_pmtmr(vm), meta);
        break;
    case STRUCT_VRTC:
        ret = vrtc_snapshot(vm_rtc(vm), meta);
        break;
    default:
        printf("%s: failed to find the requested type %#x\n",
            __func__, meta->dev_req);
        ret = EINVAL;
    }
    return (ret);
}
void
vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset)
{
    vcpu->tsc_offset = offset;
}

int
vm_restore_time(struct vm *vm)
{
    int error;
    uint64_t now;
    struct vcpu *vcpu;
    uint16_t i, maxcpus;

    now = rdtsc();

    error = vhpet_restore_time(vm_hpet(vm));
    if (error)
        return (error);

    maxcpus = vm_get_maxcpus(vm);
    for (i = 0; i < maxcpus; i++) {
        vcpu = &vm->vcpu[i];

        error = vmmops_restore_tsc(vcpu->cookie,
            vcpu->tsc_offset - now);
        if (error)
            return (error);
    }

    return (0);
}
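/*
 * Note on the arithmetic above: vm_snapshot_vcpus() stored tsc_offset as an
 * absolute value (snapshot-time rdtsc() plus the offset), so subtracting the
 * current rdtsc() reading here turns it back into a relative offset suitable
 * for the backend's TSC-offsetting control.
 */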