/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/linker.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <x86/segments.h>
#include <machine/specialreg.h>

#include <capsicum_helpers.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_snapshot.h>

#include "vmmapi.h"
#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

/*
 * Size of the guard region before and after the virtual address space
 * mapping the guest physical memory. This must be a multiple of the
 * superpage size for performance reasons.
 */
#define	VM_MMAP_GUARD_SIZE	(4 * MB)

#define	PROT_RW		(PROT_READ | PROT_WRITE)
#define	PROT_ALL	(PROT_READ | PROT_WRITE | PROT_EXEC)

struct vmctx {
	int	fd;
	uint32_t lowmem_limit;
	int	memflags;
	size_t	lowmem;
	size_t	highmem;
	char	*baseaddr;
	char	*name;
};

/* A vcpu handle is just the (ctx, vcpuid) pair used by vcpu_ioctl(). */
struct vcpu {
	struct vmctx *ctx;
	int	vcpuid;
};

#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}

int
vm_create(const char *name)
{
	/* Try to load vmm(4) module before creating a guest. */
	if (modfind("vmm") < 0)
		kldload("vmm");
	return (CREATE(name));
}
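
/*
 * Example (illustrative sketch, not part of the library): the typical
 * create/open/destroy lifecycle for a guest named "testvm".  Error
 * handling is abbreviated.
 */
static void
example_vm_lifecycle(void)
{
	struct vmctx *ctx;

	if (vm_create("testvm") != 0 && errno != EEXIST)
		return;
	ctx = vm_open("testvm");
	if (ctx == NULL)
		return;
	/* ... configure memory and vcpus, run the guest ... */
	vm_destroy(ctx);	/* tears down the kernel VM as well */
}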
struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;
	int saved_errno;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	saved_errno = errno;
	free(vm);
	errno = saved_errno;
	return (NULL);
}

void
vm_close(struct vmctx *vm)
{
	assert(vm != NULL);

	close(vm->fd);
	free(vm);
}

void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}

struct vcpu *
vm_vcpu_open(struct vmctx *ctx, int vcpuid)
{
	struct vcpu *vcpu;

	vcpu = malloc(sizeof(*vcpu));
	vcpu->ctx = ctx;
	vcpu->vcpuid = vcpuid;
	return (vcpu);
}

void
vm_vcpu_close(struct vcpu *vcpu)
{
	free(vcpu);
}

int
vcpu_id(struct vcpu *vcpu)
{
	return (vcpu->vcpuid);
}
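
/*
 * Example (illustrative sketch): binding a vcpu handle to an existing
 * context.  The handle is a thin (ctx, vcpuid) pair and can be opened
 * and closed independently of the vcpu's kernel state.
 */
static void
example_vcpu_handle(struct vmctx *ctx)
{
	struct vcpu *vcpu;

	vcpu = vm_vcpu_open(ctx, 0);	/* vcpu 0 is the BSP */
	assert(vcpu_id(vcpu) == 0);
	vm_vcpu_close(vcpu);
}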
int
vm_parse_memsize(const char *opt, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(opt, &endptr, 0);
	if (*opt != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(opt, ret_memsize);

	return (error);
}
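
/*
 * Example (illustrative): how the two parsing paths above behave.  A bare
 * number below one megabyte is scaled up for backward compatibility;
 * anything else goes through expand_number(3) suffix parsing.
 */
static void
example_parse_memsize(void)
{
	size_t sz;

	vm_parse_memsize("512", &sz);	/* compat path: 512 -> 512 MB */
	assert(sz == 512 * MB);
	vm_parse_memsize("4G", &sz);	/* expand_number(3) path */
	assert(sz == 4 * GB);
}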
uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

int
vm_get_memflags(struct vmctx *ctx)
{

	return (ctx->memflags);
}

/*
 * Map segment 'segid' starting at 'off' into guest address range
 * [gpa, gpa + len).
 */
int
vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot)
{
	struct vm_memmap memmap;
	int error, flags;

	memmap.gpa = gpa;
	memmap.segid = segid;
	memmap.segoff = off;
	memmap.len = len;
	memmap.prot = prot;
	memmap.flags = 0;

	if (ctx->memflags & VM_MEM_F_WIRED)
		memmap.flags |= VM_MEMMAP_F_WIRED;

	/*
	 * If this mapping already exists then don't create it again. This
	 * is the common case for SYSMEM mappings created by bhyveload(8).
	 */
	error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
	if (error == 0 && gpa == memmap.gpa) {
		if (segid != memmap.segid || off != memmap.segoff ||
		    prot != memmap.prot || flags != memmap.flags) {
			errno = EEXIST;
			return (-1);
		} else {
			return (0);
		}
	}

	error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
	return (error);
}
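
/*
 * Example (illustrative sketch): allocating an anonymous 1 MB system
 * memory segment and mapping it read/write at guest physical 64 MB.
 * Real callers normally let vm_setup_memory() manage the VM_SYSMEM
 * segment; this only shows the alloc-then-map pairing.
 */
static int
example_map_segment(struct vmctx *ctx)
{
	if (vm_alloc_memseg(ctx, VM_SYSMEM, 1 * MB, NULL) != 0)
		return (-1);
	return (vm_mmap_memseg(ctx, 64 * MB, VM_SYSMEM, 0, 1 * MB, PROT_RW));
}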
void
vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr,
    size_t *lowmem_size, size_t *highmem_size)
{

	*guest_baseaddr = ctx->baseaddr;
	*lowmem_size = ctx->lowmem;
	*highmem_size = ctx->highmem;
}

int
vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
{
	struct vm_munmap munmap;
	int error;

	munmap.gpa = gpa;
	munmap.len = len;

	error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap);
	return (error);
}

int
vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct vm_memmap memmap;
	int error;

	bzero(&memmap, sizeof(struct vm_memmap));
	memmap.gpa = *gpa;
	error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
	if (error == 0) {
		*gpa = memmap.gpa;
		*segid = memmap.segid;
		*segoff = memmap.segoff;
		*len = memmap.len;
		*prot = memmap.prot;
		*flags = memmap.flags;
	}
	return (error);
}
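
/*
 * Example (illustrative sketch): walking every memory map in the guest
 * address space.  VM_MMAP_GETNEXT returns the first mapping at or above
 * the gpa passed in, so stepping past each mapping enumerates them all.
 */
static void
example_dump_memmaps(struct vmctx *ctx)
{
	vm_ooffset_t segoff;
	vm_paddr_t gpa;
	size_t maplen;
	int flags, prot, segid;

	gpa = 0;
	while (vm_mmap_getnext(ctx, &gpa, &segid, &segoff, &maplen,
	    &prot, &flags) == 0) {
		printf("segid %d at gpa %#lx, %zu bytes\n", segid,
		    (unsigned long)gpa, maplen);
		gpa += maplen;	/* continue past this mapping */
	}
}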
/*
 * Return 0 if the segments are identical and non-zero otherwise.
 *
 * This is slightly complicated by the fact that only device memory segments
 * are named.
 */
static int
cmpseg(size_t len, const char *str, size_t len2, const char *str2)
{

	if (len == len2) {
		if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
			return (0);
	}
	return (-1);
}

int
vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	/*
	 * If the memory segment has already been created then just return.
	 * This is the usual case for the SYSMEM segment created by userspace
	 * loaders like bhyveload(8).
	 */
	error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
	    sizeof(memseg.name));
	if (error)
		return (error);

	if (memseg.len != 0) {
		if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
			errno = EINVAL;
			return (-1);
		} else {
			return (0);
		}
	}

	bzero(&memseg, sizeof(struct vm_memseg));
	memseg.segid = segid;
	memseg.len = len;
	if (name != NULL) {
		n = strlcpy(memseg.name, name, sizeof(memseg.name));
		if (n >= sizeof(memseg.name)) {
			errno = ENAMETOOLONG;
			return (-1);
		}
	}

	error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
	return (error);
}
int
vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
    size_t bufsize)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	bzero(&memseg, sizeof(memseg));
	memseg.segid = segid;
	error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
	if (error == 0) {
		*lenp = memseg.len;
		n = strlcpy(namebuf, memseg.name, bufsize);
		if (n >= bufsize) {
			errno = ENAMETOOLONG;
			error = -1;
		}
	}
	return (error);
}

static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
{
	char *ptr;
	int error, flags;

	/* Map 'len' bytes starting at 'gpa' in the guest address space */
	error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
	if (error)
		return (error);

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap into the process address space on the host */
	ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
	if (ptr == MAP_FAILED)
		return (-1);

	return (0);
}
int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	size_t objsize, len;
	vm_paddr_t gpa;
	char *baseaddr, *ptr;
	int error;

	assert(vms == VM_MMAP_ALL);

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem_limit;
		objsize = 4*GB + ctx->highmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
		objsize = ctx->lowmem;
	}

	error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
	if (error)
		return (error);

	/*
	 * Stake out a contiguous region covering the guest physical memory
	 * and the adjoining guard regions.
	 */
	len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
	ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
	if (ptr == MAP_FAILED)
		return (-1);

	baseaddr = ptr + VM_MMAP_GUARD_SIZE;
	if (ctx->highmem > 0) {
		gpa = 4*GB;
		len = ctx->highmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	if (ctx->lowmem > 0) {
		gpa = 0;
		len = ctx->lowmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	ctx->baseaddr = baseaddr;

	return (0);
}
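
/*
 * Example (illustrative sketch): the usual bhyve-style memory setup,
 * parsing a size string and backing the guest with a single anonymous
 * SYSMEM segment that is split across lowmem and highmem as needed.
 */
static int
example_setup_memory(struct vmctx *ctx, const char *opt)
{
	size_t memsize;

	if (vm_parse_memsize(opt, &memsize) != 0)
		return (-1);
	return (vm_setup_memory(ctx, memsize, VM_MMAP_ALL));
}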
/*
 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 * the lowmem or highmem regions.
 *
 * In particular return NULL if [gaddr, gaddr+len) falls in the guest MMIO
 * region.  The instruction emulation code depends on this behavior.
 */
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	if (ctx->lowmem > 0) {
		if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
		    gaddr + len <= ctx->lowmem)
			return (ctx->baseaddr + gaddr);
	}

	if (ctx->highmem > 0) {
		if (gaddr >= 4*GB) {
			if (gaddr < 4*GB + ctx->highmem &&
			    len <= ctx->highmem &&
			    gaddr + len <= 4*GB + ctx->highmem)
				return (ctx->baseaddr + gaddr);
		}
	}

	return (NULL);
}
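
/*
 * Example (illustrative sketch): reading a guest physical word through the
 * direct mapping.  A NULL return means the range is not backed by guest
 * RAM (e.g. it falls in the MMIO hole) and must not be dereferenced.
 */
static int
example_read_gpa(struct vmctx *ctx, vm_paddr_t gpa, uint64_t *valp)
{
	uint64_t *p;

	p = vm_map_gpa(ctx, gpa, sizeof(*p));
	if (p == NULL)
		return (-1);
	*valp = *p;
	return (0);
}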
vm_paddr_t
vm_rev_map_gpa(struct vmctx *ctx, void *addr)
{
	vm_paddr_t offaddr;

	offaddr = (char *)addr - ctx->baseaddr;

	if (ctx->lowmem > 0)
		if (offaddr <= ctx->lowmem)
			return (offaddr);

	if (ctx->highmem > 0)
		if (offaddr >= 4*GB && offaddr < 4*GB + ctx->highmem)
			return (offaddr);

	return ((vm_paddr_t)-1);
}

const char *
vm_get_name(struct vmctx *ctx)
{

	return (ctx->name);
}

size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

	return (ctx->lowmem);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

	return (ctx->highmem);
}
void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
	char pathname[MAXPATHLEN];
	size_t len2;
	char *base, *ptr;
	int fd, error, flags;

	fd = -1;
	ptr = MAP_FAILED;
	if (name == NULL || strlen(name) == 0) {
		errno = EINVAL;
		goto done;
	}

	error = vm_alloc_memseg(ctx, segid, len, name);
	if (error)
		goto done;

	strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
	strlcat(pathname, ctx->name, sizeof(pathname));
	strlcat(pathname, ".", sizeof(pathname));
	strlcat(pathname, name, sizeof(pathname));

	fd = open(pathname, O_RDWR);
	if (fd < 0)
		goto done;

	/*
	 * Stake out a contiguous region covering the device memory and the
	 * adjoining guard regions.
	 */
	len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
	base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
	    0);
	if (base == MAP_FAILED)
		goto done;

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap the devmem region in the host address space */
	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
done:
	if (fd >= 0)
		close(fd);
	return (ptr);
}
static int
vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg)
{
	/*
	 * XXX: fragile, handle with care
	 * Assumes that the first field of the ioctl data
	 * is the vcpuid.
	 */
	*(int *)arg = vcpu->vcpuid;
	return (ioctl(vcpu->ctx->fd, cmd, arg));
}
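
/*
 * Example (illustrative): what the layout assumption above means in
 * practice.  Every per-vcpu ioctl argument struct must start with the
 * vcpu id so that vcpu_ioctl() can stamp it in.  The struct below is
 * hypothetical; see struct vm_register for a real instance.
 */
struct example_vcpu_arg {
	int	cpuid;		/* must be first; written by vcpu_ioctl() */
	uint64_t payload;	/* command-specific data follows */
};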
int
vm_set_desc(struct vcpu *vcpu, int reg,
    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = vcpu_ioctl(vcpu, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}

int
vm_get_desc(struct vcpu *vcpu, int reg, uint64_t *base, uint32_t *limit,
    uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.regnum = reg;

	error = vcpu_ioctl(vcpu, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}

int
vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc)
{
	int error;

	error = vm_get_desc(vcpu, reg, &seg_desc->base, &seg_desc->limit,
	    &seg_desc->access);
	return (error);
}

int
vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = vcpu_ioctl(vcpu, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.regnum = reg;

	error = vcpu_ioctl(vcpu, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}
int
vm_set_register_set(struct vcpu *vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
	int error;
	struct vm_register_set vmregset;

	bzero(&vmregset, sizeof(vmregset));
	vmregset.count = count;
	vmregset.regnums = regnums;
	vmregset.regvals = regvals;

	error = vcpu_ioctl(vcpu, VM_SET_REGISTER_SET, &vmregset);
	return (error);
}

int
vm_get_register_set(struct vcpu *vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
	int error;
	struct vm_register_set vmregset;

	bzero(&vmregset, sizeof(vmregset));
	vmregset.count = count;
	vmregset.regnums = regnums;
	vmregset.regvals = regvals;

	error = vcpu_ioctl(vcpu, VM_GET_REGISTER_SET, &vmregset);
	return (error);
}

int
vm_run(struct vcpu *vcpu, struct vm_exit *vmexit)
{
	int error;
	struct vm_run vmrun;

	bzero(&vmrun, sizeof(vmrun));

	error = vcpu_ioctl(vcpu, VM_RUN, &vmrun);
	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
	return (error);
}
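
/*
 * Example (illustrative sketch): a minimal run loop.  Real callers such as
 * bhyve(8) dispatch on many more exit reasons; this only shows the
 * vm_run()/vm_exit contract.
 */
static void
example_run_loop(struct vcpu *vcpu)
{
	struct vm_exit vmexit;

	for (;;) {
		if (vm_run(vcpu, &vmexit) != 0)
			break;
		switch (vmexit.exitcode) {
		case VM_EXITCODE_HLT:
			return;		/* guest executed HLT */
		default:
			break;		/* unhandled exit; keep running */
		}
	}
}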
int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

int
vm_reinit(struct vmctx *ctx)
{

	return (ioctl(ctx->fd, VM_REINIT, 0));
}

int
vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	struct vm_exception exc;

	exc.vector = vector;
	exc.error_code = errcode;
	exc.error_code_valid = errcode_valid;
	exc.restart_instruction = restart_instruction;

	return (vcpu_ioctl(vcpu, VM_INJECT_EXCEPTION, &exc));
}

int
vm_apicid2vcpu(struct vmctx *ctx __unused, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}

int
vm_lapic_irq(struct vcpu *vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.vector = vector;

	return (vcpu_ioctl(vcpu, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vcpu *vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.vector = vector;

	return (vcpu_ioctl(vcpu, VM_LAPIC_LOCAL_IRQ, &vmirq));
}
int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}

int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}
int
vm_readwrite_kernemu_device(struct vcpu *vcpu, vm_paddr_t gpa,
    bool write, int size, uint64_t *value)
{
	struct vm_readwrite_kernemu_device irp = {
		.access_width = fls(size) - 1,
		.gpa = gpa,
		.value = write ? *value : ~0ul,
	};
	long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV);
	int rc;

	rc = vcpu_ioctl(vcpu, cmd, &irp);
	if (rc == 0 && !write)
		*value = irp.value;
	return (rc);
}
int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}

int
vm_inject_nmi(struct vcpu *vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));

	return (vcpu_ioctl(vcpu, VM_INJECT_NMI, &vmnmi));
}
static const char *capstrmap[] = {
	[VM_CAP_HALT_EXIT]  = "hlt_exit",
	[VM_CAP_MTRAP_EXIT] = "mtrap_exit",
	[VM_CAP_PAUSE_EXIT] = "pause_exit",
	[VM_CAP_UNRESTRICTED_GUEST] = "unrestricted_guest",
	[VM_CAP_ENABLE_INVPCID] = "enable_invpcid",
	[VM_CAP_BPT_EXIT] = "bpt_exit",
};

int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; i < (int)nitems(capstrmap); i++) {
		if (strcmp(capstrmap[i], capname) == 0)
			return (i);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	if (type >= 0 && type < (int)nitems(capstrmap))
		return (capstrmap[type]);

	return (NULL);
}

int
vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap, int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.captype = cap;

	error = vcpu_ioctl(vcpu, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.captype = cap;
	vmcap.capval = val;

	return (vcpu_ioctl(vcpu, VM_SET_CAPABILITY, &vmcap));
}
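
/*
 * Example (illustrative sketch): enabling exit-on-HLT by capability name,
 * the way command line options are typically wired up.
 */
static int
example_enable_hlt_exit(struct vcpu *vcpu)
{
	int type;

	type = vm_capability_name2type("hlt_exit");
	if (type < 0)
		return (-1);
	return (vm_set_capability(vcpu, type, 1));
}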
int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;

	return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix);
}

int
vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev ppt;

	bzero(&ppt, sizeof(ppt));
	ppt.bus = bus;
	ppt.slot = slot;
	ppt.func = func;

	return ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &ppt);
}
uint64_t *
vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv,
    int *ret_entries)
{
	static _Thread_local uint64_t *stats_buf;
	static _Thread_local u_int stats_count;
	uint64_t *new_stats;
	struct vm_stats vmstats;
	u_int count, index;

	count = 0;
	for (index = 0;; index += nitems(vmstats.statbuf)) {
		vmstats.index = index;
		if (vcpu_ioctl(vcpu, VM_STATS, &vmstats) != 0)
			break;
		if (stats_count < index + vmstats.num_entries) {
			new_stats = realloc(stats_buf,
			    (index + vmstats.num_entries) * sizeof(uint64_t));
			if (new_stats == NULL)
				return (NULL);
			stats_count = index + vmstats.num_entries;
			stats_buf = new_stats;
		}
		memcpy(stats_buf + index, vmstats.statbuf,
		    vmstats.num_entries * sizeof(uint64_t));
		count += vmstats.num_entries;

		if (vmstats.num_entries != nitems(vmstats.statbuf))
			break;
	}
	if (count == 0)
		return (NULL);
	if (ret_entries)
		*ret_entries = count;
	if (ret_tv)
		*ret_tv = vmstats.tv;
	return (stats_buf);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}
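
/*
 * Example (illustrative sketch): dumping every vcpu statistic together
 * with its kernel-provided description string.
 */
static void
example_dump_stats(struct vmctx *ctx, struct vcpu *vcpu)
{
	struct timeval tv;
	const char *desc;
	uint64_t *stats;
	int i, nstats;

	stats = vm_get_stats(vcpu, &tv, &nstats);
	if (stats == NULL)
		return;
	for (i = 0; i < nstats; i++) {
		desc = vm_get_stat_desc(ctx, i);
		printf("%-40s %lu\n", desc != NULL ? desc : "unknown",
		    (unsigned long)stats[i]);
	}
}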
int
vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));

	error = vcpu_ioctl(vcpu, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.state = state;

	error = vcpu_ioctl(vcpu, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}
/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vcpu *vcpu)
{
	int error;
	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
	uint32_t desc_access, desc_limit;
	uint16_t sel;

	zero = 0;

	rflags = 0x2;
	error = vm_set_register(vcpu, VM_REG_GUEST_RFLAGS, rflags);
	if (error)
		goto done;

	rip = 0xfff0;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RIP, rip)) != 0)
		goto done;

	/*
	 * According to Intel's Software Developer Manual, CR0 should be
	 * initialized with CR0_ET | CR0_NW | CR0_CD, but that crashes some
	 * guests like Windows.
	 */
	cr0 = CR0_NE;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
		goto done;

	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR2, zero)) != 0)
		goto done;

	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR3, zero)) != 0)
		goto done;

	cr4 = 0;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
		goto done;

	/*
	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
	 */
	desc_base = 0xffff0000;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vcpu, VM_REG_GUEST_CS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0xf000;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_CS, sel)) != 0)
		goto done;

	/*
	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
	 */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vcpu, VM_REG_GUEST_SS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vcpu, VM_REG_GUEST_DS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vcpu, VM_REG_GUEST_ES,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vcpu, VM_REG_GUEST_FS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vcpu, VM_REG_GUEST_GS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_SS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_DS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_ES, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_FS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_GS, sel)) != 0)
		goto done;

	if ((error = vm_set_register(vcpu, VM_REG_GUEST_EFER, zero)) != 0)
		goto done;

	/* General purpose registers */
	rdx = 0xf00;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RAX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RCX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R8, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R9, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R10, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R11, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R12, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R13, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R14, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_R15, zero)) != 0)
		goto done;

	/* GDTR, IDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0;
	error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR,
	    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	error = vm_set_desc(vcpu, VM_REG_GUEST_IDTR,
	    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	/* TR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0000008b;
	error = vm_set_desc(vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_TR, sel)) != 0)
		goto done;

	/* LDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x00000082;
	error = vm_set_desc(vcpu, VM_REG_GUEST_LDTR, desc_base,
	    desc_limit, desc_access);
	if (error)
		goto done;

	if ((error = vm_set_register(vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
		goto done;

	if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR6,
		 0xffff0ff0)) != 0)
		goto done;
	if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR7, 0x400)) !=
	    0)
		goto done;

	if ((error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW,
		 zero)) != 0)
		goto done;

	error = 0;
done:
	return (error);
}
int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}
int
vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = vcpu_ioctl(vcpu, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

int
vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = vcpu_ioctl(vcpu, VM_GLA2GPA_NOFAULT, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}
#define	min(a,b)	(((a) < (b)) ? (a) : (b))

int
vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
{
	void *va;
	uint64_t gpa, off;
	int error, i, n;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);

		off = gpa & PAGE_MASK;
		n = MIN(len, PAGE_SIZE - off);

		va = vm_map_gpa(vcpu->ctx, gpa, n);
		if (va == NULL)
			return (EFAULT);

		iov->iov_base = va;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}
void
vm_copy_teardown(struct iovec *iov __unused, int iovcnt __unused)
{
	/*
	 * Intentionally empty.  This is used by the instruction
	 * emulation code shared with the kernel.  The in-kernel
	 * version of this is non-empty.
	 */
}

void
vm_copyin(struct iovec *iov, void *vp, size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	dst = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		src = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		dst += n;
		len -= n;
	}
}

void
vm_copyout(const void *vp, struct iovec *iov, size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	src = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		dst = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		src += n;
		len -= n;
	}
}
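
/*
 * Example (illustrative sketch): copying a guest-virtual buffer into a
 * host buffer by first resolving it into an iovec with vm_copy_setup().
 * 'paging' is assumed to describe the vcpu's current paging mode.
 */
static int
example_copy_from_guest(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, void *buf, size_t len)
{
	struct iovec iov[8];
	int error, fault;

	error = vm_copy_setup(vcpu, paging, gla, len, PROT_READ,
	    iov, nitems(iov), &fault);
	if (error != 0 || fault)
		return (-1);
	vm_copyin(iov, buf, len);
	return (0);
}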
static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
	struct vm_cpuset vm_cpuset;
	int error;

	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
	vm_cpuset.which = which;
	vm_cpuset.cpusetsize = sizeof(cpuset_t);
	vm_cpuset.cpus = cpus;

	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
	return (error);
}

int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
}
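
/*
 * Example (illustrative sketch): counting the vcpus the kernel currently
 * considers active.
 */
static int
example_count_active_vcpus(struct vmctx *ctx)
{
	cpuset_t cpus;
	int i, n;

	if (vm_active_cpus(ctx, &cpus) != 0)
		return (-1);
	n = 0;
	for (i = 0; i < (int)CPU_SETSIZE; i++)
		if (CPU_ISSET(i, &cpus))
			n++;
	return (n);
}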
int
vm_activate_cpu(struct vcpu *vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	error = vcpu_ioctl(vcpu, VM_ACTIVATE_CPU, &ac);
	return (error);
}

int
vm_suspend_all_cpus(struct vmctx *ctx)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = -1;
	error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
	return (error);
}

int
vm_suspend_cpu(struct vcpu *vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	error = vcpu_ioctl(vcpu, VM_SUSPEND_CPU, &ac);
	return (error);
}

int
vm_resume_cpu(struct vcpu *vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	error = vcpu_ioctl(vcpu, VM_RESUME_CPU, &ac);
	return (error);
}

int
vm_resume_all_cpus(struct vmctx *ctx)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = -1;
	error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
	return (error);
}
int
vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	error = vcpu_ioctl(vcpu, VM_GET_INTINFO, &vmii);
	if (error == 0) {
		*info1 = vmii.info1;
		*info2 = vmii.info2;
	}
	return (error);
}

int
vm_set_intinfo(struct vcpu *vcpu, uint64_t info1)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.info1 = info1;
	error = vcpu_ioctl(vcpu, VM_SET_INTINFO, &vmii);
	return (error);
}
int
vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	rtcdata.value = value;
	error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
	return (error);
}

int
vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
	if (error == 0)
		*retval = rtcdata.value;
	return (error);
}

int
vm_rtc_settime(struct vmctx *ctx, time_t secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	rtctime.secs = secs;
	error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
	return (error);
}

int
vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
	if (error == 0)
		*secs = rtctime.secs;
	return (error);
}
int
vm_restart_instruction(struct vcpu *vcpu)
{
	int arg;

	return (vcpu_ioctl(vcpu, VM_RESTART_INSTRUCTION, &arg));
}

int
vm_snapshot_req(struct vmctx *ctx, struct vm_snapshot_meta *meta)
{

	if (ioctl(ctx->fd, VM_SNAPSHOT_REQ, meta) == -1) {
#ifdef SNAPSHOT_DEBUG
		fprintf(stderr, "%s: snapshot failed for %s: %d\r\n",
		    __func__, meta->dev_name, errno);
#endif
		return (-1);
	}
	return (0);
}

int
vm_restore_time(struct vmctx *ctx)
{
	int dummy;

	dummy = 0;
	return (ioctl(ctx->fd, VM_RESTORE_TIME, &dummy));
}
int
vm_set_topology(struct vmctx *ctx,
    uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
{
	struct vm_cpu_topology topology;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	topology.sockets = sockets;
	topology.cores = cores;
	topology.threads = threads;
	topology.maxcpus = maxcpus;
	return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
}

int
vm_get_topology(struct vmctx *ctx,
    uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
{
	struct vm_cpu_topology topology;
	int error;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
	if (error == 0) {
		*sockets = topology.sockets;
		*cores = topology.cores;
		*threads = topology.threads;
		*maxcpus = topology.maxcpus;
	}
	return (error);
}
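
/*
 * Example (illustrative): advertising 2 sockets x 2 cores x 1 thread to
 * the guest.  The sockets/cores/threads product is expected to cover the
 * vcpus the guest will actually use, within the maxcpus cap.
 */
static int
example_set_topology(struct vmctx *ctx)
{
	return (vm_set_topology(ctx, 2, 2, 1, 16));
}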
/* Keep in sync with machine/vmm_dev.h. */
static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT,
    VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG, VM_MMAP_MEMSEG,
    VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER,
    VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR,
    VM_SET_REGISTER_SET, VM_GET_REGISTER_SET,
    VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV,
    VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ,
    VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ,
    VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ,
    VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
    VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
    VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
    VM_PPTDEV_MSIX, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_DISABLE_MSIX,
    VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
    VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
    VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
    VM_GLA2GPA_NOFAULT,
    VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU,
    VM_SET_INTINFO, VM_GET_INTINFO,
    VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME,
    VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY,
    VM_SNAPSHOT_REQ, VM_RESTORE_TIME };

int
vm_limit_rights(struct vmctx *ctx)
{
	cap_rights_t rights;
	size_t ncmds;

	cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW);
	if (caph_rights_limit(ctx->fd, &rights) != 0)
		return (-1);
	ncmds = nitems(vm_ioctl_cmds);
	if (caph_ioctls_limit(ctx->fd, vm_ioctl_cmds, ncmds) != 0)
		return (-1);
	return (0);
}
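
/*
 * Example (illustrative sketch): a sandboxed monitor limits the VM
 * descriptor and then enters capability mode; afterwards the process can
 * only issue the ioctls listed in vm_ioctl_cmds on the pre-opened fd.
 */
static int
example_enter_sandbox(struct vmctx *ctx)
{
	if (vm_limit_rights(ctx) != 0)
		return (-1);
	if (caph_enter() != 0)
		return (-1);
	return (0);
}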
/*
 * Avoid using in new code.  Operations on the fd should be wrapped here so
 * that capability rights can be kept in sync.
 */
int
vm_get_device_fd(struct vmctx *ctx)
{

	return (ctx->fd);
}

/* Legacy interface, do not use. */
const cap_ioctl_t *
vm_get_ioctls(size_t *len)
{
	cap_ioctl_t *cmds;

	if (len == NULL) {
		cmds = malloc(sizeof(vm_ioctl_cmds));
		if (cmds == NULL)
			return (NULL);
		bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds));
		return (cmds);
	}

	*len = nitems(vm_ioctl_cmds);
	return (NULL);
}