/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/linker.h>
#include <sys/mman.h>
#include <sys/module.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <x86/segments.h>
#include <machine/specialreg.h>

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_snapshot.h>

#include "vmmapi.h"
#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)
/*
 * Size of the guard region before and after the virtual address space
 * mapping the guest physical memory. This must be a multiple of the
 * superpage size for performance reasons.
 */
#define	VM_MMAP_GUARD_SIZE	(4 * MB)
#define	PROT_RW		(PROT_READ | PROT_WRITE)
#define	PROT_ALL	(PROT_READ | PROT_WRITE | PROT_EXEC)
struct vmctx {
	int	fd;
	uint32_t lowmem_limit;
	int	memflags;
	size_t	lowmem;
	size_t	highmem;
	char	*baseaddr;
	char	*name;
};
#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;

	/* Construct the device pathname */
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}
int
vm_create(const char *name)
{
	/* Try to load vmm(4) module before creating a guest. */
	if (modfind("vmm") < 0)
		kldload("vmm");
	return (CREATE((char *)name));
}
struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	vm_destroy(vm);
	return (NULL);
}
void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}
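
/*
 * Illustrative usage sketch (not part of the library): the typical
 * create/open/destroy lifecycle for a guest. The guest name "testvm"
 * and the use of err(3) are hypothetical choices for this example.
 *
 *	struct vmctx *ctx;
 *
 *	if (vm_create("testvm") != 0)
 *		err(1, "vm_create");
 *	if ((ctx = vm_open("testvm")) == NULL)
 *		err(1, "vm_open");
 *	... configure memory and vcpus, run the guest ...
 *	vm_destroy(ctx);
 */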
int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(optarg, &endptr, 0);
	if (*optarg != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(optarg, ret_memsize);

	return (error);
}
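
/*
 * Illustrative examples of the parsing rules above (values follow from
 * the code; they are not exhaustive): a bare number below one megabyte
 * is scaled up, anything else goes through expand_number(3).
 *
 *	size_t memsize;
 *
 *	vm_parse_memsize("256", &memsize);	// compat rule: 256 -> 256 MB
 *	vm_parse_memsize("2G", &memsize);	// suffix via expand_number(3)
 */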
uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

int
vm_get_memflags(struct vmctx *ctx)
{

	return (ctx->memflags);
}
/*
 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
 */
int
vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot)
{
	struct vm_memmap memmap;
	int error, flags;

	memmap.gpa = gpa;
	memmap.segid = segid;
	memmap.segoff = off;
	memmap.len = len;
	memmap.prot = prot;
	memmap.flags = 0;

	if (ctx->memflags & VM_MEM_F_WIRED)
		memmap.flags |= VM_MEMMAP_F_WIRED;

	/*
	 * If this mapping already exists then don't create it again. This
	 * is the common case for SYSMEM mappings created by bhyveload(8).
	 */
	error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
	if (error == 0 && gpa == memmap.gpa) {
		if (segid != memmap.segid || off != memmap.segoff ||
		    prot != memmap.prot || flags != memmap.flags) {
			errno = EEXIST;
			return (-1);
		} else {
			return (0);
		}
	}

	error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
	return (error);
}
int
vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr,
    size_t *lowmem_size, size_t *highmem_size)
{

	*guest_baseaddr = ctx->baseaddr;
	*lowmem_size = ctx->lowmem;
	*highmem_size = ctx->highmem;
	return (0);
}
int
vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
{
	struct vm_munmap munmap;
	int error;

	munmap.gpa = gpa;
	munmap.len = len;

	error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap);
	return (error);
}
int
vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct vm_memmap memmap;
	int error;

	bzero(&memmap, sizeof(struct vm_memmap));
	memmap.gpa = *gpa;
	error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
	if (error == 0) {
		*gpa = memmap.gpa;
		*segid = memmap.segid;
		*segoff = memmap.segoff;
		*len = memmap.len;
		*prot = memmap.prot;
		*flags = memmap.flags;
	}
	return (error);
}
/*
 * Return 0 if the segments are identical and non-zero otherwise.
 *
 * This is slightly complicated by the fact that only device memory segments
 * are named.
 */
static int
cmpseg(size_t len, const char *str, size_t len2, const char *str2)
{

	if (len == len2) {
		if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
			return (0);
	}
	return (-1);
}
static int
vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	/*
	 * If the memory segment has already been created then just return.
	 * This is the usual case for the SYSMEM segment created by userspace
	 * loaders like bhyveload(8).
	 */
	error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
	    sizeof(memseg.name));
	if (error)
		return (error);

	if (memseg.len != 0) {
		if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
			errno = EINVAL;
			return (-1);
		} else {
			return (0);
		}
	}

	bzero(&memseg, sizeof(struct vm_memseg));
	memseg.segid = segid;
	memseg.len = len;
	if (name != NULL) {
		n = strlcpy(memseg.name, name, sizeof(memseg.name));
		if (n >= sizeof(memseg.name)) {
			errno = ENAMETOOLONG;
			return (-1);
		}
	}

	error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
	return (error);
}
int
vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
    size_t bufsize)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	memseg.segid = segid;
	error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
	if (error == 0) {
		*lenp = memseg.len;
		n = strlcpy(namebuf, memseg.name, bufsize);
		if (n >= bufsize) {
			errno = ENAMETOOLONG;
			error = -1;
		}
	}
	return (error);
}
static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
{
	char *ptr;
	int error, flags;

	/* Map 'len' bytes starting at 'gpa' in the guest address space */
	error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
	if (error)
		return (error);

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap into the process address space on the host */
	ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
	if (ptr == MAP_FAILED)
		return (-1);

	return (0);
}
int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	size_t objsize, len;
	vm_paddr_t gpa;
	char *baseaddr, *ptr;
	int error;

	assert(vms == VM_MMAP_ALL);

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem_limit;
		objsize = 4*GB + ctx->highmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
		objsize = ctx->lowmem;
	}

	error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
	if (error)
		return (error);

	/*
	 * Stake out a contiguous region covering the guest physical memory
	 * and the adjoining guard regions.
	 */
	len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
	ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
	if (ptr == MAP_FAILED)
		return (-1);

	baseaddr = ptr + VM_MMAP_GUARD_SIZE;
	if (ctx->highmem > 0) {
		gpa = 4*GB;
		len = ctx->highmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	if (ctx->lowmem > 0) {
		gpa = 0;
		len = ctx->lowmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	ctx->baseaddr = baseaddr;

	return (0);
}
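
/*
 * Illustrative sketch: allocating 8GB of guest memory. With the default
 * 3GB lowmem limit this creates a 3GB lowmem region and a 5GB highmem
 * region starting at 4GB. The sizes are examples only.
 *
 *	vm_set_memflags(ctx, VM_MEM_F_WIRED);	// optional: wire guest memory
 *	if (vm_setup_memory(ctx, 8 * GB, VM_MMAP_ALL) != 0)
 *		err(1, "vm_setup_memory");
 */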
/*
 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 * the lowmem or highmem regions.
 *
 * In particular return NULL if [gaddr, gaddr+len) falls in guest MMIO region.
 * The instruction emulation code depends on this behavior.
 */
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	if (ctx->lowmem > 0) {
		if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
		    gaddr + len <= ctx->lowmem)
			return (ctx->baseaddr + gaddr);
	}

	if (ctx->highmem > 0) {
		if (gaddr >= 4*GB) {
			if (gaddr < 4*GB + ctx->highmem &&
			    len <= ctx->highmem &&
			    gaddr + len <= 4*GB + ctx->highmem)
				return (ctx->baseaddr + gaddr);
		}
	}

	return (NULL);
}
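
/*
 * Illustrative sketch: callers must check for NULL before touching the
 * returned pointer, since a gpa range that overlaps guest MMIO has no
 * host mapping. The address and length are arbitrary examples.
 *
 *	char *p = vm_map_gpa(ctx, 0x100000, 4096);
 *	if (p == NULL)
 *		errx(1, "gpa range is not guest RAM");
 *	memset(p, 0, 4096);
 */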
vm_paddr_t
vm_rev_map_gpa(struct vmctx *ctx, void *addr)
{
	vm_paddr_t offaddr;

	offaddr = (char *)addr - ctx->baseaddr;

	if (ctx->lowmem > 0)
		if (offaddr >= 0 && offaddr <= ctx->lowmem)
			return (offaddr);

	if (ctx->highmem > 0)
		if (offaddr >= 4*GB && offaddr < 4*GB + ctx->highmem)
			return (offaddr);

	return ((vm_paddr_t)-1);
}
/* TODO: maximum size for vmname */
int
vm_get_name(struct vmctx *ctx, char *buf, size_t max_len)
{

	if (strlcpy(buf, ctx->name, max_len) >= max_len)
		return (EINVAL);
	return (0);
}
size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

	return (ctx->lowmem);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

	return (ctx->highmem);
}
void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
	char pathname[MAXPATHLEN];
	size_t len2;
	char *base, *ptr;
	int fd, error, flags;

	fd = -1;
	ptr = MAP_FAILED;
	if (name == NULL || strlen(name) == 0) {
		errno = EINVAL;
		goto done;
	}

	error = vm_alloc_memseg(ctx, segid, len, name);
	if (error)
		goto done;

	strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
	strlcat(pathname, ctx->name, sizeof(pathname));
	strlcat(pathname, ".", sizeof(pathname));
	strlcat(pathname, name, sizeof(pathname));

	fd = open(pathname, O_RDWR);
	if (fd < 0)
		goto done;

	/*
	 * Stake out a contiguous region covering the device memory and the
	 * adjoining guard regions.
	 */
	len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
	base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
	    0);
	if (base == MAP_FAILED)
		goto done;

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap the devmem region in the host address space */
	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
done:
	if (fd >= 0)
		close(fd);
	return (ptr);
}
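
/*
 * Illustrative sketch: a device model creating a 16MB framebuffer
 * segment. VM_FRAMEBUFFER is one of the devmem segment ids declared in
 * vmmapi.h; the segment name and size are arbitrary examples.
 *
 *	void *fb = vm_create_devmem(ctx, VM_FRAMEBUFFER, "framebuffer",
 *	    16 * MB);
 *	if (fb == MAP_FAILED)
 *		err(1, "vm_create_devmem");
 */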
int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}

int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
    uint64_t *base, uint32_t *limit, uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}
int
vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *seg_desc)
{
	int error;

	error = vm_get_desc(ctx, vcpu, reg, &seg_desc->base, &seg_desc->limit,
	    &seg_desc->access);
	return (error);
}
int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}
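
/*
 * Illustrative sketch: seeding a vcpu's entry point before the first
 * VM_RUN, then reading it back. Register values are arbitrary examples.
 *
 *	uint64_t rip;
 *
 *	if (vm_set_register(ctx, 0, VM_REG_GUEST_RIP, 0x100000) != 0)
 *		err(1, "vm_set_register");
 *	if (vm_get_register(ctx, 0, VM_REG_GUEST_RIP, &rip) != 0)
 *		err(1, "vm_get_register");
 */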
int
vm_set_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
	int error;
	struct vm_register_set vmregset;

	bzero(&vmregset, sizeof(vmregset));
	vmregset.cpuid = vcpu;
	vmregset.count = count;
	vmregset.regnums = regnums;
	vmregset.regvals = regvals;

	error = ioctl(ctx->fd, VM_SET_REGISTER_SET, &vmregset);
	return (error);
}

int
vm_get_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
	int error;
	struct vm_register_set vmregset;

	bzero(&vmregset, sizeof(vmregset));
	vmregset.cpuid = vcpu;
	vmregset.count = count;
	vmregset.regnums = regnums;
	vmregset.regvals = regvals;

	error = ioctl(ctx->fd, VM_GET_REGISTER_SET, &vmregset);
	return (error);
}
int
vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
{
	int error;
	struct vm_run vmrun;

	bzero(&vmrun, sizeof(vmrun));
	vmrun.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_RUN, &vmrun);
	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
	return (error);
}
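
/*
 * Illustrative sketch of the canonical vcpu loop: call vm_run() until
 * the exit reason says to stop. Real consumers such as bhyve(8) dispatch
 * on many more exit codes; this minimal example is not the bhyve loop.
 *
 *	struct vm_exit vmexit;
 *
 *	for (;;) {
 *		if (vm_run(ctx, vcpu, &vmexit) != 0)
 *			break;
 *		switch (vmexit.exitcode) {
 *		case VM_EXITCODE_HLT:
 *			return;
 *		default:
 *			break;	// emulate I/O, MMIO, etc.
 *		}
 *	}
 */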
int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

int
vm_reinit(struct vmctx *ctx)
{

	return (ioctl(ctx->fd, VM_REINIT, 0));
}
int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	struct vm_exception exc;

	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = errcode;
	exc.error_code_valid = errcode_valid;
	exc.restart_instruction = restart_instruction;

	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}
int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}
int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}
int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}
int
vm_readwrite_kernemu_device(struct vmctx *ctx, int vcpu, vm_paddr_t gpa,
    bool write, int size, uint64_t *value)
{
	struct vm_readwrite_kernemu_device irp = {
		.vcpuid = vcpu,
		.access_width = fls(size) - 1,
		.gpa = gpa,
		.value = write ? *value : ~0ul,
	};
	long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV);
	int rc;

	rc = ioctl(ctx->fd, cmd, &irp);
	if (rc == 0 && !write)
		*value = irp.value;
	return (rc);
}
int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}
int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));
	vmnmi.cpuid = vcpu;

	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}
static const char *capstrmap[] = {
	[VM_CAP_HALT_EXIT]  = "hlt_exit",
	[VM_CAP_MTRAP_EXIT] = "mtrap_exit",
	[VM_CAP_PAUSE_EXIT] = "pause_exit",
	[VM_CAP_UNRESTRICTED_GUEST] = "unrestricted_guest",
	[VM_CAP_ENABLE_INVPCID] = "enable_invpcid",
	[VM_CAP_BPT_EXIT] = "bpt_exit",
};
int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; i < nitems(capstrmap); i++) {
		if (strcmp(capstrmap[i], capname) == 0)
			return (i);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	if (type >= 0 && type < nitems(capstrmap))
		return (capstrmap[type]);

	return (NULL);
}
int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
    int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;

	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;
	vmcap.capval = val;

	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}
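
/*
 * Illustrative sketch: enabling exit-on-HLT by name lookup. The
 * capability names come from capstrmap above.
 *
 *	int cap = vm_capability_name2type("hlt_exit");
 *	if (cap >= 0 && vm_set_capability(ctx, vcpu, cap, 1) != 0)
 *		err(1, "vm_set_capability");
 */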
int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}
int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;

	return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
}
int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.vcpu = vcpu;
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.vcpu = vcpu;
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix);
}

int
vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev ppt;

	bzero(&ppt, sizeof(ppt));
	ppt.bus = bus;
	ppt.slot = slot;
	ppt.func = func;

	return ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &ppt);
}
uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
    int *ret_entries)
{
	int error;

	static struct vm_stats vmstats;

	vmstats.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_STATS, &vmstats);
	if (error == 0) {
		if (ret_entries)
			*ret_entries = vmstats.num_entries;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (vmstats.statbuf);
	} else
		return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}
int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;
	x2apic.state = state;

	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}
/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
	int error;
	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
	uint32_t desc_access, desc_limit;
	uint16_t sel;

	zero = 0;

	rflags = 0x2;
	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
	if (error)
		goto done;

	rip = 0xfff0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
		goto done;

	cr0 = CR0_NE;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
		goto done;

	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
		goto done;

	cr4 = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
		goto done;

	/*
	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
	 */
	desc_base = 0xffff0000;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0xf000;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
		goto done;

	/*
	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
	 */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
	    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
		goto done;

	/* General purpose registers */
	rdx = 0xf00;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
		goto done;

	/* GDTR, IDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
	    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
	    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	/* TR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0000008b;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
		goto done;

	/* LDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x00000082;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
	    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
		goto done;

	/* XXX cr2, debug registers */

	error = 0;
done:
	return (error);
}
int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}
int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}
int
vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

int
vm_gla2gpa_nofault(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = ioctl(ctx->fd, VM_GLA2GPA_NOFAULT, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}
#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif
int
vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
{
	void *va;
	uint64_t gpa;
	int error, i, n, off;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = vm_gla2gpa(ctx, vcpu, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);

		off = gpa & PAGE_MASK;
		n = min(len, PAGE_SIZE - off);

		va = vm_map_gpa(ctx, gpa, n);
		if (va == NULL)
			return (EFAULT);

		iov->iov_base = va;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}
void
vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov, int iovcnt)
{

	return;
}
void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	dst = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		src = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		dst += n;
		len -= n;
	}
}

void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
    size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	src = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		dst = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		src += n;
		len -= n;
	}
}
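
/*
 * Illustrative sketch: reading guest memory through a guest linear
 * address using the iovec helpers above. 'paging' must describe the
 * vcpu's current paging mode; an iovec array of 8 entries is an
 * arbitrary example size.
 *
 *	struct iovec iov[8];
 *	char buf[128];
 *	int error, fault;
 *
 *	error = vm_copy_setup(ctx, vcpu, &paging, gla, sizeof(buf),
 *	    PROT_READ, iov, nitems(iov), &fault);
 *	if (error == 0 && !fault)
 *		vm_copyin(ctx, vcpu, iov, buf, sizeof(buf));
 */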
static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
	struct vm_cpuset vm_cpuset;
	int error;

	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
	vm_cpuset.which = which;
	vm_cpuset.cpusetsize = sizeof(cpuset_t);
	vm_cpuset.cpus = cpus;

	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
	return (error);
}
int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
}
int
vm_activate_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
	return (error);
}

int
vm_suspend_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
	return (error);
}

int
vm_resume_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
	return (error);
}
int
vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_GET_INTINFO, &vmii);
	if (error == 0) {
		*info1 = vmii.info1;
		*info2 = vmii.info2;
	}
	return (error);
}

int
vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu;
	vmii.info1 = info1;
	error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
	return (error);
}
int
vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	rtcdata.value = value;
	error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
	return (error);
}

int
vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
	if (error == 0)
		*retval = rtcdata.value;
	return (error);
}
int
vm_rtc_settime(struct vmctx *ctx, time_t secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	rtctime.secs = secs;
	error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
	return (error);
}

int
vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
	if (error == 0)
		*secs = rtctime.secs;
	return (error);
}
int
vm_restart_instruction(void *arg, int vcpu)
{
	struct vmctx *ctx = arg;

	return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
}
int
vm_snapshot_req(struct vm_snapshot_meta *meta)
{

	if (ioctl(meta->ctx->fd, VM_SNAPSHOT_REQ, meta) == -1) {
#ifdef SNAPSHOT_DEBUG
		fprintf(stderr, "%s: snapshot failed for %s: %d\r\n",
		    __func__, meta->dev_name, errno);
#endif
		return (-1);
	}
	return (0);
}
int
vm_restore_time(struct vmctx *ctx)
{
	int dummy;

	dummy = 0;
	return (ioctl(ctx->fd, VM_RESTORE_TIME, &dummy));
}
int
vm_set_topology(struct vmctx *ctx,
    uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
{
	struct vm_cpu_topology topology;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	topology.sockets = sockets;
	topology.cores = cores;
	topology.threads = threads;
	topology.maxcpus = maxcpus;
	return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
}

int
vm_get_topology(struct vmctx *ctx,
    uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
{
	struct vm_cpu_topology topology;
	int error;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
	if (error == 0) {
		*sockets = topology.sockets;
		*cores = topology.cores;
		*threads = topology.threads;
		*maxcpus = topology.maxcpus;
	}
	return (error);
}
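
/*
 * Illustrative sketch: a 2-socket, 2-core, 1-thread guest (4 vcpus).
 * Passing 0 for maxcpus leaves the vmm(4) default in place; all values
 * here are examples.
 *
 *	if (vm_set_topology(ctx, 2, 2, 1, 0) != 0)
 *		err(1, "vm_set_topology");
 */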
int
vm_get_device_fd(struct vmctx *ctx)
{

	return (ctx->fd);
}
const cap_ioctl_t *
vm_get_ioctls(size_t *len)
{
	cap_ioctl_t *cmds;
	/* keep in sync with machine/vmm_dev.h */
	static const cap_ioctl_t vm_ioctl_cmds[] = { VM_RUN, VM_SUSPEND, VM_REINIT,
	    VM_ALLOC_MEMSEG, VM_GET_MEMSEG, VM_MMAP_MEMSEG, VM_MMAP_MEMSEG,
	    VM_MMAP_GETNEXT, VM_MUNMAP_MEMSEG, VM_SET_REGISTER, VM_GET_REGISTER,
	    VM_SET_SEGMENT_DESCRIPTOR, VM_GET_SEGMENT_DESCRIPTOR,
	    VM_SET_REGISTER_SET, VM_GET_REGISTER_SET,
	    VM_SET_KERNEMU_DEV, VM_GET_KERNEMU_DEV,
	    VM_INJECT_EXCEPTION, VM_LAPIC_IRQ, VM_LAPIC_LOCAL_IRQ,
	    VM_LAPIC_MSI, VM_IOAPIC_ASSERT_IRQ, VM_IOAPIC_DEASSERT_IRQ,
	    VM_IOAPIC_PULSE_IRQ, VM_IOAPIC_PINCOUNT, VM_ISA_ASSERT_IRQ,
	    VM_ISA_DEASSERT_IRQ, VM_ISA_PULSE_IRQ, VM_ISA_SET_IRQ_TRIGGER,
	    VM_SET_CAPABILITY, VM_GET_CAPABILITY, VM_BIND_PPTDEV,
	    VM_UNBIND_PPTDEV, VM_MAP_PPTDEV_MMIO, VM_PPTDEV_MSI,
	    VM_PPTDEV_MSIX, VM_UNMAP_PPTDEV_MMIO, VM_PPTDEV_DISABLE_MSIX,
	    VM_INJECT_NMI, VM_STATS, VM_STAT_DESC,
	    VM_SET_X2APIC_STATE, VM_GET_X2APIC_STATE,
	    VM_GET_HPET_CAPABILITIES, VM_GET_GPA_PMAP, VM_GLA2GPA,
	    VM_GLA2GPA_NOFAULT,
	    VM_ACTIVATE_CPU, VM_GET_CPUS, VM_SUSPEND_CPU, VM_RESUME_CPU,
	    VM_SET_INTINFO, VM_GET_INTINFO,
	    VM_RTC_WRITE, VM_RTC_READ, VM_RTC_SETTIME, VM_RTC_GETTIME,
	    VM_RESTART_INSTRUCTION, VM_SET_TOPOLOGY, VM_GET_TOPOLOGY };

	if (len == NULL) {
		cmds = malloc(sizeof(vm_ioctl_cmds));
		if (cmds == NULL)
			return (NULL);
		bcopy(vm_ioctl_cmds, cmds, sizeof(vm_ioctl_cmds));
		return (cmds);
	}

	*len = nitems(vm_ioctl_cmds);
	return (NULL);
}
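
/*
 * Illustrative sketch (Capsicum consumers): limiting a vm device fd to
 * the whitelisted ioctl list above, in the style of bhyve(8). Requires
 * <sys/capsicum.h>; error handling is elided for brevity.
 *
 *	size_t ncmds;
 *	const cap_ioctl_t *cmds;
 *
 *	vm_get_ioctls(&ncmds);
 *	cmds = vm_get_ioctls(NULL);
 *	cap_ioctls_limit(vm_get_device_fd(ctx), cmds, ncmds);
 *	free(__DECONST(cap_ioctl_t *, cmds));
 */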