/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <capsicum_helpers.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <vm/vm.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_snapshot.h>

#include "vmmapi.h"
#include "internal.h"

#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

/*
 * Size of the guard region before and after the virtual address space
 * mapping the guest physical memory. This must be a multiple of the
 * superpage size for performance reasons.
 */
#define	VM_MMAP_GUARD_SIZE	(4 * MB)

#define	PROT_RW		(PROT_READ | PROT_WRITE)
#define	PROT_ALL	(PROT_READ | PROT_WRITE | PROT_EXEC)

#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))

static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}

int
vm_create(const char *name)
{
	/* Try to load vmm(4) module before creating a guest. */
	if (modfind("vmm") < 0)
		kldload("vmm");
	return (CREATE(name));
}

struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;
	int saved_errno;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	saved_errno = errno;
	free(vm);
	errno = saved_errno;
	return (NULL);
}

void
vm_close(struct vmctx *vm)
{
	assert(vm != NULL);

	close(vm->fd);
	free(vm);
}

void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}

struct vcpu *
vm_vcpu_open(struct vmctx *ctx, int vcpuid)
{
	struct vcpu *vcpu;

	vcpu = malloc(sizeof(*vcpu));
	vcpu->ctx = ctx;
	vcpu->vcpuid = vcpuid;
	return (vcpu);
}

void
vm_vcpu_close(struct vcpu *vcpu)
{
	free(vcpu);
}

int
vcpu_id(struct vcpu *vcpu)
{
	return (vcpu->vcpuid);
}

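/*
 * Illustrative usage sketch (not part of the original file): the typical
 * lifecycle for the handles above.  Error handling is elided and the VM
 * name is hypothetical.
 *
 *	struct vmctx *ctx;
 *	struct vcpu *vcpu;
 *
 *	if (vm_create("testvm") == 0 && (ctx = vm_open("testvm")) != NULL) {
 *		vcpu = vm_vcpu_open(ctx, 0);
 *		...
 *		vm_vcpu_close(vcpu);
 *		vm_destroy(ctx);
 *	}
 */
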
int
vm_parse_memsize(const char *opt, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(opt, &endptr, 0);
	if (*opt != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(opt, ret_memsize);

	return (error);
}

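/*
 * Worked examples (illustrative): a bare "512" is below one megabyte and
 * is therefore scaled up, while a suffixed value is handled by
 * expand_number(3).
 *
 *	size_t memsize;
 *
 *	vm_parse_memsize("512", &memsize);	// memsize == 512 * MB
 *	vm_parse_memsize("2G", &memsize);	// memsize == 2 * GB
 */
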
uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

int
vm_get_memflags(struct vmctx *ctx)
{

	return (ctx->memflags);
}

/*
 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
 */
int
vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot)
{
	struct vm_memmap memmap;
	int error, flags;

	memmap.gpa = gpa;
	memmap.segid = segid;
	memmap.segoff = off;
	memmap.len = len;
	memmap.prot = prot;
	memmap.flags = 0;

	if (ctx->memflags & VM_MEM_F_WIRED)
		memmap.flags |= VM_MEMMAP_F_WIRED;

	/*
	 * If this mapping already exists then don't create it again. This
	 * is the common case for SYSMEM mappings created by bhyveload(8).
	 */
	error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
	if (error == 0 && gpa == memmap.gpa) {
		if (segid != memmap.segid || off != memmap.segoff ||
		    prot != memmap.prot || flags != memmap.flags) {
			errno = EEXIST;
			return (-1);
		} else {
			return (0);
		}
	}

	error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
	return (error);
}

void
vm_get_guestmem_from_ctx(struct vmctx *ctx, char **guest_baseaddr,
    size_t *lowmem_size, size_t *highmem_size)
{

	*guest_baseaddr = ctx->baseaddr;
	*lowmem_size = ctx->lowmem;
	*highmem_size = ctx->highmem;
}

int
vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
{
	struct vm_munmap munmap;
	int error;

	munmap.gpa = gpa;
	munmap.len = len;

	error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap);
	return (error);
}

int
vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct vm_memmap memmap;
	int error;

	bzero(&memmap, sizeof(struct vm_memmap));
	memmap.gpa = *gpa;
	error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
	if (error == 0) {
		*gpa = memmap.gpa;
		*segid = memmap.segid;
		*segoff = memmap.segoff;
		*len = memmap.len;
		*prot = memmap.prot;
		*flags = memmap.flags;
	}
	return (error);
}

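/*
 * Illustrative sketch: enumerate all existing mappings by advancing 'gpa'
 * past the mapping returned on each call.
 *
 *	vm_paddr_t gpa = 0;
 *	vm_ooffset_t segoff;
 *	size_t len;
 *	int segid, prot, flags;
 *
 *	while (vm_mmap_getnext(ctx, &gpa, &segid, &segoff, &len,
 *	    &prot, &flags) == 0)
 *		gpa += len;
 */
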
/*
 * Return 0 if the segments are identical and non-zero otherwise.
 *
 * This is slightly complicated by the fact that only device memory segments
 * are named.
 */
static int
cmpseg(size_t len, const char *str, size_t len2, const char *str2)
{

	if (len == len2) {
		if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
			return (0);
	}
	return (-1);
}

static int
vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	/*
	 * If the memory segment has already been created then just return.
	 * This is the usual case for the SYSMEM segment created by userspace
	 * loaders like bhyveload(8).
	 */
	error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
	    sizeof(memseg.name));
	if (error)
		return (error);

	if (memseg.len != 0) {
		if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
			errno = EINVAL;
			return (-1);
		} else {
			return (0);
		}
	}

	bzero(&memseg, sizeof(struct vm_memseg));
	memseg.segid = segid;
	memseg.len = len;
	if (name != NULL) {
		n = strlcpy(memseg.name, name, sizeof(memseg.name));
		if (n >= sizeof(memseg.name)) {
			errno = ENAMETOOLONG;
			return (-1);
		}
	}

	error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
	return (error);
}

int
vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
    size_t bufsize)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	bzero(&memseg, sizeof(memseg));
	memseg.segid = segid;
	error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
	if (error == 0) {
		*lenp = memseg.len;
		n = strlcpy(namebuf, memseg.name, bufsize);
		if (n >= bufsize) {
			errno = ENAMETOOLONG;
			error = -1;
		}
	}
	return (error);
}

static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
{
	char *ptr;
	int error, flags;

	/* Map 'len' bytes starting at 'gpa' in the guest address space */
	error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
	if (error)
		return (error);

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap into the process address space on the host */
	ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
	if (ptr == MAP_FAILED)
		return (-1);

	return (0);
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	size_t objsize, len;
	vm_paddr_t gpa;
	char *baseaddr, *ptr;
	int error;

	assert(vms == VM_MMAP_ALL);

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem_limit;
		objsize = 4*GB + ctx->highmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
		objsize = ctx->lowmem;
	}

	error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
	if (error)
		return (error);

	/*
	 * Stake out a contiguous region covering the guest physical memory
	 * and the adjoining guard regions.
	 */
	len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
	ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0);
	if (ptr == MAP_FAILED)
		return (-1);

	baseaddr = ptr + VM_MMAP_GUARD_SIZE;
	if (ctx->highmem > 0) {
		gpa = 4*GB;
		len = ctx->highmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	if (ctx->lowmem > 0) {
		gpa = 0;
		len = ctx->lowmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	ctx->baseaddr = baseaddr;

	return (0);
}

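/*
 * Worked example (illustrative): with the default 3GB lowmem_limit, a
 * request for 8GB of guest memory is split into lowmem = 3GB at gpa 0 and
 * highmem = 5GB starting at gpa 4GB, so objsize = 4GB + 5GB = 9GB and the
 * host reservation is 9GB plus the two 4MB guard regions.
 */
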
/*
 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 * the lowmem or highmem regions.
 *
 * In particular return NULL if [gaddr, gaddr+len) falls in guest MMIO region.
 * The instruction emulation code depends on this behavior.
 */
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	if (ctx->lowmem > 0) {
		if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
		    gaddr + len <= ctx->lowmem)
			return (ctx->baseaddr + gaddr);
	}

	if (ctx->highmem > 0) {
		if (gaddr >= 4*GB) {
			if (gaddr < 4*GB + ctx->highmem &&
			    len <= ctx->highmem &&
			    gaddr + len <= 4*GB + ctx->highmem)
				return (ctx->baseaddr + gaddr);
		}
	}

	return (NULL);
}

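/*
 * Worked example (illustrative): with lowmem = 3GB, a 4KB access at gpa
 * 0x1000 maps to ctx->baseaddr + 0x1000, while gpa 0xC0000000 (exactly
 * 3GB, inside the MMIO hole below 4GB) fails the 'gaddr < lowmem' test
 * and returns NULL.
 */
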
vm_paddr_t
vm_rev_map_gpa(struct vmctx *ctx, void *addr)
{
	vm_paddr_t offaddr;

	offaddr = (char *)addr - ctx->baseaddr;

	if (ctx->lowmem > 0)
		if (offaddr <= ctx->lowmem)
			return (offaddr);

	if (ctx->highmem > 0)
		if (offaddr >= 4*GB && offaddr < 4*GB + ctx->highmem)
			return (offaddr);

	return ((vm_paddr_t)-1);
}

const char *
vm_get_name(struct vmctx *ctx)
{

	return (ctx->name);
}

size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

	return (ctx->lowmem);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

	return (ctx->highmem);
}

void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
	char pathname[MAXPATHLEN];
	size_t len2;
	char *base, *ptr;
	int fd, error, flags;

	fd = -1;
	ptr = MAP_FAILED;
	if (name == NULL || strlen(name) == 0) {
		errno = EINVAL;
		goto done;
	}

	error = vm_alloc_memseg(ctx, segid, len, name);
	if (error)
		goto done;

	strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
	strlcat(pathname, ctx->name, sizeof(pathname));
	strlcat(pathname, ".", sizeof(pathname));
	strlcat(pathname, name, sizeof(pathname));

	fd = open(pathname, O_RDWR);
	if (fd < 0)
		goto done;

	/*
	 * Stake out a contiguous region covering the device memory and the
	 * adjoining guard regions.
	 */
	len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
	base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1,
	    0);
	if (base == MAP_FAILED)
		goto done;

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap the devmem region in the host address space */
	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
done:
	if (fd >= 0)
		close(fd);
	return (ptr);
}

static int
vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg)
{
	/*
	 * XXX: fragile, handle with care
	 * Assumes that the first field of the ioctl data
	 * is the vcpuid.
	 */
	*(int *)arg = vcpu->vcpuid;
	return (ioctl(vcpu->ctx->fd, cmd, arg));
}

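/*
 * For example, struct vm_register from machine/vmm_dev.h satisfies the
 * layout assumption above because its first field is the vcpu id:
 *
 *	struct vm_register {
 *		int		cpuid;
 *		int		regnum;
 *		uint64_t	regval;
 *	};
 */
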
int
vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = vcpu_ioctl(vcpu, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.regnum = reg;

	error = vcpu_ioctl(vcpu, VM_GET_REGISTER, &vmreg);
	if (error == 0)
		*ret_val = vmreg.regval;
	return (error);
}

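/*
 * Illustrative sketch: read the guest %rip and advance it past a one-byte
 * instruction (e.g. after emulating a HLT).
 *
 *	uint64_t rip;
 *
 *	if (vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip) == 0)
 *		(void) vm_set_register(vcpu, VM_REG_GUEST_RIP, rip + 1);
 */
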
int
vm_set_register_set(struct vcpu *vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
	int error;
	struct vm_register_set vmregset;

	bzero(&vmregset, sizeof(vmregset));
	vmregset.count = count;
	vmregset.regnums = regnums;
	vmregset.regvals = regvals;

	error = vcpu_ioctl(vcpu, VM_SET_REGISTER_SET, &vmregset);
	return (error);
}

int
vm_get_register_set(struct vcpu *vcpu, unsigned int count,
    const int *regnums, uint64_t *regvals)
{
	int error;
	struct vm_register_set vmregset;

	bzero(&vmregset, sizeof(vmregset));
	vmregset.count = count;
	vmregset.regnums = regnums;
	vmregset.regvals = regvals;

	error = vcpu_ioctl(vcpu, VM_GET_REGISTER_SET, &vmregset);
	return (error);
}

int
vm_run(struct vcpu *vcpu, struct vm_run *vmrun)
{
	return (vcpu_ioctl(vcpu, VM_RUN, vmrun));
}

int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

int
vm_reinit(struct vmctx *ctx)
{

	return (ioctl(ctx->fd, VM_REINIT, 0));
}

int
vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	struct vm_exception exc;

	exc.vector = vector;
	exc.error_code = errcode;
	exc.error_code_valid = errcode_valid;
	exc.restart_instruction = restart_instruction;

	return (vcpu_ioctl(vcpu, VM_INJECT_EXCEPTION, &exc));
}

int
vm_readwrite_kernemu_device(struct vcpu *vcpu, vm_paddr_t gpa,
    bool write, int size, uint64_t *value)
{
	struct vm_readwrite_kernemu_device irp = {
		.access_width = fls(size) - 1,
		.gpa = gpa,
		.value = write ? *value : ~0ul,
	};
	long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV);
	int rc;

	rc = vcpu_ioctl(vcpu, cmd, &irp);
	if (rc == 0 && !write)
		*value = irp.value;
	return (rc);
}

int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; i < VM_CAP_MAX; i++) {
		if (vm_capstrmap[i] != NULL &&
		    strcmp(vm_capstrmap[i], capname) == 0)
			return (i);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	if (type >= 0 && type < VM_CAP_MAX)
		return (vm_capstrmap[type]);

	return (NULL);
}

int
vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap, int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.captype = cap;

	error = vcpu_ioctl(vcpu, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.captype = cap;
	vmcap.capval = val;

	return (vcpu_ioctl(vcpu, VM_SET_CAPABILITY, &vmcap));
}

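/*
 * Illustrative sketch: enable the "halt exit" capability so that a guest
 * HLT instruction causes vm_run() to return to userspace; a non-zero
 * return indicates the capability is unsupported on this host.
 *
 *	if (vm_set_capability(vcpu, VM_CAP_HALT_EXIT, 1) != 0)
 *		warnx("VM_CAP_HALT_EXIT not supported");
 */
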
int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;

	return (ioctl(ctx->fd, VM_UNMAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}

int
vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev ppt;

	bzero(&ppt, sizeof(ppt));
	ppt.bus = bus;
	ppt.slot = slot;
	ppt.func = func;

	return (ioctl(ctx->fd, VM_PPTDEV_DISABLE_MSIX, &ppt));
}

uint64_t *
vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv, int *ret_entries)
{
	static _Thread_local uint64_t *stats_buf;
	static _Thread_local u_int stats_count;
	uint64_t *new_stats;
	struct vm_stats vmstats;
	u_int count, index;
	bool have_stats;

	have_stats = false;
	count = 0;
	for (index = 0;; index += nitems(vmstats.statbuf)) {
		vmstats.index = index;
		if (vcpu_ioctl(vcpu, VM_STATS, &vmstats) != 0)
			break;
		if (stats_count < index + vmstats.num_entries) {
			new_stats = realloc(stats_buf,
			    (index + vmstats.num_entries) * sizeof(uint64_t));
			if (new_stats == NULL) {
				errno = ENOMEM;
				return (NULL);
			}
			stats_count = index + vmstats.num_entries;
			stats_buf = new_stats;
		}
		memcpy(stats_buf + index, vmstats.statbuf,
		    vmstats.num_entries * sizeof(uint64_t));
		count += vmstats.num_entries;
		have_stats = true;

		if (vmstats.num_entries != nitems(vmstats.statbuf))
			break;
	}
	if (have_stats) {
		if (ret_entries)
			*ret_entries = count;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (stats_buf);
	} else
		return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}

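/*
 * Illustrative sketch: dump every statistic with its description.
 *
 *	struct timeval tv;
 *	uint64_t *stats;
 *	int i, nstats;
 *
 *	if ((stats = vm_get_stats(vcpu, &tv, &nstats)) != NULL) {
 *		for (i = 0; i < nstats; i++)
 *			printf("%-32s %ju\n", vm_get_stat_desc(ctx, i),
 *			    (uintmax_t)stats[i]);
 *	}
 */
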
int
vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));

	error = vcpu_ioctl(vcpu, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.state = state;

	error = vcpu_ioctl(vcpu, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}

int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}

int
vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = vcpu_ioctl(vcpu, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

int
vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = vcpu_ioctl(vcpu, VM_GLA2GPA_NOFAULT, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif

int
vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
{
	void *va;
	uint64_t gpa;
	int error, i, n;
	size_t off;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);

		off = gpa & PAGE_MASK;
		n = MIN(len, PAGE_SIZE - off);

		va = vm_map_gpa(vcpu->ctx, gpa, n);
		if (va == NULL)
			return (EFAULT);

		iov->iov_base = va;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}

void
vm_copy_teardown(struct iovec *iov __unused, int iovcnt __unused)
{
	/*
	 * Intentionally empty.  This is used by the instruction
	 * emulation code shared with the kernel.  The in-kernel
	 * version of this is non-empty.
	 */
}

void
vm_copyin(struct iovec *iov, void *vp, size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	dst = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		src = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		dst += n;
		len -= n;
	}
}

void
vm_copyout(const void *vp, struct iovec *iov, size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	src = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		dst = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		src += n;
		len -= n;
	}
}

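/*
 * Illustrative sketch: copy bytes from a guest linear address using the
 * scatter/gather helpers above.  'paging' would typically come from the
 * vm_exit state of the current vcpu.
 *
 *	struct iovec iov[8];
 *	char buf[128];
 *	int error, fault;
 *
 *	error = vm_copy_setup(vcpu, &paging, gla, sizeof(buf), PROT_READ,
 *	    iov, nitems(iov), &fault);
 *	if (error == 0 && fault == 0)
 *		vm_copyin(iov, buf, sizeof(buf));
 */
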
static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
	struct vm_cpuset vm_cpuset;
	int error;

	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
	vm_cpuset.which = which;
	vm_cpuset.cpusetsize = sizeof(cpuset_t);
	vm_cpuset.cpus = cpus;

	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
	return (error);
}

int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_DEBUG_CPUS, cpus));
}

int
vm_activate_cpu(struct vcpu *vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	error = vcpu_ioctl(vcpu, VM_ACTIVATE_CPU, &ac);
	return (error);
}

int
vm_suspend_all_cpus(struct vmctx *ctx)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = -1;
	error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
	return (error);
}

int
vm_suspend_cpu(struct vcpu *vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	error = vcpu_ioctl(vcpu, VM_SUSPEND_CPU, &ac);
	return (error);
}

int
vm_resume_cpu(struct vcpu *vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	error = vcpu_ioctl(vcpu, VM_RESUME_CPU, &ac);
	return (error);
}

int
vm_resume_all_cpus(struct vmctx *ctx)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = -1;
	error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
	return (error);
}

int
vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	error = vcpu_ioctl(vcpu, VM_GET_INTINFO, &vmii);
	if (error == 0) {
		*info1 = vmii.info1;
		*info2 = vmii.info2;
	}
	return (error);
}

int
vm_set_intinfo(struct vcpu *vcpu, uint64_t info1)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.info1 = info1;
	error = vcpu_ioctl(vcpu, VM_SET_INTINFO, &vmii);
	return (error);
}

int
vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	rtcdata.value = value;
	error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
	return (error);
}

int
vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
	if (error == 0)
		*retval = rtcdata.value;
	return (error);
}

int
vm_rtc_settime(struct vmctx *ctx, time_t secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	rtctime.secs = secs;
	error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
	return (error);
}

int
vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
	if (error == 0)
		*secs = rtctime.secs;
	return (error);
}

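/*
 * Illustrative sketch: initialize the virtual RTC from the host clock,
 * roughly what bhyve(8) does at startup.
 *
 *	(void) vm_rtc_settime(ctx, time(NULL));
 */
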
int
vm_restart_instruction(struct vcpu *vcpu)
{
	int arg;

	return (vcpu_ioctl(vcpu, VM_RESTART_INSTRUCTION, &arg));
}

int
vm_snapshot_req(struct vmctx *ctx, struct vm_snapshot_meta *meta)
{

	if (ioctl(ctx->fd, VM_SNAPSHOT_REQ, meta) == -1) {
#ifdef SNAPSHOT_DEBUG
		fprintf(stderr, "%s: snapshot failed for %s: %d\r\n",
		    __func__, meta->dev_name, errno);
#endif
		return (-1);
	}
	return (0);
}

int
vm_restore_time(struct vmctx *ctx)
{
	int dummy;

	dummy = 0;
	return (ioctl(ctx->fd, VM_RESTORE_TIME, &dummy));
}

int
vm_set_topology(struct vmctx *ctx,
    uint16_t sockets, uint16_t cores, uint16_t threads, uint16_t maxcpus)
{
	struct vm_cpu_topology topology;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	topology.sockets = sockets;
	topology.cores = cores;
	topology.threads = threads;
	topology.maxcpus = maxcpus;
	return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology));
}

int
vm_get_topology(struct vmctx *ctx,
    uint16_t *sockets, uint16_t *cores, uint16_t *threads, uint16_t *maxcpus)
{
	struct vm_cpu_topology topology;
	int error;

	bzero(&topology, sizeof (struct vm_cpu_topology));
	error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology);
	if (error == 0) {
		*sockets = topology.sockets;
		*cores = topology.cores;
		*threads = topology.threads;
		*maxcpus = topology.maxcpus;
	}
	return (error);
}

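/*
 * Illustrative sketch: a guest with 1 socket, 2 cores and 2 threads per
 * core exposes 1 * 2 * 2 = 4 vCPUs; passing 0 for maxcpus (as bhyve(8)
 * does) keeps the kernel's default limit.
 *
 *	(void) vm_set_topology(ctx, 1, 2, 2, 0);
 */
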
int
vm_limit_rights(struct vmctx *ctx)
{
	cap_rights_t rights;

	cap_rights_init(&rights, CAP_IOCTL, CAP_MMAP_RW);
	if (caph_rights_limit(ctx->fd, &rights) != 0)
		return (-1);
	if (caph_ioctls_limit(ctx->fd, vm_ioctl_cmds, vm_ioctl_ncmds) != 0)
		return (-1);
	return (0);
}

/*
 * Avoid using in new code.  Operations on the fd should be wrapped here so that
 * capability rights can be kept in sync.
 */
int
vm_get_device_fd(struct vmctx *ctx)
{

	return (ctx->fd);
}

/* Legacy interface, do not use. */
const cap_ioctl_t *
vm_get_ioctls(size_t *len)
{
	cap_ioctl_t *cmds;
	size_t sz;

	if (len == NULL) {
		sz = vm_ioctl_ncmds * sizeof(vm_ioctl_cmds[0]);
		cmds = malloc(sz);
		if (cmds == NULL)
			return (NULL);
		bcopy(vm_ioctl_cmds, cmds, sz);
		return (cmds);
	}

	*len = vm_ioctl_ncmds;
	return (NULL);
}