/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <x86/segments.h>
#include <machine/specialreg.h>
#include <machine/param.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"
#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

/*
 * Size of the guard region before and after the virtual address space
 * mapping the guest physical memory.  This must be a multiple of the
 * superpage size for performance reasons.
 */
#define	VM_MMAP_GUARD_SIZE	(4 * MB)

#define	PROT_RW		(PROT_READ | PROT_WRITE)
#define	PROT_ALL	(PROT_READ | PROT_WRITE | PROT_EXEC)
struct vmctx {
        int     fd;
        uint32_t lowmem_limit;
        int     memflags;
        size_t  lowmem;
        size_t  highmem;
        char    *baseaddr;
        char    *name;
};

#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
static int
vm_device_open(const char *name)
{
        int fd, len;
        char *vmfile;

        len = strlen("/dev/vmm/") + strlen(name) + 1;
        vmfile = malloc(len);
        assert(vmfile != NULL);
        snprintf(vmfile, len, "/dev/vmm/%s", name);

        /* Open the device file */
        fd = open(vmfile, O_RDWR, 0);

        free(vmfile);
        return (fd);
}
int
vm_create(const char *name)
{

        return (CREATE((char *)name));
}
struct vmctx *
vm_open(const char *name)
{
        struct vmctx *vm;

        vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
        assert(vm != NULL);

        vm->fd = -1;
        vm->memflags = 0;
        vm->lowmem_limit = 3 * GB;
        vm->name = (char *)(vm + 1);
        strcpy(vm->name, name);

        if ((vm->fd = vm_device_open(vm->name)) < 0)
                goto err;

        return (vm);
err:
        vm_destroy(vm);
        return (NULL);
}
void
vm_destroy(struct vmctx *vm)
{
        assert(vm != NULL);

        if (vm->fd >= 0)
                close(vm->fd);
        DESTROY(vm->name);

        free(vm);
}
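/*
 * Illustrative sketch, not compiled into the library: the basic VM
 * lifecycle of create, open and destroy.  The VM name "testvm" and the
 * err(3) error handling (which assumes <err.h>) are examples, not part
 * of the API.
 */
#if 0
static void
example_vm_lifecycle(void)
{
        struct vmctx *ctx;

        if (vm_create("testvm") != 0)
                err(1, "vm_create");
        if ((ctx = vm_open("testvm")) == NULL)
                err(1, "vm_open");
        /* ... configure, run and service the VM ... */
        vm_destroy(ctx);
}
#endif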
int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
        char *endptr;
        size_t optval;
        int error;

        optval = strtoul(optarg, &endptr, 0);
        if (*optarg != '\0' && *endptr == '\0') {
                /*
                 * For the sake of backward compatibility if the memory size
                 * specified on the command line is less than a megabyte then
                 * it is interpreted as being in units of MB.
                 */
                if (optval < MB)
                        optval *= MB;
                *ret_memsize = optval;
                error = 0;
        } else
                error = expand_number(optarg, ret_memsize);

        return (error);
}
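/*
 * Illustrative sketch: because of the backward-compatibility rule above,
 * a bare number smaller than a megabyte is scaled up to MB, while values
 * with a unit suffix are handled by expand_number(3).
 */
#if 0
static void
example_parse_memsize(void)
{
        size_t memsize;

        vm_parse_memsize("256", &memsize);      /* 256 MB */
        vm_parse_memsize("2G", &memsize);       /* 2 GB via expand_number(3) */
}
#endif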
uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

        return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

        ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

        ctx->memflags = flags;
}

int
vm_get_memflags(struct vmctx *ctx)
{

        return (ctx->memflags);
}
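/*
 * Illustrative sketch: memory flags must be set before the guest memory
 * is mapped, e.g. to request wired (non-pageable) guest memory.  'ctx'
 * is assumed to come from vm_open().
 */
#if 0
static void
example_wire_guest_memory(struct vmctx *ctx)
{

        vm_set_memflags(ctx, vm_get_memflags(ctx) | VM_MEM_F_WIRED);
}
#endif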
/*
 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
 */
int
vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot)
{
        struct vm_memmap memmap;
        int error, flags;

        memmap.gpa = gpa;
        memmap.segid = segid;
        memmap.segoff = off;
        memmap.len = len;
        memmap.prot = prot;
        memmap.flags = 0;

        if (ctx->memflags & VM_MEM_F_WIRED)
                memmap.flags |= VM_MEMMAP_F_WIRED;

        /*
         * If this mapping already exists then don't create it again. This
         * is the common case for SYSMEM mappings created by bhyveload(8).
         */
        error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
        if (error == 0 && gpa == memmap.gpa) {
                if (segid != memmap.segid || off != memmap.segoff ||
                    prot != memmap.prot || flags != memmap.flags) {
                        errno = EEXIST;
                        return (-1);
                } else {
                        return (0);
                }
        }

        error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
        return (error);
}
int
vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
        struct vm_memmap memmap;
        int error;

        bzero(&memmap, sizeof(struct vm_memmap));
        memmap.gpa = *gpa;
        error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
        if (error == 0) {
                *gpa = memmap.gpa;
                *segid = memmap.segid;
                *segoff = memmap.segoff;
                *len = memmap.len;
                *prot = memmap.prot;
                *flags = memmap.flags;
        }
        return (error);
}
/*
 * Return 0 if the segments are identical and non-zero otherwise.
 *
 * This is slightly complicated by the fact that only device memory segments
 * are named.
 */
static int
cmpseg(size_t len, const char *str, size_t len2, const char *str2)
{

        if (len == len2) {
                if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
                        return (0);
        }
        return (-1);
}
static int
vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
{
        struct vm_memseg memseg;
        size_t n;
        int error;

        /*
         * If the memory segment has already been created then just return.
         * This is the usual case for the SYSMEM segment created by userspace
         * loaders like bhyveload(8).
         */
        error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
            sizeof(memseg.name));
        if (error)
                return (error);

        if (memseg.len != 0) {
                if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
                        errno = EINVAL;
                        return (-1);
                } else {
                        return (0);
                }
        }

        bzero(&memseg, sizeof(struct vm_memseg));
        memseg.segid = segid;
        memseg.len = len;
        if (name != NULL) {
                n = strlcpy(memseg.name, name, sizeof(memseg.name));
                if (n >= sizeof(memseg.name)) {
                        errno = ENAMETOOLONG;
                        return (-1);
                }
        }

        error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
        return (error);
}
int
vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
    size_t bufsize)
{
        struct vm_memseg memseg;
        size_t n;
        int error;

        memseg.segid = segid;
        error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
        if (error == 0) {
                *lenp = memseg.len;
                n = strlcpy(namebuf, memseg.name, bufsize);
                if (n >= bufsize) {
                        errno = ENAMETOOLONG;
                        error = -1;
                }
        }
        return (error);
}
static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
{
        char *ptr;
        int flags, error;

        /* Map 'len' bytes starting at 'gpa' in the guest address space */
        error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
        if (error)
                return (error);

        flags = MAP_SHARED | MAP_FIXED;
        if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
                flags |= MAP_NOCORE;

        /* mmap into the process address space on the host */
        ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
        if (ptr == MAP_FAILED)
                return (-1);

        return (0);
}
int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
        size_t objsize, len;
        vm_paddr_t gpa;
        char *baseaddr, *ptr;
        int error, flags;

        assert(vms == VM_MMAP_ALL);

        /*
         * If 'memsize' cannot fit entirely in the 'lowmem' segment then
         * create another 'highmem' segment above 4GB for the remainder.
         */
        if (memsize > ctx->lowmem_limit) {
                ctx->lowmem = ctx->lowmem_limit;
                ctx->highmem = memsize - ctx->lowmem_limit;
                objsize = 4*GB + ctx->highmem;
        } else {
                ctx->lowmem = memsize;
                ctx->highmem = 0;
                objsize = ctx->lowmem;
        }

        error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
        if (error)
                return (error);

        /*
         * Stake out a contiguous region covering the guest physical memory
         * and the adjoining guard regions.
         */
        len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
        flags = MAP_PRIVATE | MAP_ANON | MAP_NOCORE | MAP_ALIGNED_SUPER;
        ptr = mmap(NULL, len, PROT_NONE, flags, -1, 0);
        if (ptr == MAP_FAILED)
                return (-1);

        baseaddr = ptr + VM_MMAP_GUARD_SIZE;
        if (ctx->highmem > 0) {
                gpa = 4*GB;
                len = ctx->highmem;
                error = setup_memory_segment(ctx, gpa, len, baseaddr);
                if (error)
                        return (error);
        }

        if (ctx->lowmem > 0) {
                gpa = 0;
                len = ctx->lowmem;
                error = setup_memory_segment(ctx, gpa, len, baseaddr);
                if (error)
                        return (error);
        }

        ctx->baseaddr = baseaddr;

        return (0);
}
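/*
 * Illustrative sketch: allocate 2 GB of guest memory.  With the default
 * 3 GB lowmem limit this fits entirely below the PCI hole, so no highmem
 * segment is created.  err(3) is used for brevity and assumes <err.h>.
 */
#if 0
static void
example_setup_memory(struct vmctx *ctx)
{
        size_t memsize;

        if (vm_parse_memsize("2G", &memsize) != 0 ||
            vm_setup_memory(ctx, memsize, VM_MMAP_ALL) != 0)
                err(1, "cannot setup guest memory");
}
#endif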
/*
 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 * the lowmem or highmem regions.
 *
 * In particular return NULL if [gaddr, gaddr+len) falls in the guest MMIO
 * region.  The instruction emulation code depends on this behavior.
 */
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

        if (ctx->lowmem > 0) {
                if (gaddr < ctx->lowmem && len <= ctx->lowmem &&
                    gaddr + len <= ctx->lowmem)
                        return (ctx->baseaddr + gaddr);
        }

        if (ctx->highmem > 0) {
                if (gaddr >= 4*GB) {
                        if (gaddr < 4*GB + ctx->highmem &&
                            len <= ctx->highmem &&
                            gaddr + len <= 4*GB + ctx->highmem)
                                return (ctx->baseaddr + gaddr);
                }
        }

        return (NULL);
}
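/*
 * Illustrative sketch: translate a guest physical address into a host
 * pointer and read a page of guest memory.  A NULL return means the
 * range is not backed by lowmem/highmem (e.g. it falls in the MMIO hole).
 */
#if 0
static void
example_read_guest_page(struct vmctx *ctx, vm_paddr_t gpa)
{
        char buf[PAGE_SIZE];
        void *hva;

        if ((hva = vm_map_gpa(ctx, gpa, sizeof(buf))) != NULL)
                memcpy(buf, hva, sizeof(buf));
}
#endif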
size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

        return (ctx->lowmem);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

        return (ctx->highmem);
}
void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
        char pathname[MAXPATHLEN];
        size_t len2;
        char *base, *ptr;
        int fd, error, flags;

        fd = -1;
        ptr = MAP_FAILED;
        if (name == NULL || strlen(name) == 0) {
                errno = EINVAL;
                goto done;
        }

        error = vm_alloc_memseg(ctx, segid, len, name);
        if (error)
                goto done;

        strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
        strlcat(pathname, ctx->name, sizeof(pathname));
        strlcat(pathname, ".", sizeof(pathname));
        strlcat(pathname, name, sizeof(pathname));

        fd = open(pathname, O_RDWR);
        if (fd < 0)
                goto done;

        /*
         * Stake out a contiguous region covering the device memory and the
         * adjoining guard regions.
         */
        len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
        flags = MAP_PRIVATE | MAP_ANON | MAP_NOCORE | MAP_ALIGNED_SUPER;
        base = mmap(NULL, len2, PROT_NONE, flags, -1, 0);
        if (base == MAP_FAILED)
                goto done;

        flags = MAP_SHARED | MAP_FIXED;
        if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
                flags |= MAP_NOCORE;

        /* mmap the devmem region in the host address space */
        ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
done:
        if (fd >= 0)
                close(fd);
        return (ptr);
}
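/*
 * Illustrative sketch: create a named device memory segment, e.g. for a
 * framebuffer.  The VM_FRAMEBUFFER segment id and the 16 MB size are
 * assumptions for the example; devmem segments are exposed on the host
 * as /dev/vmm.io/<vmname>.<segname>.
 */
#if 0
static void *
example_create_framebuffer(struct vmctx *ctx)
{

        return (vm_create_devmem(ctx, VM_FRAMEBUFFER, "framebuffer",
            16 * MB));
}
#endif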
int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
    uint64_t base, uint32_t limit, uint32_t access)
{
        int error;
        struct vm_seg_desc vmsegdesc;

        bzero(&vmsegdesc, sizeof(vmsegdesc));
        vmsegdesc.cpuid = vcpu;
        vmsegdesc.regnum = reg;
        vmsegdesc.desc.base = base;
        vmsegdesc.desc.limit = limit;
        vmsegdesc.desc.access = access;

        error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
        return (error);
}
int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
    uint64_t *base, uint32_t *limit, uint32_t *access)
{
        int error;
        struct vm_seg_desc vmsegdesc;

        bzero(&vmsegdesc, sizeof(vmsegdesc));
        vmsegdesc.cpuid = vcpu;
        vmsegdesc.regnum = reg;

        error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
        if (error == 0) {
                *base = vmsegdesc.desc.base;
                *limit = vmsegdesc.desc.limit;
                *access = vmsegdesc.desc.access;
        }
        return (error);
}
int
vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *seg_desc)
{
        int error;

        error = vm_get_desc(ctx, vcpu, reg, &seg_desc->base, &seg_desc->limit,
            &seg_desc->access);
        return (error);
}
int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
        int error;
        struct vm_register vmreg;

        bzero(&vmreg, sizeof(vmreg));
        vmreg.cpuid = vcpu;
        vmreg.regnum = reg;
        vmreg.regval = val;

        error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
        return (error);
}
int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
        int error;
        struct vm_register vmreg;

        bzero(&vmreg, sizeof(vmreg));
        vmreg.cpuid = vcpu;
        vmreg.regnum = reg;

        error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
        *ret_val = vmreg.regval;
        return (error);
}
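/*
 * Illustrative sketch: read, modify and write back a guest register.
 * Here %rip is advanced by 'instlen' bytes, as an instruction emulator
 * might do after handling an exit.
 */
#if 0
static int
example_skip_instruction(struct vmctx *ctx, int vcpu, int instlen)
{
        uint64_t rip;
        int error;

        error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RIP, &rip);
        if (error == 0)
                error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP,
                    rip + instlen);
        return (error);
}
#endif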
int
vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
{
        int error;
        struct vm_run vmrun;

        bzero(&vmrun, sizeof(vmrun));
        vmrun.cpuid = vcpu;

        error = ioctl(ctx->fd, VM_RUN, &vmrun);
        bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
        return (error);
}
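/*
 * Illustrative sketch of the canonical vm_run() loop: run a vcpu until
 * the kernel returns an exit that userspace must handle.  The exit
 * dispatch shown is a minimal skeleton, not bhyve's actual dispatcher.
 */
#if 0
static void
example_run_loop(struct vmctx *ctx, int vcpu)
{
        struct vm_exit vmexit;

        while (vm_run(ctx, vcpu, &vmexit) == 0) {
                switch (vmexit.exitcode) {
                case VM_EXITCODE_HLT:
                        return;                 /* guest halted */
                default:
                        break;                  /* emulate and continue */
                }
        }
}
#endif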
int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
        struct vm_suspend vmsuspend;

        bzero(&vmsuspend, sizeof(vmsuspend));
        vmsuspend.how = how;
        return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}
int
vm_reinit(struct vmctx *ctx)
{

        return (ioctl(ctx->fd, VM_REINIT, 0));
}
int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
        struct vm_exception exc;

        exc.cpuid = vcpu;
        exc.vector = vector;
        exc.error_code = errcode;
        exc.error_code_valid = errcode_valid;
        exc.restart_instruction = restart_instruction;

        return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}
int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
        /*
         * The apic id associated with the 'vcpu' has the same numerical value
         * as the 'vcpu' itself.
         */
        return (apicid);
}
int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
        struct vm_lapic_irq vmirq;

        bzero(&vmirq, sizeof(vmirq));
        vmirq.cpuid = vcpu;
        vmirq.vector = vector;

        return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}
int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
        struct vm_lapic_irq vmirq;

        bzero(&vmirq, sizeof(vmirq));
        vmirq.cpuid = vcpu;
        vmirq.vector = vector;

        return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}
int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
        struct vm_lapic_msi vmmsi;

        bzero(&vmmsi, sizeof(vmmsi));
        vmmsi.addr = addr;
        vmmsi.msg = msg;

        return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}
int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
        struct vm_ioapic_irq ioapic_irq;

        bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
        ioapic_irq.irq = irq;

        return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}
int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
        struct vm_ioapic_irq ioapic_irq;

        bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
        ioapic_irq.irq = irq;

        return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}
int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
        struct vm_ioapic_irq ioapic_irq;

        bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
        ioapic_irq.irq = irq;

        return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}
int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

        return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}
int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
        struct vm_isa_irq isa_irq;

        bzero(&isa_irq, sizeof(struct vm_isa_irq));
        isa_irq.atpic_irq = atpic_irq;
        isa_irq.ioapic_irq = ioapic_irq;

        return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}
int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
        struct vm_isa_irq isa_irq;

        bzero(&isa_irq, sizeof(struct vm_isa_irq));
        isa_irq.atpic_irq = atpic_irq;
        isa_irq.ioapic_irq = ioapic_irq;

        return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}
int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
        struct vm_isa_irq isa_irq;

        bzero(&isa_irq, sizeof(struct vm_isa_irq));
        isa_irq.atpic_irq = atpic_irq;
        isa_irq.ioapic_irq = ioapic_irq;

        return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}
int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
        struct vm_isa_irq_trigger isa_irq_trigger;

        bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
        isa_irq_trigger.atpic_irq = atpic_irq;
        isa_irq_trigger.trigger = trigger;

        return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}
int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
        struct vm_nmi vmnmi;

        bzero(&vmnmi, sizeof(vmnmi));
        vmnmi.cpuid = vcpu;

        return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}
785 { "hlt_exit", VM_CAP_HALT_EXIT },
786 { "mtrap_exit", VM_CAP_MTRAP_EXIT },
787 { "pause_exit", VM_CAP_PAUSE_EXIT },
788 { "unrestricted_guest", VM_CAP_UNRESTRICTED_GUEST },
789 { "enable_invpcid", VM_CAP_ENABLE_INVPCID },
int
vm_capability_name2type(const char *capname)
{
        int i;

        for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
                if (strcmp(capstrmap[i].name, capname) == 0)
                        return (capstrmap[i].type);
        }
        return (-1);
}
const char *
vm_capability_type2name(int type)
{
        int i;

        for (i = 0; capstrmap[i].name != NULL; i++) {
                if (capstrmap[i].type == type)
                        return (capstrmap[i].name);
        }
        return (NULL);
}
int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
    int *retval)
{
        int error;
        struct vm_capability vmcap;

        bzero(&vmcap, sizeof(vmcap));
        vmcap.cpuid = vcpu;
        vmcap.captype = cap;

        error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
        *retval = vmcap.capval;
        return (error);
}
int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
        struct vm_capability vmcap;

        bzero(&vmcap, sizeof(vmcap));
        vmcap.cpuid = vcpu;
        vmcap.captype = cap;
        vmcap.capval = val;

        return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}
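/*
 * Illustrative sketch: enable a capability by name, the way a command
 * line option might be wired up.  Returns non-zero if the name is
 * unknown or the kernel rejects the capability.
 */
#if 0
static int
example_enable_capability(struct vmctx *ctx, int vcpu, const char *capname)
{
        int type;

        if ((type = vm_capability_name2type(capname)) < 0)
                return (-1);
        return (vm_set_capability(ctx, vcpu, type, 1));
}
#endif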
int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
        struct vm_pptdev pptdev;

        bzero(&pptdev, sizeof(pptdev));
        pptdev.bus = bus;
        pptdev.slot = slot;
        pptdev.func = func;

        return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}
int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
        struct vm_pptdev pptdev;

        bzero(&pptdev, sizeof(pptdev));
        pptdev.bus = bus;
        pptdev.slot = slot;
        pptdev.func = func;

        return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}
int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
    vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
        struct vm_pptdev_mmio pptmmio;

        bzero(&pptmmio, sizeof(pptmmio));
        pptmmio.bus = bus;
        pptmmio.slot = slot;
        pptmmio.func = func;
        pptmmio.gpa = gpa;
        pptmmio.len = len;
        pptmmio.hpa = hpa;

        return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}
int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
        struct vm_pptdev_msi pptmsi;

        bzero(&pptmsi, sizeof(pptmsi));
        pptmsi.vcpu = vcpu;
        pptmsi.bus = bus;
        pptmsi.slot = slot;
        pptmsi.func = func;
        pptmsi.msg = msg;
        pptmsi.addr = addr;
        pptmsi.numvec = numvec;

        return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}
int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
        struct vm_pptdev_msix pptmsix;

        bzero(&pptmsix, sizeof(pptmsix));
        pptmsix.vcpu = vcpu;
        pptmsix.bus = bus;
        pptmsix.slot = slot;
        pptmsix.func = func;
        pptmsix.idx = idx;
        pptmsix.msg = msg;
        pptmsix.addr = addr;
        pptmsix.vector_control = vector_control;

        return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}
uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
    int *ret_entries)
{
        int error;

        static struct vm_stats vmstats;

        vmstats.cpuid = vcpu;

        error = ioctl(ctx->fd, VM_STATS, &vmstats);
        if (error == 0) {
                if (ret_entries)
                        *ret_entries = vmstats.num_entries;
                if (ret_tv)
                        *ret_tv = vmstats.tv;
                return (vmstats.statbuf);
        } else
                return (NULL);
}
const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
        static struct vm_stat_desc statdesc;

        statdesc.index = index;
        if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
                return (statdesc.desc);
        else
                return (NULL);
}
int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
        int error;
        struct vm_x2apic x2apic;

        bzero(&x2apic, sizeof(x2apic));
        x2apic.cpuid = vcpu;

        error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
        *state = x2apic.state;
        return (error);
}
int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
        int error;
        struct vm_x2apic x2apic;

        bzero(&x2apic, sizeof(x2apic));
        x2apic.cpuid = vcpu;
        x2apic.state = state;

        error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

        return (error);
}
/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
        int error;
        uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
        uint32_t desc_access, desc_limit;
        uint16_t sel;

        zero = 0;

        rflags = 0x2;
        error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
        if (error)
                goto done;

        rip = 0xfff0;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
                goto done;

        cr0 = CR0_NE;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
                goto done;

        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
                goto done;

        cr4 = 0;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
                goto done;

        /*
         * CS: present, r/w, accessed, 16-bit, byte granularity, usable
         */
        desc_base = 0xffff0000;
        desc_limit = 0xffff;
        desc_access = 0x0093;
        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        sel = 0xf000;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
                goto done;

        /*
         * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
         */
        desc_base = 0;
        desc_limit = 0xffff;
        desc_access = 0x0093;
        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
            desc_base, desc_limit, desc_access);
        if (error)
                goto done;

        sel = 0;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
                goto done;

        /* General purpose registers */
        rdx = 0xf00;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
                goto done;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
                goto done;

        /* GDTR, IDTR */
        desc_base = 0;
        desc_limit = 0xffff;
        desc_access = 0;
        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
            desc_base, desc_limit, desc_access);
        if (error != 0)
                goto done;

        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
            desc_base, desc_limit, desc_access);
        if (error != 0)
                goto done;

        /* TR */
        desc_base = 0;
        desc_limit = 0xffff;
        desc_access = 0x0000008b;
        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
        if (error)
                goto done;

        sel = 0;
        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
                goto done;

        /* LDTR */
        desc_base = 0;
        desc_limit = 0xffff;
        desc_access = 0x00000082;
        error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
            desc_limit, desc_access);
        if (error)
                goto done;

        if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
                goto done;

        /* XXX cr2, debug registers */

        error = 0;
done:
        return (error);
}
int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
        int error, i;
        struct vm_gpa_pte gpapte;

        bzero(&gpapte, sizeof(gpapte));
        gpapte.gpa = gpa;

        error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

        if (error == 0) {
                *num = gpapte.ptenum;
                for (i = 0; i < gpapte.ptenum; i++)
                        pte[i] = gpapte.pte[i];
        }

        return (error);
}
int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
        int error;
        struct vm_hpet_cap cap;

        bzero(&cap, sizeof(struct vm_hpet_cap));
        error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
        if (capabilities != NULL)
                *capabilities = cap.capabilities;
        return (error);
}
int
vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
        struct vm_gla2gpa gg;
        int error;

        bzero(&gg, sizeof(struct vm_gla2gpa));
        gg.vcpuid = vcpu;
        gg.prot = prot;
        gg.gla = gla;
        gg.paging = *paging;

        error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
        if (error == 0) {
                *fault = gg.fault;
                *gpa = gg.gpa;
        }
        return (error);
}
#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif
int
vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
{
        void *va;
        uint64_t gpa;
        int error, i, n, off;

        for (i = 0; i < iovcnt; i++) {
                iov[i].iov_base = 0;
                iov[i].iov_len = 0;
        }

        while (len) {
                assert(iovcnt > 0);
                error = vm_gla2gpa(ctx, vcpu, paging, gla, prot, &gpa, fault);
                if (error || *fault)
                        return (error);

                off = gpa & PAGE_MASK;
                n = min(len, PAGE_SIZE - off);

                va = vm_map_gpa(ctx, gpa, n);
                if (va == NULL)
                        return (EFAULT);

                iov->iov_base = va;
                iov->iov_len = n;
                iov++;
                iovcnt--;

                gla += n;
                len -= n;
        }
        return (0);
}
void
vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov, int iovcnt)
{
        /* Nothing to unmap: the iovecs point directly into guest memory. */
        return;
}
void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
        const char *src;
        char *dst;
        size_t n;

        dst = vp;
        while (len) {
                assert(iov->iov_len);
                n = min(len, iov->iov_len);
                src = iov->iov_base;
                bcopy(src, dst, n);

                iov++;
                dst += n;
                len -= n;
        }
}
void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
    size_t len)
{
        const char *src;
        char *dst;
        size_t n;

        src = vp;
        while (len) {
                assert(iov->iov_len);
                n = min(len, iov->iov_len);
                dst = iov->iov_base;
                bcopy(src, dst, n);

                iov++;
                src += n;
                len -= n;
        }
}
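/*
 * Illustrative sketch: copy bytes out of a guest virtual address using
 * vm_copy_setup() to translate [gla, gla+len) into host iovecs.  The
 * two-entry iovec bounds a copy of up to a page that may straddle a
 * page boundary; the function name is an example, not a library API.
 */
#if 0
static int
example_copy_from_guest(struct vmctx *ctx, int vcpu,
    struct vm_guest_paging *paging, uint64_t gla, void *buf, size_t len)
{
        struct iovec iov[2];
        int error, fault;

        error = vm_copy_setup(ctx, vcpu, paging, gla, len, PROT_READ,
            iov, nitems(iov), &fault);
        if (error || fault)
                return (error ? error : EFAULT);
        vm_copyin(ctx, vcpu, iov, buf, len);
        vm_copy_teardown(ctx, vcpu, iov, nitems(iov));
        return (0);
}
#endif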
static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
        struct vm_cpuset vm_cpuset;
        int error;

        bzero(&vm_cpuset, sizeof(struct vm_cpuset));
        vm_cpuset.which = which;
        vm_cpuset.cpusetsize = sizeof(cpuset_t);
        vm_cpuset.cpus = cpus;

        error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
        return (error);
}
int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

        return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

        return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}
int
vm_activate_cpu(struct vmctx *ctx, int vcpu)
{
        struct vm_activate_cpu ac;
        int error;

        bzero(&ac, sizeof(struct vm_activate_cpu));
        ac.vcpuid = vcpu;
        error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
        return (error);
}
int
vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
{
        struct vm_intinfo vmii;
        int error;

        bzero(&vmii, sizeof(struct vm_intinfo));
        vmii.vcpuid = vcpu;
        error = ioctl(ctx->fd, VM_GET_INTINFO, &vmii);
        if (error == 0) {
                *info1 = vmii.info1;
                *info2 = vmii.info2;
        }
        return (error);
}
int
vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
{
        struct vm_intinfo vmii;
        int error;

        bzero(&vmii, sizeof(struct vm_intinfo));
        vmii.vcpuid = vcpu;
        vmii.info1 = info1;
        error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
        return (error);
}
int
vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
{
        struct vm_rtc_data rtcdata;
        int error;

        bzero(&rtcdata, sizeof(struct vm_rtc_data));
        rtcdata.offset = offset;
        rtcdata.value = value;
        error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
        return (error);
}
int
vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
{
        struct vm_rtc_data rtcdata;
        int error;

        bzero(&rtcdata, sizeof(struct vm_rtc_data));
        rtcdata.offset = offset;
        error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
        if (error == 0)
                *retval = rtcdata.value;
        return (error);
}
int
vm_rtc_settime(struct vmctx *ctx, time_t secs)
{
        struct vm_rtc_time rtctime;
        int error;

        bzero(&rtctime, sizeof(struct vm_rtc_time));
        rtctime.secs = secs;
        error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
        return (error);
}
int
vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
{
        struct vm_rtc_time rtctime;
        int error;

        bzero(&rtctime, sizeof(struct vm_rtc_time));
        error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
        if (error == 0)
                *secs = rtctime.secs;
        return (error);
}
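/*
 * Illustrative sketch: initialize the virtual RTC from the host clock,
 * as a VM bootstrap might do before starting the vcpus.  Assumes
 * <time.h> for time(3).
 */
#if 0
static int
example_init_rtc(struct vmctx *ctx)
{

        return (vm_rtc_settime(ctx, time(NULL)));
}
#endif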
int
vm_restart_instruction(void *arg, int vcpu)
{
        struct vmctx *ctx = arg;

        return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
}