/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <machine/specialreg.h>

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"
#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

struct vmctx {
	int	fd;
	uint32_t lowmem_limit;
	enum vm_mmap_style vms;
	int	memflags;
	size_t	lowmem;
	char	*lowmem_addr;
	size_t	highmem;
	char	*highmem_addr;
	char	*name;
};
#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))
static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}
int
vm_create(const char *name)
{

	return (CREATE((char *)name));
}
struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	vm_destroy(vm);
	return (NULL);
}
void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}
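/*
 * Usage sketch (illustrative only, not part of the library); the VM name
 * "testvm" is made up:
 *
 *	struct vmctx *ctx = NULL;
 *
 *	if (vm_create("testvm") == 0)
 *		ctx = vm_open("testvm");
 *	if (ctx != NULL) {
 *		... issue ioctls through the wrappers below ...
 *		vm_destroy(ctx);
 *	}
 */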
int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(optarg, &endptr, 0);
	if (*optarg != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(optarg, ret_memsize);

	return (error);
}
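/*
 * Worked example (illustrative inputs): "256" parses via strtoul() to 256,
 * which is below one megabyte, so it is scaled to 256 * MB; "0x20000000"
 * is already >= 1MB and is taken verbatim as 512MB; "1G" stops strtoul()
 * early and is handed to expand_number(3) instead.
 */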
int
vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len,
		  int *wired)
{
	int error;
	struct vm_memory_segment seg;

	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	error = ioctl(ctx->fd, VM_GET_MEMORY_SEG, &seg);
	*ret_len = seg.len;
	if (wired != NULL)
		*wired = seg.wired;
	return (error);
}
uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}
void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}
void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}
static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **addr)
{
	int error, mmap_flags;
	struct vm_memory_segment seg;

	/*
	 * Create and optionally map 'len' bytes of memory at guest
	 * physical address 'gpa'
	 */
	bzero(&seg, sizeof(seg));
	seg.gpa = gpa;
	seg.len = len;
	error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg);
	if (error == 0 && addr != NULL) {
		mmap_flags = MAP_SHARED;
		if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
			mmap_flags |= MAP_NOCORE;
		*addr = mmap(NULL, len, PROT_READ | PROT_WRITE, mmap_flags,
		    ctx->fd, gpa);
	}
	return (error);
}
int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	char **addr;
	int error;

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(vms == VM_MMAP_NONE || vms == VM_MMAP_ALL);
	ctx->vms = vms;

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
	}

	if (ctx->lowmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->lowmem_addr : NULL;
		error = setup_memory_segment(ctx, 0, ctx->lowmem, addr);
		if (error)
			return (error);
	}

	if (ctx->highmem > 0) {
		addr = (vms == VM_MMAP_ALL) ? &ctx->highmem_addr : NULL;
		error = setup_memory_segment(ctx, 4*GB, ctx->highmem, addr);
		if (error)
			return (error);
	}

	return (0);
}
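/*
 * Worked example (illustrative numbers): with the default 3GB lowmem_limit
 * set in vm_open(), an 8GB guest is split into lowmem = 3GB at gpa 0 and
 * highmem = 5GB at gpa 4GB; the 3GB-4GB range is left unbacked, which is
 * where the PCI hole and other device memory typically live.
 */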
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	/* XXX VM_MMAP_SPARSE not implemented yet */
	assert(ctx->vms == VM_MMAP_ALL);

	if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem)
		return ((void *)(ctx->lowmem_addr + gaddr));

	if (gaddr >= 4*GB) {
		gaddr -= 4*GB;
		if (gaddr < ctx->highmem && gaddr + len <= ctx->highmem)
			return ((void *)(ctx->highmem_addr + gaddr));
	}

	return (NULL);
}
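/*
 * Example (illustrative): a request for gpa 4GB + 0x1000 falls in the
 * highmem segment, so after rebasing by 4GB the function returns
 * highmem_addr + 0x1000.
 */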
int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}
int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t *base, uint32_t *limit, uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}
int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
	return (error);
}
int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}
int
vm_run(struct vmctx *ctx, int vcpu, uint64_t rip, struct vm_exit *vmexit)
{
	int error;
	struct vm_run vmrun;

	bzero(&vmrun, sizeof(vmrun));
	vmrun.cpuid = vcpu;
	vmrun.rip = rip;

	error = ioctl(ctx->fd, VM_RUN, &vmrun);
	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
	return (error);
}
int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}
static int
vm_inject_exception_real(struct vmctx *ctx, int vcpu, int vector,
    int error_code, int error_code_valid)
{
	struct vm_exception exc;

	bzero(&exc, sizeof(exc));
	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = error_code;
	exc.error_code_valid = error_code_valid;

	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}
int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, 0, 0));
}
int
vm_inject_exception2(struct vmctx *ctx, int vcpu, int vector, int errcode)
{

	return (vm_inject_exception_real(ctx, vcpu, vector, errcode, 1));
}
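/*
 * Usage sketch (hypothetical call, not from the original file): injecting
 * a general protection fault with a zero error code would look like
 * vm_inject_exception2(ctx, vcpu, IDT_GP, 0), with IDT_GP (vector 13)
 * coming from <machine/segments.h>.
 */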
int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}
int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}
int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}
int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}
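/*
 * Example (illustrative values): per the x86 MSI address/data format, a
 * fixed-delivery interrupt on vector 0x30 aimed at APIC ID 0 would use
 * addr = 0xfee00000 and msg = 0x30.
 */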
int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}
int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}
int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}
int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}
int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}
int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}
int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}
int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}
int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));
	vmnmi.cpuid = vcpu;

	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}
546 { "hlt_exit", VM_CAP_HALT_EXIT },
547 { "mtrap_exit", VM_CAP_MTRAP_EXIT },
548 { "pause_exit", VM_CAP_PAUSE_EXIT },
549 { "unrestricted_guest", VM_CAP_UNRESTRICTED_GUEST },
550 { "enable_invpcid", VM_CAP_ENABLE_INVPCID },
int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
		if (strcmp(capstrmap[i].name, capname) == 0)
			return (capstrmap[i].type);
	}

	return (-1);
}
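/*
 * Example (illustrative): vm_capability_name2type("hlt_exit") returns
 * VM_CAP_HALT_EXIT; a name not present in capstrmap falls through the
 * loop and yields -1.
 */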
const char *
vm_capability_type2name(int type)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL; i++) {
		if (capstrmap[i].type == type)
			return (capstrmap[i].name);
	}

	return (NULL);
}
int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
		  int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;

	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}
int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;
	vmcap.capval = val;

	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}
int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}
int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}
int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}
int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.vcpu = vcpu;
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.addr = addr;
	pptmsi.msg = msg;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}
int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.vcpu = vcpu;
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.addr = addr;
	pptmsix.msg = msg;
	pptmsix.vector_control = vector_control;

	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}
uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
	     int *ret_entries)
{
	int error;

	static struct vm_stats vmstats;

	vmstats.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_STATS, &vmstats);
	if (error == 0) {
		if (ret_entries)
			*ret_entries = vmstats.num_entries;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (vmstats.statbuf);
	} else
		return (NULL);
}
const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}
int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}
int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;
	x2apic.state = state;

	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}
/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
	int error;
	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
	uint32_t desc_access, desc_limit;
	uint16_t sel;

	zero = 0;

	rflags = 0x2;
	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
	if (error)
		goto done;

	rip = 0xfff0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
		goto done;

	cr0 = CR0_NE;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
		goto done;

	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
		goto done;

	cr4 = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
		goto done;
	/*
	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
	 */
	desc_base = 0xffff0000;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0xf000;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
		goto done;
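	/*
	 * Worked check: with CS.base = 0xffff0000 and %rip = 0xfff0 set
	 * above, the first instruction is fetched from physical address
	 * 0xffff0000 + 0xfff0 = 0xfffffff0, the architectural x86 reset
	 * vector.
	 */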
	/*
	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
	 */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
		goto done;
	/* General purpose registers */
	rdx = 0xf00;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
		goto done;
	/* GDTR, IDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;
	/* TR */
	desc_access = 0x0000008b;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
		goto done;
	/* LDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x00000082;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;

	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
		goto done;
	/* XXX cr2, debug registers */

	error = 0;
done:
	return (error);
}
int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}
int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}