/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/libkern.h>
#include <sys/ioccom.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "io/ppt.h"
#include "io/vatpic.h"
#include "io/vioapic.h"
#include "io/vhpet.h"
struct vmmdev_softc {
	struct vm	*vm;		/* vm instance cookie */
	struct cdev	*cdev;
	SLIST_ENTRY(vmmdev_softc) link;
	int		flags;
};
#define	VSC_LINKED		0x01

static SLIST_HEAD(, vmmdev_softc) head;

static struct mtx vmmdev_mtx;

static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");
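
/*
 * Note: the global 'head' list and each softc's 'link'/'flags' fields are
 * protected by 'vmmdev_mtx' (see the notyet-assertion in vmmdev_lookup()).
 * Entries are linked in sysctl_vmm_create() and unlinked in vmmdev_destroy().
 */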
static struct vmmdev_softc *
vmmdev_lookup(const char *name)
{
	struct vmmdev_softc *sc;

#ifdef notyet	/* XXX kernel is not compiled with invariants */
	mtx_assert(&vmmdev_mtx, MA_OWNED);
#endif
	SLIST_FOREACH(sc, &head, link) {
		if (strcmp(name, vm_name(sc->vm)) == 0)
			break;
	}

	return (sc);
}
static struct vmmdev_softc *
vmmdev_lookup2(struct cdev *cdev)
{

	return (cdev->si_drv1);
}
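
/*
 * 'si_drv1' is pointed at the softc in sysctl_vmm_create() only after the
 * /dev/vmm/<name> node exists, so callers below treat a NULL return as
 * "device not ready" and fail with ENXIO.
 */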
static int
vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags)
{
	int error, off, c, prot;
	vm_paddr_t gpa;
	void *hpa, *cookie;
	struct vmmdev_softc *sc;

	static char zerobuf[PAGE_SIZE];

	error = 0;
	sc = vmmdev_lookup2(cdev);
	if (sc == NULL)
		error = ENXIO;

	prot = (uio->uio_rw == UIO_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
	while (uio->uio_resid > 0 && error == 0) {
		gpa = uio->uio_offset;
		off = gpa & PAGE_MASK;
		c = min(uio->uio_resid, PAGE_SIZE - off);
		/*
		 * The VM has a hole in its physical memory map. If we want to
		 * use 'dd' to inspect memory beyond the hole we need to
		 * provide bogus data for memory that lies in the hole.
		 *
		 * Since this device does not support lseek(2), dd(1) will
		 * read(2) blocks of data to simulate the lseek(2).
		 */
		hpa = vm_gpa_hold(sc->vm, gpa, c, prot, &cookie);
		if (hpa == NULL) {
			if (uio->uio_rw == UIO_READ)
				error = uiomove(zerobuf, c, uio);
			else
				error = EFAULT;
		} else {
			error = uiomove(hpa, c, uio);
			vm_gpa_release(cookie);
		}
	}
	return (error);
}
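
/*
 * Illustrative use from userland (not part of the driver): guest physical
 * memory can be inspected with read(2) on the device node, e.g.
 *
 *	dd if=/dev/vmm/vm0 bs=4k skip=256 count=1 | hexdump -C
 *
 * where "vm0" is a placeholder VM name.  Reads that land in holes of the
 * guest physical map return zeroes, as described in the comment above.
 */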
static int
vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int error, vcpu, state_changed;
	struct vmmdev_softc *sc;
	struct vm_memory_segment *seg;
	struct vm_register *vmreg;
	struct vm_seg_desc *vmsegdesc;
	struct vm_run *vmrun;
	struct vm_exception *vmexc;
	struct vm_lapic_irq *vmirq;
	struct vm_lapic_msi *vmmsi;
	struct vm_ioapic_irq *ioapic_irq;
	struct vm_isa_irq *isa_irq;
	struct vm_capability *vmcap;
	struct vm_pptdev *pptdev;
	struct vm_pptdev_mmio *pptmmio;
	struct vm_pptdev_msi *pptmsi;
	struct vm_pptdev_msix *pptmsix;
	struct vm_nmi *vmnmi;
	struct vm_stats *vmstats;
	struct vm_stat_desc *statdesc;
	struct vm_x2apic *x2apic;
	struct vm_gpa_pte *gpapte;

	sc = vmmdev_lookup2(cdev);
	if (sc == NULL)
		return (ENXIO);

	error = 0;
	vcpu = -1;
	state_changed = 0;
	/*
	 * Some VMM ioctls can operate only on vcpus that are not running.
	 */
	switch (cmd) {
	case VM_GET_REGISTER:
	case VM_SET_REGISTER:
	case VM_GET_SEGMENT_DESCRIPTOR:
	case VM_SET_SEGMENT_DESCRIPTOR:
	case VM_INJECT_EXCEPTION:
	case VM_GET_CAPABILITY:
	case VM_SET_CAPABILITY:
	case VM_SET_X2APIC_STATE:
		/*
		 * XXX fragile, handle with care
		 * Assumes that the first field of the ioctl data is the vcpu.
		 */
		vcpu = *(int *)data;
		if (vcpu < 0 || vcpu >= VM_MAXCPU) {
			error = EINVAL;
			goto done;
		}
		error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
		if (error)
			goto done;
		state_changed = 1;
		break;
	case VM_MAP_PPTDEV_MMIO:
	case VM_UNBIND_PPTDEV:
		/*
		 * ioctls that operate on the entire virtual machine must
		 * prevent all vcpus from running.
		 */
		error = 0;
		for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++) {
			error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
			if (error)
				break;
		}
		if (error) {
			while (--vcpu >= 0)
				vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
			goto done;
		}
		state_changed = 2;
		break;
	default:
		break;
	}
	switch (cmd) {
	case VM_RUN:
		vmrun = (struct vm_run *)data;
		error = vm_run(sc->vm, vmrun);
		break;
	case VM_SUSPEND:
		error = vm_suspend(sc->vm);
		break;
	case VM_STAT_DESC:
		statdesc = (struct vm_stat_desc *)data;
		error = vmm_stat_desc_copy(statdesc->index,
		    statdesc->desc, sizeof(statdesc->desc));
		break;
	case VM_STATS: {
		CTASSERT(MAX_VM_STATS >= MAX_VMM_STAT_ELEMS);
		vmstats = (struct vm_stats *)data;
		getmicrotime(&vmstats->tv);
		error = vmm_stat_copy(sc->vm, vmstats->cpuid,
		    &vmstats->num_entries, vmstats->statbuf);
		break;
	}
	case VM_PPTDEV_MSI:
		pptmsi = (struct vm_pptdev_msi *)data;
		error = ppt_setup_msi(sc->vm, pptmsi->vcpu,
		    pptmsi->bus, pptmsi->slot, pptmsi->func,
		    pptmsi->addr, pptmsi->msg,
		    pptmsi->numvec);
		break;
	case VM_PPTDEV_MSIX:
		pptmsix = (struct vm_pptdev_msix *)data;
		error = ppt_setup_msix(sc->vm, pptmsix->vcpu,
		    pptmsix->bus, pptmsix->slot,
		    pptmsix->func, pptmsix->idx,
		    pptmsix->addr, pptmsix->msg,
		    pptmsix->vector_control);
		break;
	case VM_MAP_PPTDEV_MMIO:
		pptmmio = (struct vm_pptdev_mmio *)data;
		error = ppt_map_mmio(sc->vm, pptmmio->bus, pptmmio->slot,
		    pptmmio->func, pptmmio->gpa, pptmmio->len,
		    pptmmio->hpa);
		break;
	case VM_BIND_PPTDEV:
		pptdev = (struct vm_pptdev *)data;
		error = vm_assign_pptdev(sc->vm, pptdev->bus, pptdev->slot,
		    pptdev->func);
		break;
	case VM_UNBIND_PPTDEV:
		pptdev = (struct vm_pptdev *)data;
		error = vm_unassign_pptdev(sc->vm, pptdev->bus, pptdev->slot,
		    pptdev->func);
		break;
	case VM_INJECT_EXCEPTION:
		vmexc = (struct vm_exception *)data;
		error = vm_inject_exception(sc->vm, vmexc->cpuid, vmexc);
		break;
	case VM_INJECT_NMI:
		vmnmi = (struct vm_nmi *)data;
		error = vm_inject_nmi(sc->vm, vmnmi->cpuid);
		break;
	case VM_LAPIC_IRQ:
		vmirq = (struct vm_lapic_irq *)data;
		error = lapic_intr_edge(sc->vm, vmirq->cpuid, vmirq->vector);
		break;
	case VM_LAPIC_LOCAL_IRQ:
		vmirq = (struct vm_lapic_irq *)data;
		error = lapic_set_local_intr(sc->vm, vmirq->cpuid,
		    vmirq->vector);
		break;
	case VM_LAPIC_MSI:
		vmmsi = (struct vm_lapic_msi *)data;
		error = lapic_intr_msi(sc->vm, vmmsi->addr, vmmsi->msg);
		break;
	case VM_IOAPIC_ASSERT_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_assert_irq(sc->vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_DEASSERT_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_deassert_irq(sc->vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_PULSE_IRQ:
		ioapic_irq = (struct vm_ioapic_irq *)data;
		error = vioapic_pulse_irq(sc->vm, ioapic_irq->irq);
		break;
	case VM_IOAPIC_PINCOUNT:
		*(int *)data = vioapic_pincount(sc->vm);
		break;
	case VM_ISA_ASSERT_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_assert_irq(sc->vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_assert_irq(sc->vm,
			    isa_irq->ioapic_irq);
		break;
	case VM_ISA_DEASSERT_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_deassert_irq(sc->vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_deassert_irq(sc->vm,
			    isa_irq->ioapic_irq);
		break;
	case VM_ISA_PULSE_IRQ:
		isa_irq = (struct vm_isa_irq *)data;
		error = vatpic_pulse_irq(sc->vm, isa_irq->atpic_irq);
		if (error == 0 && isa_irq->ioapic_irq != -1)
			error = vioapic_pulse_irq(sc->vm, isa_irq->ioapic_irq);
		break;
	case VM_MAP_MEMORY:
		seg = (struct vm_memory_segment *)data;
		error = vm_malloc(sc->vm, seg->gpa, seg->len);
		break;
	case VM_GET_MEMORY_SEG:
		seg = (struct vm_memory_segment *)data;
		seg->len = 0;
		(void)vm_gpabase2memseg(sc->vm, seg->gpa, seg);
		error = 0;
		break;
	case VM_GET_REGISTER:
		vmreg = (struct vm_register *)data;
		error = vm_get_register(sc->vm, vmreg->cpuid, vmreg->regnum,
		    &vmreg->regval);
		break;
	case VM_SET_REGISTER:
		vmreg = (struct vm_register *)data;
		error = vm_set_register(sc->vm, vmreg->cpuid, vmreg->regnum,
		    vmreg->regval);
		break;
	case VM_SET_SEGMENT_DESCRIPTOR:
		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_set_seg_desc(sc->vm, vmsegdesc->cpuid,
		    vmsegdesc->regnum, &vmsegdesc->desc);
		break;
	case VM_GET_SEGMENT_DESCRIPTOR:
		vmsegdesc = (struct vm_seg_desc *)data;
		error = vm_get_seg_desc(sc->vm, vmsegdesc->cpuid,
		    vmsegdesc->regnum, &vmsegdesc->desc);
		break;
	case VM_GET_CAPABILITY:
		vmcap = (struct vm_capability *)data;
		error = vm_get_capability(sc->vm, vmcap->cpuid,
		    vmcap->captype, &vmcap->capval);
		break;
	case VM_SET_CAPABILITY:
		vmcap = (struct vm_capability *)data;
		error = vm_set_capability(sc->vm, vmcap->cpuid,
		    vmcap->captype, vmcap->capval);
		break;
	case VM_SET_X2APIC_STATE:
		x2apic = (struct vm_x2apic *)data;
		error = vm_set_x2apic_state(sc->vm,
		    x2apic->cpuid, x2apic->state);
		break;
	case VM_GET_X2APIC_STATE:
		x2apic = (struct vm_x2apic *)data;
		error = vm_get_x2apic_state(sc->vm,
		    x2apic->cpuid, &x2apic->state);
		break;
	case VM_GET_GPA_PMAP:
		gpapte = (struct vm_gpa_pte *)data;
		pmap_get_mapping(vmspace_pmap(vm_get_vmspace(sc->vm)),
		    gpapte->gpa, gpapte->pte, &gpapte->ptenum);
		error = 0;
		break;
	case VM_GET_HPET_CAPABILITIES:
		error = vhpet_getcap((struct vm_hpet_cap *)data);
		break;
	default:
		error = ENOTTY;
		break;
	}
done:
	if (state_changed == 1) {
		vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
	} else if (state_changed == 2) {
		for (vcpu = 0; vcpu < VM_MAXCPU; vcpu++)
			vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
	}

	/* Make sure that no handler returns a bogus value like ERESTART */
	KASSERT(error >= 0, ("vmmdev_ioctl: invalid error return %d", error));
	return (error);
}
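
/*
 * Illustrative userland invocation of this interface (a sketch, not part of
 * the driver; in practice libvmmapi wraps these ioctls):
 *
 *	int fd = open("/dev/vmm/vm0", O_RDWR);	// "vm0" is a placeholder name
 *	struct vm_run vmrun = { .cpuid = 0 };
 *	if (ioctl(fd, VM_RUN, &vmrun) == 0)
 *		;	// vmrun.vm_exit describes why the vcpu stopped running
 *
 * The KASSERT above ensures such callers never see a kernel-private value
 * like ERESTART leak out of a handler.
 */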
static int
vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot)
{
	int error;
	struct vmmdev_softc *sc;

	sc = vmmdev_lookup2(cdev);
	if (sc != NULL && (nprot & PROT_EXEC) == 0)
		error = vm_get_memobj(sc->vm, *offset, size, offset, object);
	else
		error = EINVAL;
	return (error);
}
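
/*
 * Userland maps guest physical memory by calling mmap(2) on the /dev/vmm
 * node with the file offset equal to the guest physical address (this is
 * what libvmmapi relies on when it sets up guest memory).  Requests for
 * executable mappings are refused above.
 */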
static void
vmmdev_destroy(void *arg)
{
	struct vmmdev_softc *sc = arg;

	if (sc->cdev != NULL)
		destroy_dev(sc->cdev);

	if (sc->vm != NULL)
		vm_destroy(sc->vm);

	if ((sc->flags & VSC_LINKED) != 0) {
		mtx_lock(&vmmdev_mtx);
		SLIST_REMOVE(&head, sc, vmmdev_softc, link);
		mtx_unlock(&vmmdev_mtx);
	}

	free(sc, M_VMMDEV);
}
static int
sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
{
	int error;
	char buf[VM_MAX_NAMELEN];
	struct vmmdev_softc *sc;
	struct cdev *cdev;

	strlcpy(buf, "beavis", sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&vmmdev_mtx);
	sc = vmmdev_lookup(buf);
	if (sc == NULL || sc->cdev == NULL) {
		mtx_unlock(&vmmdev_mtx);
		return (EINVAL);
	}

	/*
	 * The 'cdev' will be destroyed asynchronously when 'si_threadcount'
	 * goes down to 0 so we should not do it again in the callback.
	 */
	cdev = sc->cdev;
	sc->cdev = NULL;
	mtx_unlock(&vmmdev_mtx);

	/*
	 * Schedule the 'cdev' to be destroyed:
	 *
	 * - any new operations on this 'cdev' will return an error (ENXIO).
	 *
	 * - when the 'si_threadcount' dwindles down to zero the 'cdev' will
	 *   be destroyed and the callback will be invoked in a taskqueue
	 *   context.
	 */
	destroy_dev_sched_cb(cdev, vmmdev_destroy, sc);
	return (0);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy, CTLTYPE_STRING | CTLFLAG_RW,
    NULL, 0, sysctl_vmm_destroy, "A", NULL);
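
/*
 * Usage: writing a VM name to the sysctl tears the instance down, e.g.
 *
 *	sysctl hw.vmm.destroy=vm0
 *
 * ("vm0" is a placeholder name.)
 */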
static struct cdevsw vmmdevsw = {
	.d_name		= "vmmdev",
	.d_version	= D_VERSION,
	.d_ioctl	= vmmdev_ioctl,
	.d_mmap_single	= vmmdev_mmap_single,
	.d_read		= vmmdev_rw,
	.d_write	= vmmdev_rw,
};
static int
sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct vm *vm;
	struct cdev *cdev;
	struct vmmdev_softc *sc, *sc2;
	char buf[VM_MAX_NAMELEN];

	strlcpy(buf, "beavis", sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&vmmdev_mtx);
	sc = vmmdev_lookup(buf);
	mtx_unlock(&vmmdev_mtx);
	if (sc != NULL)
		return (EEXIST);

	error = vm_create(buf, &vm);
	if (error != 0)
		return (error);

	sc = malloc(sizeof(struct vmmdev_softc), M_VMMDEV, M_WAITOK | M_ZERO);
	sc->vm = vm;

	/*
	 * Lookup the name again just in case somebody sneaked in when we
	 * dropped the mutex.
	 */
	mtx_lock(&vmmdev_mtx);
	sc2 = vmmdev_lookup(buf);
	if (sc2 == NULL) {
		SLIST_INSERT_HEAD(&head, sc, link);
		sc->flags |= VSC_LINKED;
	}
	mtx_unlock(&vmmdev_mtx);

	if (sc2 != NULL) {
		vmmdev_destroy(sc);
		return (EEXIST);
	}

	error = make_dev_p(MAKEDEV_CHECKNAME, &cdev, &vmmdevsw, NULL,
	    UID_ROOT, GID_WHEEL, 0600, "vmm/%s", buf);
	if (error != 0) {
		vmmdev_destroy(sc);
		return (error);
	}

	mtx_lock(&vmmdev_mtx);
	sc->cdev = cdev;
	sc->cdev->si_drv1 = sc;
	mtx_unlock(&vmmdev_mtx);

	return (0);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, create, CTLTYPE_STRING | CTLFLAG_RW,
    NULL, 0, sysctl_vmm_create, "A", NULL);
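
/*
 * Usage: a VM instance is created by writing its name to the sysctl, which
 * also creates the corresponding device node, e.g.
 *
 *	sysctl hw.vmm.create=vm0	->	/dev/vmm/vm0
 *
 * ("vm0" is a placeholder name; bhyve's libvmmapi issues this sysctl on the
 * caller's behalf when a new VM is created.)
 */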
void
vmmdev_init(void)
{
	mtx_init(&vmmdev_mtx, "vmm device mutex", NULL, MTX_DEF);
}

int
vmmdev_cleanup(void)
{
	int error;

	if (SLIST_EMPTY(&head))
		error = 0;
	else
		error = EBUSY;

	return (error);
}