/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/jail.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/sysctl.h>
#include <sys/libkern.h>
#include <sys/ioccom.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>
#include <machine/vmm_snapshot.h>
#include <x86/apicreg.h>

#include "vmm_lapic.h"
#include "vmm_stat.h"
#include "vmm_mem.h"
#include "io/ppt.h"
#include "io/vatpic.h"
#include "io/vioapic.h"
#include "io/vhpet.h"
#include "io/vrtc.h"

struct devmem_softc {
        int     segid;
        char    *name;
        struct cdev *cdev;
        struct vmmdev_softc *sc;
        SLIST_ENTRY(devmem_softc) link;
};

struct vmmdev_softc {
        struct vm       *vm;            /* vm instance cookie */
        struct cdev     *cdev;
        SLIST_ENTRY(vmmdev_softc) link;
        SLIST_HEAD(, devmem_softc) devmem;
        int             flags;
};
#define VSC_LINKED              0x01

static SLIST_HEAD(, vmmdev_softc) head;

static unsigned pr_allow_flag;
static struct mtx vmmdev_mtx;

static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");

SYSCTL_DECL(_hw_vmm);

static int vmm_priv_check(struct ucred *ucred);
static int devmem_create_cdev(const char *vmname, int id, char *devmem);
static void devmem_destroy(void *arg);

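/*
 * Illustrative note: vmm_priv_check() below gates every entry point in
 * this file.  Unjailed users always pass; jailed users need the
 * allow-flag registered by prison_add_allow() in vmmdev_init() at the
 * bottom of this file, which surfaces as the jail(8) parameter
 * "allow.vmm".  A hypothetical invocation granting a jail access to
 * vmm(4):
 *
 *      jail -c name=bhyvejail allow.vmm=1 path=/ command=/bin/sh
 */
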
static int
vmm_priv_check(struct ucred *ucred)
{

        if (jailed(ucred) &&
            !(ucred->cr_prison->pr_allow & pr_allow_flag))
                return (EPERM);

        return (0);
}

static int
vcpu_lock_one(struct vmmdev_softc *sc, int vcpu)
{
        int error;

        if (vcpu < 0 || vcpu >= vm_get_maxcpus(sc->vm))
                return (EINVAL);

        error = vcpu_set_state(sc->vm, vcpu, VCPU_FROZEN, true);
        return (error);
}

static void
vcpu_unlock_one(struct vmmdev_softc *sc, int vcpu)
{
        enum vcpu_state state;

        state = vcpu_get_state(sc->vm, vcpu, NULL);
        if (state != VCPU_FROZEN) {
                panic("vcpu %s(%d) has invalid state %d", vm_name(sc->vm),
                    vcpu, state);
        }

        vcpu_set_state(sc->vm, vcpu, VCPU_IDLE, false);
}

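/*
 * Usage sketch (added commentary, derived from the helpers above): a
 * vcpu is "locked" by forcing it into VCPU_FROZEN, which keeps it from
 * re-entering the guest.  Callers pair the helpers like so:
 *
 *      error = vcpu_lock_one(sc, vcpu);
 *      if (error == 0) {
 *              ...inspect or modify vcpu state...
 *              vcpu_unlock_one(sc, vcpu);
 *      }
 */
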
static int
vcpu_lock_all(struct vmmdev_softc *sc)
{
        int error, vcpu;
        uint16_t maxcpus;

        error = 0;
        maxcpus = vm_get_maxcpus(sc->vm);
        for (vcpu = 0; vcpu < maxcpus; vcpu++) {
                error = vcpu_lock_one(sc, vcpu);
                if (error)
                        break;
        }

        if (error) {
                while (--vcpu >= 0)
                        vcpu_unlock_one(sc, vcpu);
        }

        return (error);
}

static void
vcpu_unlock_all(struct vmmdev_softc *sc)
{
        int vcpu;
        uint16_t maxcpus;

        maxcpus = vm_get_maxcpus(sc->vm);
        for (vcpu = 0; vcpu < maxcpus; vcpu++)
                vcpu_unlock_one(sc, vcpu);
}

static struct vmmdev_softc *
vmmdev_lookup(const char *name)
{
        struct vmmdev_softc *sc;

#ifdef notyet   /* XXX kernel is not compiled with invariants */
        mtx_assert(&vmmdev_mtx, MA_OWNED);
#endif

        SLIST_FOREACH(sc, &head, link) {
                if (strcmp(name, vm_name(sc->vm)) == 0)
                        break;
        }

        return (sc);
}

static struct vmmdev_softc *
vmmdev_lookup2(struct cdev *cdev)
{

        return (cdev->si_drv1);
}

static int
vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags)
{
        int error, off, c, prot;
        vm_paddr_t gpa, maxaddr;
        void *hpa, *cookie;
        struct vmmdev_softc *sc;
        uint16_t lastcpu;

        error = vmm_priv_check(curthread->td_ucred);
        if (error)
                return (error);

        sc = vmmdev_lookup2(cdev);
        if (sc == NULL)
                return (ENXIO);

        /*
         * Get a read lock on the guest memory map by freezing any vcpu.
         */
        lastcpu = vm_get_maxcpus(sc->vm) - 1;
        error = vcpu_lock_one(sc, lastcpu);
        if (error)
                return (error);

        prot = (uio->uio_rw == UIO_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
        maxaddr = vmm_sysmem_maxaddr(sc->vm);
        while (uio->uio_resid > 0 && error == 0) {
                gpa = uio->uio_offset;
                off = gpa & PAGE_MASK;
                c = min(uio->uio_resid, PAGE_SIZE - off);

                /*
                 * The VM has a hole in its physical memory map. If we want to
                 * use 'dd' to inspect memory beyond the hole we need to
                 * provide bogus data for memory that lies in the hole.
                 *
                 * Since this device does not support lseek(2), dd(1) will
                 * read(2) blocks of data to simulate the lseek(2).
                 */
                hpa = vm_gpa_hold(sc->vm, lastcpu, gpa, c,
                    prot, &cookie);
                if (hpa == NULL) {
                        if (uio->uio_rw == UIO_READ && gpa < maxaddr)
                                error = uiomove(__DECONST(void *, zero_region),
                                    c, uio);
                        else
                                error = EFAULT;
                } else {
                        error = uiomove(hpa, c, uio);
                        vm_gpa_release(cookie);
                }
        }
        vcpu_unlock_one(sc, lastcpu);
        return (error);
}
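
/*
 * Usage sketch (added commentary; the VM name is hypothetical): because
 * the cdev implements d_read and d_write, guest physical memory can be
 * inspected from the host, e.g.:
 *
 *      dd if=/dev/vmm/testvm bs=4k iseek=256 count=1 | hexdump -C
 *
 * dd(1) simulates the missing lseek(2) by reading and discarding
 * blocks, which is why holes in the map return zeroes on read instead
 * of failing.
 */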

CTASSERT(sizeof(((struct vm_memseg *)0)->name) >= VM_MAX_SUFFIXLEN + 1);

static int
get_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len)
{
        struct devmem_softc *dsc;
        int error;
        bool sysmem;

        error = vm_get_memseg(sc->vm, mseg->segid, &mseg->len, &sysmem, NULL);
        if (error || mseg->len == 0)
                return (error);

        if (!sysmem) {
                SLIST_FOREACH(dsc, &sc->devmem, link) {
                        if (dsc->segid == mseg->segid)
                                break;
                }
                KASSERT(dsc != NULL, ("%s: devmem segment %d not found",
                    __func__, mseg->segid));
                error = copystr(dsc->name, mseg->name, len, NULL);
        } else {
                bzero(mseg->name, len);
        }

        return (error);
}

static int
alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len)
{
        char *name;
        int error;
        bool sysmem;

        error = 0;
        name = NULL;
        sysmem = true;

        /*
         * The allocation is lengthened by 1 to hold a terminating NUL.  It'll
         * be stripped off when devfs processes the full string.
         */
        if (VM_MEMSEG_NAME(mseg)) {
                sysmem = false;
                name = malloc(len, M_VMMDEV, M_WAITOK);
                error = copystr(mseg->name, name, len, NULL);
                if (error)
                        goto done;
        }

        error = vm_alloc_memseg(sc->vm, mseg->segid, mseg->len, sysmem);
        if (error)
                goto done;

        if (VM_MEMSEG_NAME(mseg)) {
                error = devmem_create_cdev(vm_name(sc->vm), mseg->segid, name);
                if (error)
                        vm_free_memseg(sc->vm, mseg->segid);
                else
                        name = NULL;    /* freed when 'cdev' is destroyed */
        }
done:
        free(name, M_VMMDEV);
        return (error);
}
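
/*
 * Note (follows from the "vmm.io/%s.%s" format in devmem_create_cdev()
 * below): a named, non-sysmem segment gets its own cdev under
 * /dev/vmm.io/.  For example, a segment named "vga" in a VM called
 * "testvm" would appear on the host as /dev/vmm.io/testvm.vga; both
 * names here are hypothetical.
 */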

static int
vm_get_register_set(struct vm *vm, int vcpu, unsigned int count, int *regnum,
    uint64_t *regval)
{
        int error, i;

        error = 0;
        for (i = 0; i < count; i++) {
                error = vm_get_register(vm, vcpu, regnum[i], &regval[i]);
                if (error)
                        break;
        }
        return (error);
}

static int
vm_set_register_set(struct vm *vm, int vcpu, unsigned int count, int *regnum,
    uint64_t *regval)
{
        int error, i;

        error = 0;
        for (i = 0; i < count; i++) {
                error = vm_set_register(vm, vcpu, regnum[i], regval[i]);
                if (error)
                        break;
        }
        return (error);
}

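/*
 * Userland usage sketch for the helpers above (illustrative; assumes a
 * descriptor 'vmfd' opened on /dev/vmm/<name>): VM_GET_REGISTER_SET and
 * VM_SET_REGISTER_SET move parallel arrays of register numbers and
 * values in a single ioctl(2):
 *
 *      int regnums[2] = { VM_REG_GUEST_RIP, VM_REG_GUEST_RSP };
 *      uint64_t regvals[2];
 *      struct vm_register_set vrs = {
 *              .cpuid = 0,
 *              .count = 2,
 *              .regnums = regnums,
 *              .regvals = regvals,
 *      };
 *      error = ioctl(vmfd, VM_GET_REGISTER_SET, &vrs);
 */
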
static int
vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
             struct thread *td)
{
        int error, vcpu, state_changed, size;
        cpuset_t *cpuset;
        struct vmmdev_softc *sc;
        struct vm_register *vmreg;
        struct vm_seg_desc *vmsegdesc;
        struct vm_register_set *vmregset;
        struct vm_run *vmrun;
        struct vm_exception *vmexc;
        struct vm_lapic_irq *vmirq;
        struct vm_lapic_msi *vmmsi;
        struct vm_ioapic_irq *ioapic_irq;
        struct vm_isa_irq *isa_irq;
        struct vm_isa_irq_trigger *isa_irq_trigger;
        struct vm_capability *vmcap;
        struct vm_pptdev *pptdev;
        struct vm_pptdev_mmio *pptmmio;
        struct vm_pptdev_msi *pptmsi;
        struct vm_pptdev_msix *pptmsix;
        struct vm_nmi *vmnmi;
        struct vm_stats *vmstats;
        struct vm_stat_desc *statdesc;
        struct vm_x2apic *x2apic;
        struct vm_gpa_pte *gpapte;
        struct vm_suspend *vmsuspend;
        struct vm_gla2gpa *gg;
        struct vm_activate_cpu *vac;
        struct vm_cpuset *vm_cpuset;
        struct vm_intinfo *vmii;
        struct vm_rtc_time *rtctime;
        struct vm_rtc_data *rtcdata;
        struct vm_memmap *mm;
        struct vm_cpu_topology *topology;
        struct vm_readwrite_kernemu_device *kernemu;
        uint64_t *regvals;
        int *regnums;
#ifdef BHYVE_SNAPSHOT
        struct vm_snapshot_meta *snapshot_meta;
#endif

        error = vmm_priv_check(curthread->td_ucred);
        if (error)
                return (error);

        sc = vmmdev_lookup2(cdev);
        if (sc == NULL)
                return (ENXIO);

        vcpu = -1;
        state_changed = 0;

        /*
         * Some VMM ioctls can operate only on vcpus that are not running.
         */
        switch (cmd) {
        case VM_RUN:
        case VM_GET_REGISTER:
        case VM_SET_REGISTER:
        case VM_GET_SEGMENT_DESCRIPTOR:
        case VM_SET_SEGMENT_DESCRIPTOR:
        case VM_GET_REGISTER_SET:
        case VM_SET_REGISTER_SET:
        case VM_INJECT_EXCEPTION:
        case VM_GET_CAPABILITY:
        case VM_SET_CAPABILITY:
        case VM_PPTDEV_MSI:
        case VM_PPTDEV_MSIX:
        case VM_SET_X2APIC_STATE:
        case VM_GLA2GPA:
        case VM_GLA2GPA_NOFAULT:
        case VM_ACTIVATE_CPU:
        case VM_SET_INTINFO:
        case VM_GET_INTINFO:
        case VM_RESTART_INSTRUCTION:
                /*
                 * XXX fragile, handle with care
                 * Assumes that the first field of the ioctl data is the vcpu.
                 */
                vcpu = *(int *)data;
                error = vcpu_lock_one(sc, vcpu);
                if (error)
                        goto done;
                state_changed = 1;
                break;

        case VM_MAP_PPTDEV_MMIO:
        case VM_BIND_PPTDEV:
        case VM_UNBIND_PPTDEV:
#ifdef COMPAT_FREEBSD12
        case VM_ALLOC_MEMSEG_FBSD12:
#endif
        case VM_ALLOC_MEMSEG:
        case VM_MMAP_MEMSEG:
        case VM_REINIT:
                /*
                 * ioctls that operate on the entire virtual machine must
                 * prevent all vcpus from running.
                 */
                error = vcpu_lock_all(sc);
                if (error)
                        goto done;
                state_changed = 2;
                break;

#ifdef COMPAT_FREEBSD12
        case VM_GET_MEMSEG_FBSD12:
#endif
        case VM_GET_MEMSEG:
        case VM_MMAP_GETNEXT:
                /*
                 * Lock a vcpu to make sure that the memory map cannot be
                 * modified while it is being inspected.
                 */
                vcpu = vm_get_maxcpus(sc->vm) - 1;
                error = vcpu_lock_one(sc, vcpu);
                if (error)
                        goto done;
                state_changed = 1;
                break;

        default:
                break;
        }

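        /*
         * Summary of the convention established above: state_changed == 1
         * means a single vcpu ('vcpu') was frozen and must be released with
         * vcpu_unlock_one(); state_changed == 2 means vcpu_lock_all() froze
         * the whole machine and vcpu_unlock_all() must run before returning.
         */
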
        switch (cmd) {
        case VM_RUN:
                vmrun = (struct vm_run *)data;
                error = vm_run(sc->vm, vmrun);
                break;
        case VM_SUSPEND:
                vmsuspend = (struct vm_suspend *)data;
                error = vm_suspend(sc->vm, vmsuspend->how);
                break;
        case VM_REINIT:
                error = vm_reinit(sc->vm);
                break;
        case VM_STAT_DESC: {
                statdesc = (struct vm_stat_desc *)data;
                error = vmm_stat_desc_copy(statdesc->index,
                                        statdesc->desc, sizeof(statdesc->desc));
                break;
        }
        case VM_STATS: {
                CTASSERT(MAX_VM_STATS >= MAX_VMM_STAT_ELEMS);
                vmstats = (struct vm_stats *)data;
                getmicrotime(&vmstats->tv);
                error = vmm_stat_copy(sc->vm, vmstats->cpuid,
                                      &vmstats->num_entries, vmstats->statbuf);
                break;
        }
        case VM_PPTDEV_MSI:
                pptmsi = (struct vm_pptdev_msi *)data;
                error = ppt_setup_msi(sc->vm, pptmsi->vcpu,
                                      pptmsi->bus, pptmsi->slot, pptmsi->func,
                                      pptmsi->addr, pptmsi->msg,
                                      pptmsi->numvec);
                break;
        case VM_PPTDEV_MSIX:
                pptmsix = (struct vm_pptdev_msix *)data;
                error = ppt_setup_msix(sc->vm, pptmsix->vcpu,
                                       pptmsix->bus, pptmsix->slot,
                                       pptmsix->func, pptmsix->idx,
                                       pptmsix->addr, pptmsix->msg,
                                       pptmsix->vector_control);
                break;
        case VM_PPTDEV_DISABLE_MSIX:
                pptdev = (struct vm_pptdev *)data;
                error = ppt_disable_msix(sc->vm, pptdev->bus, pptdev->slot,
                                         pptdev->func);
                break;
        case VM_MAP_PPTDEV_MMIO:
                pptmmio = (struct vm_pptdev_mmio *)data;
                error = ppt_map_mmio(sc->vm, pptmmio->bus, pptmmio->slot,
                                     pptmmio->func, pptmmio->gpa, pptmmio->len,
                                     pptmmio->hpa);
                break;
        case VM_BIND_PPTDEV:
                pptdev = (struct vm_pptdev *)data;
                error = vm_assign_pptdev(sc->vm, pptdev->bus, pptdev->slot,
                                         pptdev->func);
                break;
        case VM_UNBIND_PPTDEV:
                pptdev = (struct vm_pptdev *)data;
                error = vm_unassign_pptdev(sc->vm, pptdev->bus, pptdev->slot,
                                           pptdev->func);
                break;
        case VM_INJECT_EXCEPTION:
                vmexc = (struct vm_exception *)data;
                error = vm_inject_exception(sc->vm, vmexc->cpuid,
                    vmexc->vector, vmexc->error_code_valid, vmexc->error_code,
                    vmexc->restart_instruction);
                break;
        case VM_INJECT_NMI:
                vmnmi = (struct vm_nmi *)data;
                error = vm_inject_nmi(sc->vm, vmnmi->cpuid);
                break;
        case VM_LAPIC_IRQ:
                vmirq = (struct vm_lapic_irq *)data;
                error = lapic_intr_edge(sc->vm, vmirq->cpuid, vmirq->vector);
                break;
        case VM_LAPIC_LOCAL_IRQ:
                vmirq = (struct vm_lapic_irq *)data;
                error = lapic_set_local_intr(sc->vm, vmirq->cpuid,
                    vmirq->vector);
                break;
        case VM_LAPIC_MSI:
                vmmsi = (struct vm_lapic_msi *)data;
                error = lapic_intr_msi(sc->vm, vmmsi->addr, vmmsi->msg);
                break;
        case VM_IOAPIC_ASSERT_IRQ:
                ioapic_irq = (struct vm_ioapic_irq *)data;
                error = vioapic_assert_irq(sc->vm, ioapic_irq->irq);
                break;
        case VM_IOAPIC_DEASSERT_IRQ:
                ioapic_irq = (struct vm_ioapic_irq *)data;
                error = vioapic_deassert_irq(sc->vm, ioapic_irq->irq);
                break;
        case VM_IOAPIC_PULSE_IRQ:
                ioapic_irq = (struct vm_ioapic_irq *)data;
                error = vioapic_pulse_irq(sc->vm, ioapic_irq->irq);
                break;
        case VM_IOAPIC_PINCOUNT:
                *(int *)data = vioapic_pincount(sc->vm);
                break;
        case VM_SET_KERNEMU_DEV:
        case VM_GET_KERNEMU_DEV: {
                mem_region_write_t mwrite;
                mem_region_read_t mread;
                bool arg;

                kernemu = (void *)data;

                if (kernemu->access_width > 0)
                        size = (1u << kernemu->access_width);
                else
                        size = 1;

                if (kernemu->gpa >= DEFAULT_APIC_BASE && kernemu->gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
                        mread = lapic_mmio_read;
                        mwrite = lapic_mmio_write;
                } else if (kernemu->gpa >= VIOAPIC_BASE && kernemu->gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
                        mread = vioapic_mmio_read;
                        mwrite = vioapic_mmio_write;
                } else if (kernemu->gpa >= VHPET_BASE && kernemu->gpa < VHPET_BASE + VHPET_SIZE) {
                        mread = vhpet_mmio_read;
                        mwrite = vhpet_mmio_write;
                } else {
                        error = EINVAL;
                        break;
                }

                if (cmd == VM_SET_KERNEMU_DEV)
                        error = mwrite(sc->vm, kernemu->vcpuid, kernemu->gpa,
                            kernemu->value, size, &arg);
                else
                        error = mread(sc->vm, kernemu->vcpuid, kernemu->gpa,
                            &kernemu->value, size, &arg);
                break;
                }
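        /*
         * Note on the kernemu case above: access_width is the log2 of the
         * access size, so access_width == 2 yields a 4-byte access.  Only
         * the three kernel-emulated MMIO ranges (local APIC page, I/O APIC,
         * HPET) are reachable this way; any other gpa returns EINVAL.
         */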
        case VM_ISA_ASSERT_IRQ:
                isa_irq = (struct vm_isa_irq *)data;
                error = vatpic_assert_irq(sc->vm, isa_irq->atpic_irq);
                if (error == 0 && isa_irq->ioapic_irq != -1)
                        error = vioapic_assert_irq(sc->vm,
                            isa_irq->ioapic_irq);
                break;
        case VM_ISA_DEASSERT_IRQ:
                isa_irq = (struct vm_isa_irq *)data;
                error = vatpic_deassert_irq(sc->vm, isa_irq->atpic_irq);
                if (error == 0 && isa_irq->ioapic_irq != -1)
                        error = vioapic_deassert_irq(sc->vm,
                            isa_irq->ioapic_irq);
                break;
        case VM_ISA_PULSE_IRQ:
                isa_irq = (struct vm_isa_irq *)data;
                error = vatpic_pulse_irq(sc->vm, isa_irq->atpic_irq);
                if (error == 0 && isa_irq->ioapic_irq != -1)
                        error = vioapic_pulse_irq(sc->vm, isa_irq->ioapic_irq);
                break;
        case VM_ISA_SET_IRQ_TRIGGER:
                isa_irq_trigger = (struct vm_isa_irq_trigger *)data;
                error = vatpic_set_irq_trigger(sc->vm,
                    isa_irq_trigger->atpic_irq, isa_irq_trigger->trigger);
                break;
        case VM_MMAP_GETNEXT:
                mm = (struct vm_memmap *)data;
                error = vm_mmap_getnext(sc->vm, &mm->gpa, &mm->segid,
                    &mm->segoff, &mm->len, &mm->prot, &mm->flags);
                break;
        case VM_MMAP_MEMSEG:
                mm = (struct vm_memmap *)data;
                error = vm_mmap_memseg(sc->vm, mm->gpa, mm->segid, mm->segoff,
                    mm->len, mm->prot, mm->flags);
                break;
#ifdef COMPAT_FREEBSD12
        case VM_ALLOC_MEMSEG_FBSD12:
                error = alloc_memseg(sc, (struct vm_memseg *)data,
                    sizeof(((struct vm_memseg_fbsd12 *)0)->name));
                break;
#endif
        case VM_ALLOC_MEMSEG:
                error = alloc_memseg(sc, (struct vm_memseg *)data,
                    sizeof(((struct vm_memseg *)0)->name));
                break;
#ifdef COMPAT_FREEBSD12
        case VM_GET_MEMSEG_FBSD12:
                error = get_memseg(sc, (struct vm_memseg *)data,
                    sizeof(((struct vm_memseg_fbsd12 *)0)->name));
                break;
#endif
        case VM_GET_MEMSEG:
                error = get_memseg(sc, (struct vm_memseg *)data,
                    sizeof(((struct vm_memseg *)0)->name));
                break;
        case VM_GET_REGISTER:
                vmreg = (struct vm_register *)data;
                error = vm_get_register(sc->vm, vmreg->cpuid, vmreg->regnum,
                                        &vmreg->regval);
                break;
        case VM_SET_REGISTER:
                vmreg = (struct vm_register *)data;
                error = vm_set_register(sc->vm, vmreg->cpuid, vmreg->regnum,
                                        vmreg->regval);
                break;
        case VM_SET_SEGMENT_DESCRIPTOR:
                vmsegdesc = (struct vm_seg_desc *)data;
                error = vm_set_seg_desc(sc->vm, vmsegdesc->cpuid,
                                        vmsegdesc->regnum,
                                        &vmsegdesc->desc);
                break;
        case VM_GET_SEGMENT_DESCRIPTOR:
                vmsegdesc = (struct vm_seg_desc *)data;
                error = vm_get_seg_desc(sc->vm, vmsegdesc->cpuid,
                                        vmsegdesc->regnum,
                                        &vmsegdesc->desc);
                break;
        case VM_GET_REGISTER_SET:
                vmregset = (struct vm_register_set *)data;
                if (vmregset->count > VM_REG_LAST) {
                        error = EINVAL;
                        break;
                }
                regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV,
                    M_WAITOK);
                regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV,
                    M_WAITOK);
                error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) *
                    vmregset->count);
                if (error == 0)
                        error = vm_get_register_set(sc->vm, vmregset->cpuid,
                            vmregset->count, regnums, regvals);
                if (error == 0)
                        error = copyout(regvals, vmregset->regvals,
                            sizeof(regvals[0]) * vmregset->count);
                free(regvals, M_VMMDEV);
                free(regnums, M_VMMDEV);
                break;
        case VM_SET_REGISTER_SET:
                vmregset = (struct vm_register_set *)data;
                if (vmregset->count > VM_REG_LAST) {
                        error = EINVAL;
                        break;
                }
                regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV,
                    M_WAITOK);
                regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV,
                    M_WAITOK);
                error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) *
                    vmregset->count);
                if (error == 0)
                        error = copyin(vmregset->regvals, regvals,
                            sizeof(regvals[0]) * vmregset->count);
                if (error == 0)
                        error = vm_set_register_set(sc->vm, vmregset->cpuid,
                            vmregset->count, regnums, regvals);
                free(regvals, M_VMMDEV);
                free(regnums, M_VMMDEV);
                break;
        case VM_GET_CAPABILITY:
                vmcap = (struct vm_capability *)data;
                error = vm_get_capability(sc->vm, vmcap->cpuid,
                                          vmcap->captype,
                                          &vmcap->capval);
                break;
        case VM_SET_CAPABILITY:
                vmcap = (struct vm_capability *)data;
                error = vm_set_capability(sc->vm, vmcap->cpuid,
                                          vmcap->captype,
                                          vmcap->capval);
                break;
        case VM_SET_X2APIC_STATE:
                x2apic = (struct vm_x2apic *)data;
                error = vm_set_x2apic_state(sc->vm,
                                            x2apic->cpuid, x2apic->state);
                break;
        case VM_GET_X2APIC_STATE:
                x2apic = (struct vm_x2apic *)data;
                error = vm_get_x2apic_state(sc->vm,
                                            x2apic->cpuid, &x2apic->state);
                break;
        case VM_GET_GPA_PMAP:
                gpapte = (struct vm_gpa_pte *)data;
                pmap_get_mapping(vmspace_pmap(vm_get_vmspace(sc->vm)),
                                 gpapte->gpa, gpapte->pte, &gpapte->ptenum);
                error = 0;
                break;
        case VM_GET_HPET_CAPABILITIES:
                error = vhpet_getcap((struct vm_hpet_cap *)data);
                break;
        case VM_GLA2GPA: {
                CTASSERT(PROT_READ == VM_PROT_READ);
                CTASSERT(PROT_WRITE == VM_PROT_WRITE);
                CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
                gg = (struct vm_gla2gpa *)data;
                error = vm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
                    gg->prot, &gg->gpa, &gg->fault);
                KASSERT(error == 0 || error == EFAULT,
                    ("%s: vm_gla2gpa unknown error %d", __func__, error));
                break;
        }
        case VM_GLA2GPA_NOFAULT:
                gg = (struct vm_gla2gpa *)data;
                error = vm_gla2gpa_nofault(sc->vm, gg->vcpuid, &gg->paging,
                    gg->gla, gg->prot, &gg->gpa, &gg->fault);
                KASSERT(error == 0 || error == EFAULT,
                    ("%s: vm_gla2gpa unknown error %d", __func__, error));
                break;
        case VM_ACTIVATE_CPU:
                vac = (struct vm_activate_cpu *)data;
                error = vm_activate_cpu(sc->vm, vac->vcpuid);
                break;
        case VM_GET_CPUS:
                error = 0;
                vm_cpuset = (struct vm_cpuset *)data;
                size = vm_cpuset->cpusetsize;
                if (size < sizeof(cpuset_t) || size > CPU_MAXSIZE / NBBY) {
                        error = ERANGE;
                        break;
                }
                cpuset = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
                if (vm_cpuset->which == VM_ACTIVE_CPUS)
                        *cpuset = vm_active_cpus(sc->vm);
                else if (vm_cpuset->which == VM_SUSPENDED_CPUS)
                        *cpuset = vm_suspended_cpus(sc->vm);
                else if (vm_cpuset->which == VM_DEBUG_CPUS)
                        *cpuset = vm_debug_cpus(sc->vm);
                else
                        error = EINVAL;
                if (error == 0)
                        error = copyout(cpuset, vm_cpuset->cpus, size);
                free(cpuset, M_TEMP);
                break;
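        /*
         * Note on VM_GET_CPUS above: the caller-supplied cpusetsize must be
         * at least sizeof(cpuset_t) on the host and at most CPU_MAXSIZE /
         * NBBY bytes, so a userland built with a mismatched CPU_SETSIZE gets
         * ERANGE instead of a truncated or over-long copyout.
         */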
        case VM_SUSPEND_CPU:
                vac = (struct vm_activate_cpu *)data;
                error = vm_suspend_cpu(sc->vm, vac->vcpuid);
                break;
        case VM_RESUME_CPU:
                vac = (struct vm_activate_cpu *)data;
                error = vm_resume_cpu(sc->vm, vac->vcpuid);
                break;
        case VM_SET_INTINFO:
                vmii = (struct vm_intinfo *)data;
                error = vm_exit_intinfo(sc->vm, vmii->vcpuid, vmii->info1);
                break;
        case VM_GET_INTINFO:
                vmii = (struct vm_intinfo *)data;
                error = vm_get_intinfo(sc->vm, vmii->vcpuid, &vmii->info1,
                    &vmii->info2);
                break;
        case VM_RTC_WRITE:
                rtcdata = (struct vm_rtc_data *)data;
                error = vrtc_nvram_write(sc->vm, rtcdata->offset,
                    rtcdata->value);
                break;
        case VM_RTC_READ:
                rtcdata = (struct vm_rtc_data *)data;
                error = vrtc_nvram_read(sc->vm, rtcdata->offset,
                    &rtcdata->value);
                break;
        case VM_RTC_SETTIME:
                rtctime = (struct vm_rtc_time *)data;
                error = vrtc_set_time(sc->vm, rtctime->secs);
                break;
        case VM_RTC_GETTIME:
                error = 0;
                rtctime = (struct vm_rtc_time *)data;
                rtctime->secs = vrtc_get_time(sc->vm);
                break;
        case VM_RESTART_INSTRUCTION:
                error = vm_restart_instruction(sc->vm, vcpu);
                break;
        case VM_SET_TOPOLOGY:
                topology = (struct vm_cpu_topology *)data;
                error = vm_set_topology(sc->vm, topology->sockets,
                    topology->cores, topology->threads, topology->maxcpus);
                break;
        case VM_GET_TOPOLOGY:
                topology = (struct vm_cpu_topology *)data;
                vm_get_topology(sc->vm, &topology->sockets, &topology->cores,
                    &topology->threads, &topology->maxcpus);
                error = 0;
                break;
#ifdef BHYVE_SNAPSHOT
        case VM_SNAPSHOT_REQ:
                snapshot_meta = (struct vm_snapshot_meta *)data;
                error = vm_snapshot_req(sc->vm, snapshot_meta);
                break;
        case VM_RESTORE_TIME:
                error = vm_restore_time(sc->vm);
                break;
#endif
        default:
                error = ENOTTY;
                break;
        }

        if (state_changed == 1)
                vcpu_unlock_one(sc, vcpu);
        else if (state_changed == 2)
                vcpu_unlock_all(sc);

done:
        /*
         * Make sure that no handler returns a kernel-internal
         * error value to userspace.
         */
        KASSERT(error == ERESTART || error >= 0,
            ("vmmdev_ioctl: invalid error return %d", error));
        return (error);
}

static int
vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t mapsize,
    struct vm_object **objp, int nprot)
{
        struct vmmdev_softc *sc;
        vm_paddr_t gpa;
        size_t len;
        vm_ooffset_t segoff, first, last;
        int error, found, segid;
        uint16_t lastcpu;
        bool sysmem;

        error = vmm_priv_check(curthread->td_ucred);
        if (error)
                return (error);

        first = *offset;
        last = first + mapsize;
        if ((nprot & PROT_EXEC) || first < 0 || first >= last)
                return (EINVAL);

        sc = vmmdev_lookup2(cdev);
        if (sc == NULL) {
                /* virtual machine is in the process of being created */
                return (EINVAL);
        }

        /*
         * Get a read lock on the guest memory map by freezing any vcpu.
         */
        lastcpu = vm_get_maxcpus(sc->vm) - 1;
        error = vcpu_lock_one(sc, lastcpu);
        if (error)
                return (error);

        gpa = 0;
        found = 0;
        while (!found) {
                error = vm_mmap_getnext(sc->vm, &gpa, &segid, &segoff, &len,
                    NULL, NULL);
                if (error)
                        break;

                if (first >= gpa && last <= gpa + len)
                        found = 1;
                else
                        gpa += len;
        }

        if (found) {
                error = vm_get_memseg(sc->vm, segid, &len, &sysmem, objp);
                KASSERT(error == 0 && *objp != NULL,
                    ("%s: invalid memory segment %d", __func__, segid));
                if (sysmem) {
                        vm_object_reference(*objp);
                        *offset = segoff + (first - gpa);
                } else {
                        error = EINVAL;
                }
        }
        vcpu_unlock_one(sc, lastcpu);
        return (error);
}
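
/*
 * mmap(2) usage sketch (illustrative; 'vmfd' and the addresses are
 * hypothetical): vmmdev_mmap_single() lets the host map guest system
 * memory by guest physical address, so
 *
 *      char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *          vmfd, 0xc0000);
 *
 * succeeds only if [0xc0000, 0xc1000) falls entirely inside one sysmem
 * mapping in the guest memory map; devmem segments must be mapped via
 * their own cdev instead (see devmem_mmap_single() below).
 */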

static void
vmmdev_destroy(void *arg)
{
        struct vmmdev_softc *sc = arg;
        struct devmem_softc *dsc;
        int error;

        error = vcpu_lock_all(sc);
        KASSERT(error == 0, ("%s: error %d freezing vcpus", __func__, error));

        while ((dsc = SLIST_FIRST(&sc->devmem)) != NULL) {
                KASSERT(dsc->cdev == NULL, ("%s: devmem not free", __func__));
                SLIST_REMOVE_HEAD(&sc->devmem, link);
                free(dsc->name, M_VMMDEV);
                free(dsc, M_VMMDEV);
        }

        if (sc->cdev != NULL)
                destroy_dev(sc->cdev);

        if (sc->vm != NULL)
                vm_destroy(sc->vm);

        if ((sc->flags & VSC_LINKED) != 0) {
                mtx_lock(&vmmdev_mtx);
                SLIST_REMOVE(&head, sc, vmmdev_softc, link);
                mtx_unlock(&vmmdev_mtx);
        }

        free(sc, M_VMMDEV);
}

static int
sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
{
        struct devmem_softc *dsc;
        struct vmmdev_softc *sc;
        struct cdev *cdev;
        char *buf;
        int error, buflen;

        error = vmm_priv_check(req->td->td_ucred);
        if (error)
                return (error);

        buflen = VM_MAX_NAMELEN + 1;
        buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO);
        strlcpy(buf, "beavis", buflen);
        error = sysctl_handle_string(oidp, buf, buflen, req);
        if (error != 0 || req->newptr == NULL)
                goto out;

        mtx_lock(&vmmdev_mtx);
        sc = vmmdev_lookup(buf);
        if (sc == NULL || sc->cdev == NULL) {
                mtx_unlock(&vmmdev_mtx);
                error = EINVAL;
                goto out;
        }

        /*
         * The 'cdev' will be destroyed asynchronously when 'si_threadcount'
         * goes down to 0 so we should not do it again in the callback.
         *
         * Setting 'sc->cdev' to NULL is also used to indicate that the VM
         * is scheduled for destruction.
         */
        cdev = sc->cdev;
        sc->cdev = NULL;
        mtx_unlock(&vmmdev_mtx);

        /*
         * Schedule all cdevs to be destroyed:
         *
         * - any new operations on the 'cdev' will return an error (ENXIO).
         *
         * - when the 'si_threadcount' dwindles down to zero the 'cdev' will
         *   be destroyed and the callback will be invoked in a taskqueue
         *   context.
         *
         * - the 'devmem' cdevs are destroyed before the virtual machine 'cdev'
         */
        SLIST_FOREACH(dsc, &sc->devmem, link) {
                KASSERT(dsc->cdev != NULL, ("devmem cdev already destroyed"));
                destroy_dev_sched_cb(dsc->cdev, devmem_destroy, dsc);
        }
        destroy_dev_sched_cb(cdev, vmmdev_destroy, sc);
        error = 0;

out:
        free(buf, M_VMMDEV);
        return (error);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_vmm_destroy, "A",
    NULL);
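
/*
 * Usage sketch for the sysctl above (hypothetical VM name):
 *
 *      sysctl hw.vmm.destroy=testvm
 *
 * This schedules /dev/vmm/testvm and any /dev/vmm.io/testvm.* cdevs for
 * destruction; the actual teardown runs from a taskqueue callback once
 * si_threadcount drains to zero.
 */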

static struct cdevsw vmmdevsw = {
        .d_name         = "vmmdev",
        .d_version      = D_VERSION,
        .d_ioctl        = vmmdev_ioctl,
        .d_mmap_single  = vmmdev_mmap_single,
        .d_read         = vmmdev_rw,
        .d_write        = vmmdev_rw,
};

static int
sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
{
        struct vm *vm;
        struct cdev *cdev;
        struct vmmdev_softc *sc, *sc2;
        char *buf;
        int error, buflen;

        error = vmm_priv_check(req->td->td_ucred);
        if (error)
                return (error);

        buflen = VM_MAX_NAMELEN + 1;
        buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO);
        strlcpy(buf, "beavis", buflen);
        error = sysctl_handle_string(oidp, buf, buflen, req);
        if (error != 0 || req->newptr == NULL)
                goto out;

        mtx_lock(&vmmdev_mtx);
        sc = vmmdev_lookup(buf);
        mtx_unlock(&vmmdev_mtx);
        if (sc != NULL) {
                error = EEXIST;
                goto out;
        }

        error = vm_create(buf, &vm);
        if (error != 0)
                goto out;

        sc = malloc(sizeof(struct vmmdev_softc), M_VMMDEV, M_WAITOK | M_ZERO);
        sc->vm = vm;
        SLIST_INIT(&sc->devmem);

        /*
         * Lookup the name again just in case somebody sneaked in when we
         * dropped the lock.
         */
        mtx_lock(&vmmdev_mtx);
        sc2 = vmmdev_lookup(buf);
        if (sc2 == NULL) {
                SLIST_INSERT_HEAD(&head, sc, link);
                sc->flags |= VSC_LINKED;
        }
        mtx_unlock(&vmmdev_mtx);

        if (sc2 != NULL) {
                vmmdev_destroy(sc);
                error = EEXIST;
                goto out;
        }

        error = make_dev_p(MAKEDEV_CHECKNAME, &cdev, &vmmdevsw, NULL,
                           UID_ROOT, GID_WHEEL, 0600, "vmm/%s", buf);
        if (error != 0) {
                vmmdev_destroy(sc);
                goto out;
        }

        mtx_lock(&vmmdev_mtx);
        sc->cdev = cdev;
        sc->cdev->si_drv1 = sc;
        mtx_unlock(&vmmdev_mtx);

out:
        free(buf, M_VMMDEV);
        return (error);
}
SYSCTL_PROC(_hw_vmm, OID_AUTO, create,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_vmm_create, "A",
    NULL);
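
/*
 * Usage sketch for the sysctl above (hypothetical VM name):
 *
 *      sysctl hw.vmm.create=testvm
 *
 * On success a new virtual machine instance is created and exposed to
 * userland as /dev/vmm/testvm, owned by root:wheel with mode 0600.
 */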

void
vmmdev_init(void)
{
        mtx_init(&vmmdev_mtx, "vmm device mutex", NULL, MTX_DEF);
        pr_allow_flag = prison_add_allow(NULL, "vmm", NULL,
            "Allow use of vmm in a jail.");
}

int
vmmdev_cleanup(void)
{
        int error;

        if (SLIST_EMPTY(&head))
                error = 0;
        else
                error = EBUSY;

        return (error);
}

static int
devmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t len,
    struct vm_object **objp, int nprot)
{
        struct devmem_softc *dsc;
        vm_ooffset_t first, last;
        size_t seglen;
        int error;
        uint16_t lastcpu;
        bool sysmem;

        dsc = cdev->si_drv1;
        if (dsc == NULL) {
                /* 'cdev' has been created but is not ready for use */
                return (ENXIO);
        }

        first = *offset;
        last = *offset + len;
        if ((nprot & PROT_EXEC) || first < 0 || first >= last)
                return (EINVAL);

        lastcpu = vm_get_maxcpus(dsc->sc->vm) - 1;
        error = vcpu_lock_one(dsc->sc, lastcpu);
        if (error)
                return (error);

        error = vm_get_memseg(dsc->sc->vm, dsc->segid, &seglen, &sysmem, objp);
        KASSERT(error == 0 && !sysmem && *objp != NULL,
            ("%s: invalid devmem segment %d", __func__, dsc->segid));

        vcpu_unlock_one(dsc->sc, lastcpu);

        if (seglen >= last) {
                vm_object_reference(*objp);
                return (0);
        } else {
                return (EINVAL);
        }
}
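
/*
 * Usage sketch (illustrative; names are hypothetical): a devmem segment
 * is mapped through its own cdev, with *offset interpreted relative to
 * the start of the segment rather than as a guest physical address:
 *
 *      int fd = open("/dev/vmm.io/testvm.vga", O_RDWR);
 *      char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *          fd, 0);
 *
 * The request fails unless the whole [0, len) range fits inside the
 * segment.
 */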

static struct cdevsw devmemsw = {
        .d_name         = "devmem",
        .d_version      = D_VERSION,
        .d_mmap_single  = devmem_mmap_single,
};

static int
devmem_create_cdev(const char *vmname, int segid, char *devname)
{
        struct devmem_softc *dsc;
        struct vmmdev_softc *sc;
        struct cdev *cdev;
        int error;

        error = make_dev_p(MAKEDEV_CHECKNAME, &cdev, &devmemsw, NULL,
            UID_ROOT, GID_WHEEL, 0600, "vmm.io/%s.%s", vmname, devname);
        if (error)
                return (error);

        dsc = malloc(sizeof(struct devmem_softc), M_VMMDEV, M_WAITOK | M_ZERO);

        mtx_lock(&vmmdev_mtx);
        sc = vmmdev_lookup(vmname);
        KASSERT(sc != NULL, ("%s: vm %s softc not found", __func__, vmname));
        if (sc->cdev == NULL) {
                /* virtual machine is being created or destroyed */
                mtx_unlock(&vmmdev_mtx);
                free(dsc, M_VMMDEV);
                destroy_dev_sched_cb(cdev, NULL, 0);
                return (ENODEV);
        }

        dsc->segid = segid;
        dsc->name = devname;
        dsc->cdev = cdev;
        dsc->sc = sc;
        SLIST_INSERT_HEAD(&sc->devmem, dsc, link);
        mtx_unlock(&vmmdev_mtx);

        /* The 'cdev' is ready for use after 'si_drv1' is initialized */
        cdev->si_drv1 = dsc;
        return (0);
}

static void
devmem_destroy(void *arg)
{
        struct devmem_softc *dsc = arg;

        KASSERT(dsc->cdev, ("%s: devmem cdev already destroyed", __func__));
        dsc->cdev = NULL;
        dsc->sc = NULL;
}