2 * Copyright (c) 2011 NetApp, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/types.h>
36 #include <machine/atomic.h>
37 #include <machine/segments.h>
48 #include <pthread_np.h>
51 #include <machine/vmm.h>
65 #include "spinup_ap.h"
/* I/O port the guest writes to hand notifications/upcalls to bhyve. */
68 #define GUEST_NIO_PORT 0x488 /* guest upcalls via i/o port */
/*
 * Return codes for the vmexit handlers; they tell vm_loop() how to
 * resume (or stop) guest execution after an exit is serviced.
 */
70 #define VMEXIT_SWITCH 0 /* force vcpu switch in mux mode */
71 #define VMEXIT_CONTINUE 1 /* continue from next instruction */
72 #define VMEXIT_RESTART 2 /* restart current instruction */
73 #define VMEXIT_ABORT 3 /* abort the vm run loop */
74 #define VMEXIT_RESET 4 /* guest machine has reset */
75 #define VMEXIT_POWEROFF 5 /* guest machine has powered off */
/* Memory-size units used when parsing/allocating guest memory. */
77 #define MB (1024UL * 1024)
78 #define GB (1024UL * MB)
/*
 * A vmexit handler services one exit reason.  It may change *vcpu to
 * redirect the calling loop to a different vcpu.
 */
80 typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);
/* Base host CPU for vcpu pinning; -1 disables pinning (-p option). */
86 static int pincpu = -1;
/* Set by -H / -P: force a vmexit when the guest executes HLT / PAUSE. */
87 static int guest_vmexit_on_hlt, guest_vmexit_on_pause;
/* Non-zero: virtio devices may use per-queue MSI-X (-W forces single MSI). */
88 static int virtio_msix = 1;
89 static int x2apic_mode = 0; /* default is xAPIC */
/* Non-zero: inject #GP on access to unimplemented MSRs (-w relaxes this). */
92 static int strictmsr = 1;
96 static char *progname;
/* vcpu id of the bootstrap processor. */
97 static const int BSP = 0;
101 static void vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip);
/* Per-vcpu exit information filled in by vm_run(). */
103 struct vm_exit vmexit[VM_MAXCPU];
/* Statistics counters bumped by the individual exit handlers. */
106 uint64_t vmexit_bogus;
107 uint64_t vmexit_bogus_switch;
109 uint64_t vmexit_pause;
110 uint64_t vmexit_mtrap;
111 uint64_t vmexit_inst_emul;
112 uint64_t cpu_switch_rotate;
113 uint64_t cpu_switch_direct;
/* Per-vcpu thread bookkeeping (struct opening elided in this excerpt). */
119 struct vmctx *mt_ctx;
121 } mt_vmm_info[VM_MAXCPU];
/*
 * Body of usage(): print the command-line synopsis and option summary
 * to stderr (surrounding function definition elided in this excerpt).
 */
128 "Usage: %s [-aehwAHIPW] [-g <gdb port>] [-s <pci>]\n"
129 " %*s [-c vcpus] [-p pincpu] [-m mem] [-l <lpc>] <vm>\n"
130 " -a: local apic is in xAPIC mode (deprecated)\n"
131 " -A: create an ACPI table\n"
133 " -c: # cpus (default 1)\n"
134 " -p: pin vcpu 'n' to host cpu 'pincpu + n'\n"
135 " -H: vmexit from the guest on hlt\n"
136 " -P: vmexit from the guest on pause\n"
137 " -W: force virtio to use single-vector MSI\n"
138 " -e: exit on unhandled I/O access\n"
140 " -s: <slot,driver,configinfo> PCI slot config\n"
141 " -l: LPC device configuration\n"
142 " -m: memory size in MB\n"
143 " -w: ignore unimplemented MSRs\n"
144 " -x: local apic is in x2APIC mode\n",
/* "%*s" pads the continuation line to align under the program name. */
145 progname, (int)strlen(progname), "");
/*
 * Translate a guest-physical address into a host virtual address by
 * delegating to the vmm mapping (vm_map_gpa).
 */
151 paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
154 return (vm_map_gpa(ctx, gaddr, len));
/* Accessor: non-zero when -P requested vmexits on guest PAUSE. */
158 fbsdrun_vmexit_on_pause(void)
161 return (guest_vmexit_on_pause);
/* Accessor: non-zero when -H requested vmexits on guest HLT. */
165 fbsdrun_vmexit_on_hlt(void)
168 return (guest_vmexit_on_hlt);
/* Accessor: non-zero when virtio devices may use MSI-X (default). */
172 fbsdrun_virtio_msix(void)
175 return (virtio_msix);
/*
 * pthread entry point for a vcpu: name the thread "vcpu N" and enter
 * the per-vcpu run loop at the rip recorded in vmexit[vcpu].
 * param is the vcpu's mt_vmm_info slot.
 */
179 fbsdrun_start_thread(void *param)
181 char tname[MAXCOMLEN + 1];
182 struct mt_vmm_info *mtp;
188 snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
189 pthread_set_name_np(mtp->mt_thr, tname);
191 vm_loop(mtp->mt_ctx, vcpu, vmexit[vcpu].rip);
/*
 * Bring a new vcpu online: mark it in the active-cpu mask, seed its
 * vmexit record so execution starts at 'rip', and spawn the vcpu
 * thread running fbsdrun_start_thread().
 */
199 fbsdrun_addcpu(struct vmctx *ctx, int vcpu, uint64_t rip)
/* Refuse to add a vcpu that is already marked active. */
203 if (cpumask & (1 << vcpu)) {
204 fprintf(stderr, "addcpu: attempting to add existing cpu %d\n",
209 atomic_set_int(&cpumask, 1 << vcpu);
212 * Set up the vmexit struct to allow execution to start
215 vmexit[vcpu].rip = rip;
216 vmexit[vcpu].inst_length = 0;
218 mt_vmm_info[vcpu].mt_ctx = ctx;
219 mt_vmm_info[vcpu].mt_vcpu = vcpu;
221 error = pthread_create(&mt_vmm_info[vcpu].mt_thr, NULL,
222 fbsdrun_start_thread, &mt_vmm_info[vcpu]);
/*
 * Take a vcpu offline: clear its bit in the active-cpu mask.
 * Returns non-zero when this was the last active vcpu (mask empty),
 * which the caller uses to decide whether the whole VM is done.
 */
227 fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
/* Deleting a vcpu that was never added indicates a logic error. */
230 if ((cpumask & (1 << vcpu)) == 0) {
/* Fixed copy-paste: this is the delete path, not addcpu. */
231 fprintf(stderr, "fbsdrun_deletecpu: attempting to delete unknown cpu %d\n",
236 atomic_clear_int(&cpumask, 1 << vcpu);
237 return (cpumask == 0);
/* Map a guest-initiated reset onto the VMEXIT_RESET loop action. */
241 vmexit_catch_reset(void)
244 return (VMEXIT_RESET);
/* Unhandled I/O port access: abort the run loop. */
248 vmexit_catch_inout(void)
250 return (VMEXIT_ABORT);
/*
 * Hook for guest writes to GUEST_NIO_PORT; currently a no-op that
 * continues execution.  Guest-driven debug actions would go here.
 */
254 vmexit_handle_notify(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu,
259 * put guest-driven debug here
262 return (VMEXIT_CONTINUE);
/*
 * Service an I/O port (in/out) exit.  Special cases:
 *  - string/rep variants are not emulated (abort);
 *  - OUT of 0xFE to port 0x64 (keyboard controller) is a guest reset;
 *  - OUT to GUEST_NIO_PORT is a host notification.
 * Everything else is routed to emulate_inout(); for an IN that
 * succeeds, the result is written back to the guest's RAX.
 */
266 vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
269 int bytes, port, in, out;
275 port = vme->u.inout.port;
276 bytes = vme->u.inout.bytes;
277 eax = vme->u.inout.eax;
278 in = vme->u.inout.in;
281 /* We don't deal with these */
282 if (vme->u.inout.string || vme->u.inout.rep)
283 return (VMEXIT_ABORT);
285 /* Special case of guest reset */
286 if (out && port == 0x64 && (uint8_t)eax == 0xFE)
287 return (vmexit_catch_reset());
289 /* Extra-special case of host notifications */
290 if (out && port == GUEST_NIO_PORT)
291 return (vmexit_handle_notify(ctx, vme, pvcpu, eax));
293 error = emulate_inout(ctx, vcpu, in, port, bytes, &eax, strictio);
294 if (error == INOUT_OK && in)
295 error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX, eax);
/* Dispatch on emulate_inout() status (switch lines elided in excerpt). */
299 return (VMEXIT_CONTINUE);
301 return (VMEXIT_RESET);
303 return (VMEXIT_POWEROFF);
305 fprintf(stderr, "Unhandled %s%c 0x%04x\n",
307 bytes == 1 ? 'b' : (bytes == 2 ? 'w' : 'l'), port);
308 return (vmexit_catch_inout());
/*
 * Service a RDMSR exit: emulate the read, and on failure (with strict
 * MSR handling) inject #GP into the guest and restart the instruction.
 * On success the 64-bit value is split across guest RAX/RDX.
 */
313 vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
320 error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
322 fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
323 vme->u.msr.code, *pvcpu);
/* Inject a general-protection fault, as real hardware would. */
325 error = vm_inject_exception2(ctx, *pvcpu, IDT_GP, 0);
327 return (VMEXIT_RESTART);
332 error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
336 error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
339 return (VMEXIT_CONTINUE);
/*
 * Service a WRMSR exit: emulate the write; on failure (with strict MSR
 * handling) inject #GP and restart the faulting instruction.
 */
343 vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
347 error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval)
349 fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
350 vme->u.msr.code, vme->u.msr.wval, *pvcpu);
352 error = vm_inject_exception2(ctx, *pvcpu, IDT_GP, 0);
354 return (VMEXIT_RESTART);
357 return (VMEXIT_CONTINUE);
/*
 * The guest BSP is starting an application processor: delegate to
 * spinup_ap() to initialize and launch the target vcpu.
 */
361 vmexit_spinup_ap(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
364 int retval = VMEXIT_CONTINUE;
366 newcpu = spinup_ap(ctx, *pvcpu,
367 vme->u.spinup_ap.vcpu, vme->u.spinup_ap.rip);
/*
 * A vcpu is going offline.  If it was the last active vcpu, treat the
 * event like a machine reset so the VM can be torn down/restarted.
 */
373 vmexit_spindown_cpu(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
377 lastcpu = fbsdrun_deletecpu(ctx, *pvcpu);
380 return (vmexit_catch_reset());
/*
 * Unrecoverable VMX-level exit: dump the exit state for diagnosis and
 * abort the run loop.
 */
384 vmexit_vmx(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
387 fprintf(stderr, "vm exit[%d]\n", *pvcpu);
388 fprintf(stderr, "\treason\t\tVMX\n");
389 fprintf(stderr, "\trip\t\t0x%016lx\n", vmexit->rip);
390 fprintf(stderr, "\tinst_length\t%d\n", vmexit->inst_length);
391 fprintf(stderr, "\tstatus\t\t%d\n", vmexit->u.vmx.status);
392 fprintf(stderr, "\texit_reason\t%u\n", vmexit->u.vmx.exit_reason);
393 fprintf(stderr, "\tqualification\t0x%016lx\n",
394 vmexit->u.vmx.exit_qualification);
395 fprintf(stderr, "\tinst_type\t\t%d\n", vmexit->u.vmx.inst_type);
396 fprintf(stderr, "\tinst_error\t\t%d\n", vmexit->u.vmx.inst_error);
398 return (VMEXIT_ABORT);
/* Spurious exit: count it and re-run the same instruction. */
402 vmexit_bogus(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
405 stats.vmexit_bogus++;
407 return (VMEXIT_RESTART);
/*
 * Guest executed HLT.  Resume at the next instruction; the HLT exit is
 * used to yield the host CPU rather than spin in the idle guest.
 */
411 vmexit_hlt(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
417 * Just continue execution with the next instruction. We use
418 * the HLT VM exit as a way to be friendly with the host
421 return (VMEXIT_CONTINUE);
/* Guest executed PAUSE: count it and continue past the instruction. */
425 vmexit_pause(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
428 stats.vmexit_pause++;
430 return (VMEXIT_CONTINUE);
/* Monitor-trap-flag exit: count it and restart the instruction. */
434 vmexit_mtrap(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
437 stats.vmexit_mtrap++;
439 return (VMEXIT_RESTART);
/*
 * Memory-mapped I/O exit: hand the decoded instruction to the MMIO
 * emulation layer.  Failure to emulate, or an access to an unmapped
 * address (ESRCH), aborts the run loop with a diagnostic.
 */
443 vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
446 stats.vmexit_inst_emul++;
448 err = emulate_mem(ctx, *pvcpu, vmexit->u.inst_emul.gpa,
449 &vmexit->u.inst_emul.vie);
454 "Failed to emulate instruction at 0x%lx\n",
456 } else if (err == ESRCH) {
457 fprintf(stderr, "Unhandled memory access to 0x%lx\n",
458 vmexit->u.inst_emul.gpa);
461 return (VMEXIT_ABORT);
464 return (VMEXIT_CONTINUE);
/*
 * Exit-reason dispatch table used by vm_loop().  HLT and PAUSE entries
 * are installed at runtime by fbsdrun_set_capabilities() when the
 * corresponding options are enabled; all other slots stay NULL.
 */
467 static vmexit_handler_t handler[VM_EXITCODE_MAX] = {
468 [VM_EXITCODE_INOUT] = vmexit_inout,
469 [VM_EXITCODE_VMX] = vmexit_vmx,
470 [VM_EXITCODE_BOGUS] = vmexit_bogus,
471 [VM_EXITCODE_RDMSR] = vmexit_rdmsr,
472 [VM_EXITCODE_WRMSR] = vmexit_wrmsr,
473 [VM_EXITCODE_MTRAP] = vmexit_mtrap,
474 [VM_EXITCODE_INST_EMUL] = vmexit_inst_emul,
475 [VM_EXITCODE_SPINUP_AP] = vmexit_spinup_ap,
476 [VM_EXITCODE_SPINDOWN_CPU] = vmexit_spindown_cpu,
/*
 * Per-vcpu execution loop: optionally pin the thread to a host CPU,
 * then repeatedly vm_run() and dispatch each exit through handler[].
 * The handler's return code selects the next rip (continue past the
 * instruction, or restart it); other codes terminate the loop.
 */
480 vm_loop(struct vmctx *ctx, int vcpu, uint64_t rip)
483 int error, rc, prevcpu;
484 enum vm_exitcode exitcode;
/* Pin vcpu N to host cpu 'pincpu + N' when -p was given. */
488 CPU_SET(pincpu + vcpu, &mask);
489 error = pthread_setaffinity_np(pthread_self(),
490 sizeof(mask), &mask);
495 error = vm_run(ctx, vcpu, rip, &vmexit[vcpu]);
501 exitcode = vmexit[vcpu].exitcode;
/* Unknown or unregistered exit reasons are fatal. */
502 if (exitcode >= VM_EXITCODE_MAX || handler[exitcode] == NULL) {
503 fprintf(stderr, "vm_loop: unexpected exitcode 0x%x\n",
508 rc = (*handler[exitcode])(ctx, &vmexit[vcpu], &vcpu);
511 case VMEXIT_CONTINUE:
/* Skip over the instruction that caused the exit. */
512 rip = vmexit[vcpu].rip + vmexit[vcpu].inst_length;
/* VMEXIT_RESTART: re-execute the same instruction. */
515 rip = vmexit[vcpu].rip;
523 fprintf(stderr, "vm_run error %d, errno %d\n", error, errno);
/*
 * Return how many vcpus the VM may run: more than one only when the
 * hardware supports the UNRESTRICTED_GUEST capability.
 */
527 num_vcpus_allowed(struct vmctx *ctx)
531 error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);
534 * The guest is allowed to spinup more than one processor only if the
535 * UNRESTRICTED_GUEST capability is available.
/*
 * Program per-vcpu vmm capabilities from the command-line options:
 * HLT/PAUSE exiting (also installing their dispatch handlers),
 * x2APIC vs xAPIC mode, and INVPCID support.
 */
544 fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
548 if (fbsdrun_vmexit_on_hlt()) {
549 err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
551 fprintf(stderr, "VM exit on HLT not supported\n");
554 vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
/* Register the HLT handler only when the capability is enabled. */
556 handler[VM_EXITCODE_HLT] = vmexit_hlt;
559 if (fbsdrun_vmexit_on_pause()) {
561 * pause exit support required for this mode
563 err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
566 "SMP mux requested, no pause support\n");
569 vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
571 handler[VM_EXITCODE_PAUSE] = vmexit_pause;
/* Select the local APIC mode requested with -x / -a. */
575 err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);
577 err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);
580 fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
584 vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
/*
 * bhyve entry point: parse options, open the VM, size and map guest
 * memory, initialize device emulation (PCI, LPC, optional gdb port and
 * ACPI/MP tables), then start the BSP and enter the event loop.
 */
588 main(int argc, char *argv[])
590 int c, error, gdb_port, err, bvmcons;
597 progname = basename(argv[0]);
602 while ((c = getopt(argc, argv, "abehwxAHIPWp:g:c:s:m:l:")) != -1) {
/* NOTE(review): atoi() silently accepts junk; strtol would be safer. */
614 pincpu = atoi(optarg);
617 guest_ncpus = atoi(optarg);
620 gdb_port = atoi(optarg);
623 if (lpc_device_parse(optarg) != 0) {
624 errx(EX_USAGE, "invalid lpc device "
625 "configuration '%s'", optarg);
629 if (pci_parse_slot(optarg) != 0)
634 error = vm_parse_memsize(optarg, &memsize);
636 errx(EX_USAGE, "invalid memsize '%s'", optarg);
639 guest_vmexit_on_hlt = 1;
643 * The "-I" option was used to add an ioapic to the
646 * An ioapic is now provided unconditionally for each
647 * virtual machine and this option is now deprecated.
651 guest_vmexit_on_pause = 1;
679 ctx = vm_open(vmname);
/* Cap the requested vcpu count at what the hardware allows. */
685 max_vcpus = num_vcpus_allowed(ctx);
686 if (guest_ncpus > max_vcpus) {
687 fprintf(stderr, "%d vCPUs requested but only %d available\n",
688 guest_ncpus, max_vcpus);
692 fbsdrun_set_capabilities(ctx, BSP);
694 err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
696 fprintf(stderr, "Unable to setup memory (%d)\n", err);
707 * Exit if a device emulation finds an error in it's initilization
709 if (init_pci(ctx) != 0)
713 init_dbgport(gdb_port);
/* The guest starts at the rip the kernel loader left in the BSP. */
718 error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
722 * build the guest tables, MP etc.
724 mptable_build(ctx, guest_ncpus);
727 error = acpi_build(ctx, guest_ncpus);
732 * Change the proc title to include the VM name.
734 setproctitle("%s", vmname);
/* Start the bootstrap processor; APs come up via spinup_ap exits. */
739 fbsdrun_addcpu(ctx, BSP, rip);
742 * Head off to the main event dispatch loop