/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>

#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/vmm.h>

#include <sys/types.h>
#include <sys/errno.h>
enum cpu_mode {
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};
/* struct vie_op.op_type */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_OR,
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)	/* immediate operand present */
#define	VIE_OP_F_IMM8		(1 << 1)	/* 8-bit immediate operand */
static const struct vie_op one_byte_opcodes[256] = {
	[0x88] = { .op_byte = 0x88, .op_type = VIE_OP_TYPE_MOV },
	[0x89] = { .op_byte = 0x89, .op_type = VIE_OP_TYPE_MOV },
	[0x8A] = { .op_byte = 0x8A, .op_type = VIE_OP_TYPE_MOV },
	[0x8B] = { .op_byte = 0x8B, .op_type = VIE_OP_TYPE_MOV },
	[0xC7] = { .op_byte = 0xC7, .op_type = VIE_OP_TYPE_MOV,
		   .op_flags = VIE_OP_F_IMM },
	/* XXX Group 1 extended opcode - not just AND */
	[0x23] = { .op_byte = 0x23, .op_type = VIE_OP_TYPE_AND },
	[0x81] = { .op_byte = 0x81, .op_type = VIE_OP_TYPE_AND,
		   .op_flags = VIE_OP_F_IMM },
	/* XXX Group 1 extended opcode - not just OR */
	[0x83] = { .op_byte = 0x83, .op_type = VIE_OP_TYPE_OR,
		   .op_flags = VIE_OP_F_IMM8 },
};
#define	VIE_MOD_INDIRECT	0
#define	VIE_MOD_INDIRECT_DISP8	1
#define	VIE_MOD_INDIRECT_DISP32	2
#define	VIE_MOD_DIRECT		3
#define	VIE_RM_SIB		4
#define	VIE_RM_DISP32		5

#define	GB			(1024 * 1024 * 1024)
static enum vm_reg_name gpr_map[16] = {
static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)

	error = vm_get_register(vm, vcpuid, reg, rval);
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)

	enum vm_reg_name reg;

	rshift = 0;
	reg = gpr_map[vie->reg];

	/*
	 * 64-bit mode imposes limitations on accessing legacy byte registers.
	 *
	 * The legacy high-byte registers cannot be addressed if the REX
	 * prefix is present. In this case the values 4, 5, 6 and 7 of the
	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
	 *
	 * If the REX prefix is not present then the values 4, 5, 6 and 7
	 * of the 'ModRM:reg' field address the legacy high-byte registers,
	 * %ah, %ch, %dh and %bh respectively.
	 */
	if (!vie->rex_present) {
		if (vie->reg & 0x4) {
			/*
			 * Obtain the value of %ah by reading %rax and shifting
			 * right by 8 bits (same for %bh, %ch and %dh).
			 */
			rshift = 8;
			reg = gpr_map[vie->reg & 0x3];
		}
	}

	error = vm_get_register(vm, vcpuid, reg, &val);
	*rval = val >> rshift;
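/*
 * Illustrative sketch (compiled out, not part of the emulator): with no
 * REX prefix, a ModRM:reg encoding of 4 selects %ah, whose value is
 * bits 15:8 of %rax, per the shift described above.  The helper name is
 * hypothetical.
 */
#if 0
static uint8_t
example_read_ah(uint64_t rax)
{
	/* rax = 0x1234 yields 0x12 */
	return ((rax >> 8) & 0xff);
}
#endif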
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
    uint64_t val, int size)

	error = vie_read_register(vm, vcpuid, reg, &origval);

	val &= size2mask[size];
	val |= origval & ~size2mask[size];

	error = vm_set_register(vm, vcpuid, reg, val);
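/*
 * Illustrative sketch (compiled out): the read-modify-write merge above
 * for a 2-byte update.  Writing 0xbeef into a register holding
 * 0x1122334455667788 must yield 0x112233445566beef; the bits outside the
 * operand are preserved by masking with ~size2mask[size].  The helper
 * name is hypothetical.
 */
#if 0
static uint64_t
example_merge(uint64_t origval, uint64_t val, int size)
{
	val &= size2mask[size];
	val |= origval & ~size2mask[size];
	return (val);	/* example_merge(0x1122334455667788, 0xbeef, 2) */
}
#endif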
/*
 * The following simplifying assumptions are made during emulation:
 *
 * - guest is in 64-bit mode
 *   - default address size is 64-bits
 *   - default operand size is 32-bits
 *
 * - operand size override is not supported
 *
 * - address size override is not supported
 */
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)

	enum vm_reg_name reg;

	switch (vie->op.op_byte) {
	case 0x88:
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 88/r:	mov r/m8, r8
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;
		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
		if (error == 0)
			error = memwrite(vm, vcpuid, gpa, byte, size, arg);
		break;
	case 0x89:
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:	mov r/m32, r32
		 * REX.W + 89/r: mov r/m64, r64
		 */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0x8A:
	case 0x8B:
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8A/r:	mov r8, r/m8
		 * REX + 8A/r:	mov r8, r/m8
		 * 8B/r:	mov r32, r/m32
		 * REX.W 8B/r:	mov r64, r/m64
		 */
		if (vie->op.op_byte == 0x8A)
			size = 1;
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = gpr_map[vie->reg];
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xC7:
		/*
		 * MOV from imm32 to mem (ModRM:r/m)
		 * C7/0		mov r/m32, imm32
		 * REX.W + C7/0	mov r/m64, imm32 (sign-extended to 64-bits)
		 */
		val = vie->immediate;	/* already sign-extended */
		val &= size2mask[size];
		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		break;
	}
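/*
 * Illustrative sketch (compiled out): the C7/0 path above relies on the
 * decoder having already sign-extended the imm32, so REX.W + C7/0 with
 * an immediate of -1 stores 0xffffffffffffffff.  The helper name is
 * hypothetical.
 */
#if 0
static uint64_t
example_c7_value(int32_t imm32, int size)
{
	uint64_t val;

	val = (int64_t)imm32;		/* sign-extend, as the decoder does */
	return (val & size2mask[size]);	/* size 8, imm32 -1: all ones */
}
#endif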
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)

	enum vm_reg_name reg;

	switch (vie->op.op_byte) {
	case 0x23:
		/*
		 * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 23/r		and r32, r/m32
		 * REX.W + 23/r	and r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		val1 &= val2;
		error = vie_update_register(vm, vcpuid, reg, val1, size);
		break;
	case 0x81:
		/*
		 * AND mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81/4		and r/m32, imm32
		 * REX.W + 81/4	and r/m64, imm32 sign-extended to 64
		 *
		 * Currently, only the AND operation of the 0x81 opcode
		 * is implemented (ModRM:reg = b100).
		 */
		if ((vie->reg & 7) != 4)
			break;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 &= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	}
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)

	switch (vie->op.op_byte) {
	case 0x83:
		/*
		 * OR mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 83/1		OR r/m32, imm8 sign-extended to 32
		 * REX.W + 83/1	OR r/m64, imm8 sign-extended to 64
		 *
		 * Currently, only the OR operation of the 0x83 opcode
		 * is implemented (ModRM:reg = b001).
		 */
		if ((vie->reg & 7) != 1)
			break;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 |= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	}
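/*
 * Illustrative sketch (compiled out): with 83/1 the imm8 is sign-extended
 * before the OR, so an immediate byte of 0x80 ORs in
 * 0xffffffffffffff80 rather than 0x80.  The helper name is hypothetical.
 */
#if 0
static uint64_t
example_or_imm8(uint64_t val1, int8_t imm8)
{
	/* the cast widens imm8 with sign extension, as the decoder does */
	return (val1 | (int64_t)imm8);
}
#endif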
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite,
    void *memarg)

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_OR:
		error = emulate_or(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	}
vie_init(struct vie *vie)

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys,
    uint64_t *gpa, uint64_t *gpaend)

	int nlevels, ptpshift, ptpindex;
	uint64_t *ptpbase, pte, pgsize;

	/*
	 * XXX assumes 64-bit guest with 4 page walk levels
	 */
	nlevels = 4;
	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		ptpbase = vm_gpa_hold(vm, ptpphys, PAGE_SIZE, VM_PROT_READ,
		    &cookie);

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		vm_gpa_release(cookie);

		if ((pte & PG_V) == 0)
			return (-1);

		ptpphys = pte;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));
	*gpaend = pte + pgsize;
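/*
 * Illustrative sketch (compiled out): the shift sequences above are a
 * branch-free way to keep bits 51:ptpshift of a page table entry while
 * clearing the low attribute bits and the upper 12 bits.  The helper
 * name is hypothetical.
 */
#if 0
static uint64_t
example_pte_mask(uint64_t pte, int ptpshift)
{
	pte >>= ptpshift;		/* drop the low attribute bits */
	pte <<= (ptpshift + 12);	/* shift the high bits off the top */
	pte >>= 12;			/* bits 51:ptpshift survive, in place */
	return (pte);
}
#endif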
vmm_fetch_instruction(struct vm *vm, int cpuid, uint64_t rip, int inst_length,
    uint64_t cr3, struct vie *vie)

	uint64_t gpa, gpaend, off;

	/*
	 * XXX cache previously fetched instructions using 'rip' as the tag
	 */
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	/* Copy the instruction into 'vie' */
	while (vie->num_valid < inst_length) {
		err = gla2gpa(vm, rip, cr3, &gpa, &gpaend);
		if (err)
			break;

		off = gpa & PAGE_MASK;
		n = min(inst_length - vie->num_valid, PAGE_SIZE - off);

		if ((hpa = vm_gpa_hold(vm, gpa, n, prot, &cookie)) == NULL)
			break;

		bcopy(hpa, &vie->inst[vie->num_valid], n);
		vm_gpa_release(cookie);
		rip += n;
		vie->num_valid += n;
	}

	if (vie->num_valid == inst_length)
		return (0);
	else
		return (-1);
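/*
 * Illustrative sketch (compiled out): the copy length above is clamped so
 * that a single iteration never crosses a page boundary; an instruction
 * straddling two pages is fetched in two chunks.  The helper name is
 * hypothetical.
 */
#if 0
static int
example_chunk(uint64_t gpa, int remaining)
{
	int off = gpa & PAGE_MASK;

	/* gpa offset 0xffe with 5 bytes left: copy 2 now, 3 next pass */
	return (min(remaining, PAGE_SIZE - off));
}
#endif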
vie_peek(struct vie *vie, uint8_t *x)

	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	} else
		return (-1);
vie_advance(struct vie *vie)

	vie->num_processed++;
decode_rex(struct vie *vie)

	if (vie_peek(vie, &x))
		return (-1);

	if (x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;

		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;

		vie_advance(vie);
	}
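/*
 * Illustrative sketch (compiled out): a REX prefix of 0x48 (REX.W) sets
 * only the 'w' bit, so rex_w = 1 and rex_r/x/b = 0; 0x44 sets only 'r'.
 * Hypothetical helper returning the W bit of a candidate prefix byte.
 */
#if 0
static int
example_rex_w(uint8_t x)
{
	if (x < 0x40 || x > 0x4F)
		return (0);		/* not a REX prefix at all */
	return ((x & 0x8) ? 1 : 0);	/* 0x48 -> 1, 0x44 -> 0 */
}
#endif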
decode_opcode(struct vie *vie)

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);
decode_modrm(struct vie *vie)

	enum cpu_mode cpu_mode;

	/*
	 * XXX assuming that guest is in IA-32E 64-bit mode
	 */
	cpu_mode = CPU_MODE_64BIT;

	if (vie_peek(vie, &x))
		return (-1);

	vie->mod = (x >> 6) & 0x3;
	vie->rm =  (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is don't care in
		 * these two cases.
		 */
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
		goto done;

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			/*
			 * Table 2-7. RIP-Relative Addressing
			 *
			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
			 * whereas in compatibility mode it just implies disp32.
			 */
			if (cpu_mode == CPU_MODE_64BIT)
				vie->base_register = VM_REG_GUEST_RIP;
			else
				vie->base_register = VM_REG_LAST;
		}
		break;
	}

done:
	vie_advance(vie);
	return (0);
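/*
 * Illustrative sketch (compiled out): ModRM byte 0x4D decodes as mod=01
 * (a disp8 follows), reg=001, r/m=101, per the field extraction above.
 * The helper name is hypothetical.
 */
#if 0
static void
example_modrm(uint8_t x, int *mod, int *reg, int *rm)
{
	*mod = (x >> 6) & 0x3;	/* 0x4D: mod = 1 */
	*reg = (x >> 3) & 0x7;	/* 0x4D: reg = 1 */
	*rm  = (x >> 0) & 0x7;	/* 0x4D: r/m = 5 */
}
#endif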
decode_sib(struct vie *vie)

	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case when base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;
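/*
 * Illustrative sketch (compiled out): how the decoded base, index, scale
 * and displacement combine into an effective address, matching the check
 * performed in verify_gla() below.  The helper name is hypothetical.
 */
#if 0
static uint64_t
example_ea(uint64_t base, uint64_t index, int scale, int64_t disp)
{
	/* e.g. 0x10(%rbp,%rcx,4) with rbp=0x1000, rcx=2 yields 0x1018 */
	return (base + scale * index + disp);
}
#endif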
decode_displacement(struct vie *vie)

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */
decode_immediate(struct vie *vie)

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM)
		vie->imm_bytes = 4;
	else if (vie->op.op_flags & VIE_OP_F_IMM8)
		vie->imm_bytes = 1;

	if ((n = vie->imm_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_immediate: invalid imm_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->immediate = u.signed8;		/* sign-extended */
	else
		vie->immediate = u.signed32;		/* sign-extended */
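/*
 * Illustrative sketch (compiled out): the union trick above sign-extends
 * by storing raw bytes into the buffer and reading them back through a
 * signed field, so an imm8 of 0x80 becomes -128 (0xffffffffffffff80 when
 * widened).  Hypothetical helper for the 1-byte case.
 */
#if 0
static int64_t
example_sext8(uint8_t byte)
{
	union {
		char	buf[1];
		int8_t	signed8;
	} u;

	u.buf[0] = byte;
	return (u.signed8);	/* 0x80 -> -128 */
}
#endif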
/*
 * Verify that all the bytes in the instruction buffer were consumed.
 */
verify_inst_length(struct vie *vie)

	if (vie->num_processed == vie->num_valid)
		return (0);
	else
		return (-1);
/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches with our instruction decoding.
 */
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)

	/* Skip 'gla' verification */
	if (gla == VIE_INVALID_GLA)
		return (0);

	base = 0;
	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
			    error, vie->base_register);
			return (-1);
		}

		/*
		 * RIP-relative addressing starts from the following
		 * instruction
		 */
		if (vie->base_register == VM_REG_GUEST_RIP)
			base += vie->num_valid;
	}

	idx = 0;
	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
			    error, vie->index_register);
			return (-1);
		}
	}

	if (base + vie->scale * idx + vie->displacement != gla) {
		printf("verify_gla mismatch: "
		    "base(0x%0lx), scale(%d), index(0x%0lx), "
		    "disp(0x%0lx), gla(0x%0lx)\n",
		    base, vie->scale, idx, vie->displacement, gla);
		return (-1);
	}
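/*
 * Illustrative sketch (compiled out): %rip-relative operands are relative
 * to the *next* instruction, hence the num_valid adjustment above.  For a
 * 7-byte instruction at rip 0x1000 with disp32 0x20 the operand gla is
 * 0x1000 + 7 + 0x20 = 0x1027.  The helper name is hypothetical.
 */
#if 0
static uint64_t
example_riprel(uint64_t rip, int inst_len, int64_t disp)
{
	return (rip + inst_len + disp);	/* 0x1000 + 7 + 0x20 = 0x1027 */
}
#endif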
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)

	if (decode_rex(vie))
		return (-1);

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

	if (verify_inst_length(vie))
		return (-1);

	if (verify_gla(vm, cpuid, gla, vie))
		return (-1);

	vie->decoded = 1;	/* success */

	return (0);