/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>

#include <machine/vmm.h>

#include <vmmapi.h>
#endif	/* _KERNEL */

enum cpu_mode {
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};

/* struct vie_op.op_type */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_OR,
	VIE_OP_TYPE_LAST
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)	/* immediate operand present */
#define	VIE_OP_F_IMM8		(1 << 1)	/* 8-bit immediate operand */

static const struct vie_op one_byte_opcodes[256] = {
	[0x88] = { .op_byte = 0x88, .op_type = VIE_OP_TYPE_MOV },
	[0x89] = { .op_byte = 0x89, .op_type = VIE_OP_TYPE_MOV },
	[0x8A] = { .op_byte = 0x8A, .op_type = VIE_OP_TYPE_MOV },
	[0x8B] = { .op_byte = 0x8B, .op_type = VIE_OP_TYPE_MOV },
	[0xC7] = { .op_byte = 0xC7, .op_type = VIE_OP_TYPE_MOV,
		   .op_flags = VIE_OP_F_IMM },
	[0x23] = { .op_byte = 0x23, .op_type = VIE_OP_TYPE_AND },
	/* XXX Group 1 extended opcode - not just AND */
	[0x81] = { .op_byte = 0x81, .op_type = VIE_OP_TYPE_AND,
		   .op_flags = VIE_OP_F_IMM },
	/* XXX Group 1 extended opcode - not just OR */
	[0x83] = { .op_byte = 0x83, .op_type = VIE_OP_TYPE_OR,
		   .op_flags = VIE_OP_F_IMM8 },
};

/* struct vie.mod */
#define	VIE_MOD_INDIRECT	0
#define	VIE_MOD_INDIRECT_DISP8	1
#define	VIE_MOD_INDIRECT_DISP32	2
#define	VIE_MOD_DIRECT		3

/* struct vie.rm */
#define	VIE_RM_SIB		4
#define	VIE_RM_DISP32		5

#define	GB			(1024 * 1024 * 1024)

static enum vm_reg_name gpr_map[16] = {
	VM_REG_GUEST_RAX, VM_REG_GUEST_RCX, VM_REG_GUEST_RDX,
	VM_REG_GUEST_RBX, VM_REG_GUEST_RSP, VM_REG_GUEST_RBP,
	VM_REG_GUEST_RSI, VM_REG_GUEST_RDI, VM_REG_GUEST_R8,
	VM_REG_GUEST_R9, VM_REG_GUEST_R10, VM_REG_GUEST_R11,
	VM_REG_GUEST_R12, VM_REG_GUEST_R13, VM_REG_GUEST_R14,
	VM_REG_GUEST_R15
};

static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};

static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
	int error;

	error = vm_get_register(vm, vcpuid, reg, rval);

	return (error);
}

static int
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
{
	uint64_t val;
	int error, rshift;
	enum vm_reg_name reg;

	rshift = 0;
	reg = gpr_map[vie->reg];

	/*
	 * 64-bit mode imposes limitations on accessing legacy byte registers.
	 *
	 * The legacy high-byte registers cannot be addressed if the REX
	 * prefix is present. In this case the values 4, 5, 6 and 7 of the
	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
	 *
	 * If the REX prefix is not present then the values 4, 5, 6 and 7
	 * of the 'ModRM:reg' field address the legacy high-byte registers,
	 * %ah, %ch, %dh and %bh respectively.
	 */
	if (!vie->rex_present) {
		if (vie->reg & 0x4) {
			/*
			 * Obtain the value of %ah by reading %rax and shifting
			 * right by 8 bits (same for %bh, %ch and %dh).
			 */
			rshift = 8;
			reg = gpr_map[vie->reg & 0x3];
		}
	}

	error = vm_get_register(vm, vcpuid, reg, &val);
	*rval = val >> rshift;
	return (error);
}
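
/*
 * Illustrative example (added comment, not from the original source):
 * with no REX prefix and ModRM:reg = 5, the code above reads %ch by
 * fetching %rcx (gpr_map[5 & 0x3]) and shifting the value right by
 * 8 bits.
 */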

static int
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
		    uint64_t val, int size)
{
	int error;
	uint64_t origval;

	switch (size) {
	case 1:
	case 2:
		error = vie_read_register(vm, vcpuid, reg, &origval);
		if (error)
			return (error);
		val &= size2mask[size];
		val |= origval & ~size2mask[size];
		break;
	case 4:
		val &= 0xffffffffUL;
		break;
	case 8:
		break;
	default:
		return (EINVAL);
	}

	error = vm_set_register(vm, vcpuid, reg, val);

	return (error);
}
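
/*
 * Illustrative example (added comment, not from the original source):
 * updating a register holding 0xffffffffffffffff with val = 0x1234 and
 * size = 2 yields 0xffffffffffff1234; only the low word is replaced.
 * With size = 4 the upper 32 bits are cleared instead, matching the
 * hardware behavior of 32-bit destination operands in 64-bit mode.
 */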

/*
 * The following simplifying assumptions are made during emulation:
 *
 * - guest is in 64-bit mode
 *   - default address size is 64-bits
 *   - default operand size is 32-bits
 *
 * - operand size override is not supported
 *
 * - address size override is not supported
 */
static int
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint8_t byte;
	uint64_t val;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x88:
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 88/r:	mov r/m8, r8
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;
		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
		if (error == 0)
			error = memwrite(vm, vcpuid, gpa, byte, size, arg);
		break;
	case 0x89:
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:	mov r/m32, r32
		 * REX.W + 89/r	mov r/m64, r64
		 */
		if (vie->rex_w)
			size = 8;
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0x8A:
	case 0x8B:
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8A/r:	mov r8, r/m8
		 * REX + 8A/r:	mov r8, r/m8
		 * 8B/r:	mov r32, r/m32
		 * REX.W 8B/r:	mov r64, r/m64
		 */
		if (vie->op.op_byte == 0x8A)
			size = 1;
		else if (vie->rex_w)
			size = 8;
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = gpr_map[vie->reg];
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xC7:
		/*
		 * MOV from imm32 to mem (ModRM:r/m)
		 * C7/0		mov r/m32, imm32
		 * REX.W + C7/0	mov r/m64, imm32 (sign-extended to 64-bits)
		 */
		val = vie->immediate;		/* already sign-extended */

		if (vie->rex_w)
			size = 8;

		val &= size2mask[size];

		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		break;
	default:
		break;
	}

	return (error);
}
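
/*
 * Illustrative example (added comment, not from the original source):
 * the byte sequence "c7 00 01 00 00 00" decodes as 'movl $1, (%rax)'
 * and is handled by the 0xC7 case above: ModRM 0x00 gives mod=00,
 * reg=000, rm=000 and the trailing imm32 is 0x1.
 */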

static int
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val1, val2;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x23:
		/*
		 * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 23/r		and r32, r/m32
		 * REX.W + 23/r	and r64, r/m64
		 */
		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		val1 &= val2;
		error = vie_update_register(vm, vcpuid, reg, val1, size);
		break;
	case 0x81:
		/*
		 * AND mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81/4		and r/m32, imm32
		 * REX.W + 81/4	and r/m64, imm32 sign-extended to 64
		 *
		 * Currently, only the AND operation of the 0x81 opcode
		 * is implemented (ModRM:reg = b100).
		 */
		if ((vie->reg & 7) != 4)
			break;

		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 &= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	default:
		break;
	}
	return (error);
}
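
/*
 * Illustrative example (added comment, not from the original source):
 * the byte sequence "81 20 ff 00 00 00" decodes as 'andl $0xff, (%rax)':
 * ModRM 0x20 gives mod=00, reg=100 (the required /4), rm=000.
 */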

static int
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	   mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	uint64_t val1;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x83:
		/*
		 * OR mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 83/1		or r/m32, imm8 sign-extended to 32
		 * REX.W + 83/1	or r/m64, imm8 sign-extended to 64
		 *
		 * Currently, only the OR operation of the 0x83 opcode
		 * is implemented (ModRM:reg = b001).
		 */
		if ((vie->reg & 7) != 1)
			break;

		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 |= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	default:
		break;
	}
	return (error);
}
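
/*
 * Illustrative example (added comment, not from the original source):
 * the byte sequence "83 08 04" decodes as 'orl $4, (%rax)': ModRM 0x08
 * gives mod=00, reg=001 (the required /1), rm=000, followed by the
 * imm8 operand 0x4.
 */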

int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
			mem_region_read_t memread, mem_region_write_t memwrite,
			void *memarg)
{
	int error;

	if (!vie->decoded)
		return (EINVAL);

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_OR:
		error = emulate_or(vm, vcpuid, gpa, vie,
				   memread, memwrite, memarg);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

#ifdef _KERNEL
void
vie_init(struct vie *vie)
{

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
}

static int
gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys,
	uint64_t *gpa, uint64_t *gpaend)
{
	vm_paddr_t hpa;
	int nlevels, ptpshift, ptpindex;
	uint64_t *ptpbase, pte, pgsize;

	/*
	 * XXX assumes 64-bit guest with 4 page walk levels
	 */
	nlevels = 4;
	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;
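
		/*
		 * Added note (not from the original source): the triple
		 * shift keeps bits 12..51 of 'ptpphys' and clears the
		 * rest, extracting the page-frame field of the paging
		 * structure entry without an explicit mask constant.
		 */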

		hpa = vm_gpa2hpa(vm, ptpphys, PAGE_SIZE);
		if (hpa == -1)
			goto error;

		ptpbase = (uint64_t *)PHYS_TO_DMAP(hpa);

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0)
			goto error;

		if (pte & PG_PS) {
			if (pgsize > 1 * GB)
				goto error;
			else
				break;
		}

		ptpphys = pte;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));
	*gpaend = pte + pgsize;
	return (0);

error:
	return (-1);
}

int
vmm_fetch_instruction(struct vm *vm, int cpuid, uint64_t rip, int inst_length,
		      uint64_t cr3, struct vie *vie)
{
	int n, err;
	uint64_t hpa, gpa, gpaend, off;

	/*
	 * XXX cache previously fetched instructions using 'rip' as the tag
	 */

	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	/* Copy the instruction into 'vie' */
	while (vie->num_valid < inst_length) {
		err = gla2gpa(vm, rip, cr3, &gpa, &gpaend);
		if (err)
			break;

		off = gpa & PAGE_MASK;
		n = min(inst_length - vie->num_valid, PAGE_SIZE - off);

		hpa = vm_gpa2hpa(vm, gpa, n);
		if (hpa == -1)
			break;

		bcopy((void *)PHYS_TO_DMAP(hpa), &vie->inst[vie->num_valid], n);

		rip += n;
		vie->num_valid += n;
	}

	if (vie->num_valid == inst_length)
		return (0);
	else
		return (-1);
}
#endif	/* _KERNEL */

static int
vie_peek(struct vie *vie, uint8_t *x)
{

	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	} else
		return (-1);
}

static void
vie_advance(struct vie *vie)
{

	vie->num_processed++;
}

static int
decode_rex(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	if (x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;

		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;

		vie_advance(vie);
	}

	return (0);
}
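
/*
 * Illustrative example (added comment, not from the original source):
 * the prefix byte 0x48 (REX.W) sets rex_w=1 and leaves rex_r, rex_x
 * and rex_b clear, selecting a 64-bit operand size for encodings such
 * as 'REX.W + 89/r'.
 */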

static int
decode_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);

	return (0);
}

static int
decode_modrm(struct vie *vie)
{
	uint8_t x;
	enum cpu_mode cpu_mode;

	/*
	 * XXX assuming that the guest is in IA-32E 64-bit mode
	 */
	cpu_mode = CPU_MODE_64BIT;

	if (vie_peek(vie, &x))
		return (-1);

	vie->mod = (x >> 6) & 0x3;
	vie->rm =  (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;
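
	/*
	 * Illustrative example (added comment, not from the original
	 * source): the ModRM byte 0x48 decodes as mod=01, reg=001,
	 * rm=000, i.e. a disp8-relative access through %rax with %rcx
	 * (or %r9 if REX.R is set) as the register operand.
	 */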

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is don't care in
		 * this case.
		 */
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	/* SIB byte follows the ModRM byte */
	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
		goto done;

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			/*
			 * Table 2-7. RIP-Relative Addressing
			 *
			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
			 * whereas in compatibility mode it just implies disp32.
			 */
			if (cpu_mode == CPU_MODE_64BIT)
				vie->base_register = VM_REG_GUEST_RIP;
			else
				vie->base_register = VM_REG_LAST;
		}
		break;
	}

done:
	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM)
		vie->imm_bytes = 4;
	else if (vie->op.op_flags & VIE_OP_F_IMM8)
		vie->imm_bytes = 1;

	vie_advance(vie);

	return (0);
}

static int
decode_sib(struct vie *vie)
{
	uint8_t x;

	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case when base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Documented in:
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Documented in:
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;

	vie_advance(vie);

	return (0);
}
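
/*
 * Illustrative example (added comment, not from the original source):
 * without a REX prefix, the SIB byte 0x98 decodes as ss=10, index=011,
 * base=000, i.e. the effective address %rax + %rbx * 4.
 */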

static int
decode_displacement(struct vie *vie)
{
	int n, i;
	uint8_t x;

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */

	return (0);
}

static int
decode_immediate(struct vie *vie)
{
	int i, n;
	uint8_t x;

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->imm_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_immediate: invalid imm_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->immediate = u.signed8;		/* sign-extended */
	else
		vie->immediate = u.signed32;		/* sign-extended */

	return (0);
}
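
/*
 * Illustrative example (added comment, not from the original source):
 * for 'or r/m32, imm8' (opcode 0x83) the immediate byte 0xfc decodes,
 * via u.signed8, to vie->immediate = -4 (0xfffffffffffffffc), so the
 * sign-extended value also works for 64-bit operands.
 */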

/*
 * Verify that all the bytes in the instruction buffer were consumed.
 */
static int
verify_inst_length(struct vie *vie)
{

	if (vie->num_processed == vie->num_valid)
		return (0);
	else
		return (-1);
}

/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches with our instruction decoding.
 */
static int
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{
	int error;
	uint64_t base, idx;

	/* Skip 'gla' verification */
	if (gla == VIE_INVALID_GLA)
		return (0);

	base = 0;
	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
				error, vie->base_register);
			return (-1);
		}

		/*
		 * RIP-relative addressing starts from the following
		 * instruction
		 */
		if (vie->base_register == VM_REG_GUEST_RIP)
			base += vie->num_valid;
	}

	idx = 0;
	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
				error, vie->index_register);
			return (-1);
		}
	}

	if (base + vie->scale * idx + vie->displacement != gla) {
		printf("verify_gla mismatch: "
		       "base(0x%0lx), scale(%d), index(0x%0lx), "
		       "disp(0x%0lx), gla(0x%0lx)\n",
		       base, vie->scale, idx, vie->displacement, gla);
		return (-1);
	}

	return (0);
}
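
/*
 * Illustrative example (added comment, not from the original source):
 * for 'mov %eax, 0x10(%rbx,%rcx,4)' with %rbx = 0x1000 and %rcx = 2,
 * the computed address 0x1000 + 4 * 2 + 0x10 = 0x1018 must match the
 * 'gla' reported with the nested page table fault.
 */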

int
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{

	if (decode_rex(vie))
		return (-1);

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

	if (verify_inst_length(vie))
		return (-1);

	if (verify_gla(vm, cpuid, gla, vie))
		return (-1);

	vie->decoded = 1;	/* success */

	return (0);
}
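
/*
 * Illustrative usage sketch (added comment, not from the original source):
 * an EPT-fault handler would typically drive this module roughly as
 * follows.  The mmio_read/mmio_write callbacks and the surrounding
 * handler are hypothetical placeholders.
 *
 *	vie_init(vie);
 *	if (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3, vie))
 *		return (EFAULT);
 *	if (vmm_decode_instruction(vm, vcpuid, gla, vie))
 *		return (EINVAL);
 *	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie,
 *	    mmio_read, mmio_write, memarg);
 */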