/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/systm.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>

#include <machine/vmm.h>

#include <assert.h>
#define	KASSERT(exp,msg)	assert((exp))
#endif	/* _KERNEL */

#include <machine/vmm_instruction_emul.h>
#include <x86/psl.h>
#include <x86/specialreg.h>
/* struct vie_op.op_type */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_MOVSX,
	VIE_OP_TYPE_MOVZX,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_OR,
	VIE_OP_TYPE_TWO_BYTE,
	VIE_OP_TYPE_LAST
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)	/* immediate operand present */
#define	VIE_OP_F_IMM8		(1 << 1)	/* 8-bit immediate operand */
static const struct vie_op two_byte_opcodes[256] = {
	[0xB6] = { .op_byte = 0xB6, .op_type = VIE_OP_TYPE_MOVZX },
	[0xBE] = { .op_byte = 0xBE, .op_type = VIE_OP_TYPE_MOVSX },
};

static const struct vie_op one_byte_opcodes[256] = {
	[0x0F] = { .op_byte = 0x0F, .op_type = VIE_OP_TYPE_TWO_BYTE },
	[0x88] = { .op_byte = 0x88, .op_type = VIE_OP_TYPE_MOV },
	[0x89] = { .op_byte = 0x89, .op_type = VIE_OP_TYPE_MOV },
	[0x8A] = { .op_byte = 0x8A, .op_type = VIE_OP_TYPE_MOV },
	[0x8B] = { .op_byte = 0x8B, .op_type = VIE_OP_TYPE_MOV },
	/* XXX Group 11 extended opcode - not just MOV */
	[0xC6] = { .op_byte = 0xC6, .op_type = VIE_OP_TYPE_MOV,
		   .op_flags = VIE_OP_F_IMM8 },
	[0xC7] = { .op_byte = 0xC7, .op_type = VIE_OP_TYPE_MOV,
		   .op_flags = VIE_OP_F_IMM },
	[0x23] = { .op_byte = 0x23, .op_type = VIE_OP_TYPE_AND },
	/* XXX Group 1 extended opcode - not just AND */
	[0x81] = { .op_byte = 0x81, .op_type = VIE_OP_TYPE_AND,
		   .op_flags = VIE_OP_F_IMM },
	/* XXX Group 1 extended opcode - not just OR */
	[0x83] = { .op_byte = 0x83, .op_type = VIE_OP_TYPE_OR,
		   .op_flags = VIE_OP_F_IMM8 },
};
/* struct vie.mod */
#define	VIE_MOD_INDIRECT	0
#define	VIE_MOD_INDIRECT_DISP8	1
#define	VIE_MOD_INDIRECT_DISP32	2
#define	VIE_MOD_DIRECT		3

/* struct vie.rm */
#define	VIE_RM_SIB		4
#define	VIE_RM_DISP32		5

#define	GB			(1024 * 1024 * 1024)

static enum vm_reg_name gpr_map[16] = {
	VM_REG_GUEST_RAX, VM_REG_GUEST_RCX, VM_REG_GUEST_RDX, VM_REG_GUEST_RBX,
	VM_REG_GUEST_RSP, VM_REG_GUEST_RBP, VM_REG_GUEST_RSI, VM_REG_GUEST_RDI,
	VM_REG_GUEST_R8,  VM_REG_GUEST_R9,  VM_REG_GUEST_R10, VM_REG_GUEST_R11,
	VM_REG_GUEST_R12, VM_REG_GUEST_R13, VM_REG_GUEST_R14, VM_REG_GUEST_R15
};

static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};
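/*
 * Example (illustrative): size2mask[2] is 0xffff, so ANDing with it
 * isolates the low word of a 64-bit register image while ANDing with
 * ~size2mask[2] preserves everything above it.  vie_update_register()
 * below uses these masks to emulate sub-register read-modify-write
 * updates.
 */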
static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
	int error;

	error = vm_get_register(vm, vcpuid, reg, rval);

	return (error);
}
static int
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
{
	int error, rshift;
	uint64_t val;
	enum vm_reg_name reg;

	rshift = 0;
	reg = gpr_map[vie->reg];

	/*
	 * 64-bit mode imposes limitations on accessing legacy byte registers.
	 *
	 * The legacy high-byte registers cannot be addressed if the REX
	 * prefix is present.  In this case the values 4, 5, 6 and 7 of the
	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
	 *
	 * If the REX prefix is not present then the values 4, 5, 6 and 7
	 * of the 'ModRM:reg' field address the legacy high-byte registers,
	 * %ah, %ch, %dh and %bh respectively.
	 */
	if (!vie->rex_present) {
		if (vie->reg & 0x4) {
			/*
			 * Obtain the value of %ah by reading %rax and shifting
			 * right by 8 bits (same for %bh, %ch and %dh).
			 */
			rshift = 8;
			reg = gpr_map[vie->reg & 0x3];
		}
	}

	error = vm_get_register(vm, vcpuid, reg, &val);
	*rval = val >> rshift;
	return (error);
}
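/*
 * Example: with no REX prefix and ModRM:reg = 5 (%ch), the code above
 * reads gpr_map[5 & 0x3] = %rcx and shifts right by 8, since %ch is
 * bits 15:8 of %rcx.  With a REX prefix present, ModRM:reg = 5 selects
 * %bpl, i.e. the low byte of %rbp, and no shift is applied.
 */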
static int
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
		    uint64_t val, int size)
{
	int error;
	uint64_t origval;

	switch (size) {
	case 1:
	case 2:
		error = vie_read_register(vm, vcpuid, reg, &origval);
		if (error)
			return (error);
		val &= size2mask[size];
		val |= origval & ~size2mask[size];
		break;
	case 4:
		val &= 0xffffffffUL;
		break;
	case 8:
		break;
	default:
		return (EINVAL);
	}

	error = vm_set_register(vm, vcpuid, reg, val);

	return (error);
}
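/*
 * Example: a 2-byte write of 0x1234 to a register holding
 * 0xdeadbeefcafef00d reads the original value, merges the low word and
 * stores 0xdeadbeefcafe1234.  A 4-byte write takes the 'case 4' path
 * instead, because x86 zero-extends 32-bit results into the upper half
 * of the destination register.
 */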
/*
 * The following simplifying assumptions are made during emulation:
 *
 * - guest is in 64-bit mode
 *   - default address size is 64-bits
 *   - default operand size is 32-bits
 *
 * - operand size override is not supported
 *
 * - address size override is not supported
 */
static int
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint8_t byte;
	uint64_t val;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x88:
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 88/r:	mov r/m8, r8
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;
		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
		if (error == 0)
			error = memwrite(vm, vcpuid, gpa, byte, size, arg);
		break;
	case 0x89:
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:	mov r/m32, r32
		 * REX.W + 89/r: mov r/m64, r64
		 */
		if (vie->rex_w)
			size = 8;
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0x8A:
	case 0x8B:
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8A/r:	mov r8, r/m8
		 * REX + 8A/r:	mov r8, r/m8
		 * 8B/r:	mov r32, r/m32
		 * REX.W 8B/r:	mov r64, r/m64
		 */
		if (vie->op.op_byte == 0x8A)
			size = 1;
		else if (vie->rex_w)
			size = 8;
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = gpr_map[vie->reg];
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xC6:
		/*
		 * MOV from imm8 to mem (ModRM:r/m)
		 * C6/0		mov r/m8, imm8
		 * REX + C6/0	mov r/m8, imm8
		 */
		size = 1;
		error = memwrite(vm, vcpuid, gpa, vie->immediate, size, arg);
		break;
	case 0xC7:
		/*
		 * MOV from imm32 to mem (ModRM:r/m)
		 * C7/0		mov r/m32, imm32
		 * REX.W + C7/0	mov r/m64, imm32 (sign-extended to 64-bits)
		 */
		val = vie->immediate;		/* already sign-extended */

		if (vie->rex_w)
			size = 8;

		if (size != 8)
			val &= size2mask[size];

		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		break;
	default:
		break;
	}

	return (error);
}
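/*
 * Example: "movq $-1, (%rax)" is encoded as REX.W + C7/0 with a 32-bit
 * immediate of 0xffffffff.  The decoded immediate is already
 * sign-extended to 0xffffffffffffffff, so the emulation above stores
 * all-ones with an 8-byte write, matching hardware behavior.
 */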
/*
 * The following simplifying assumptions are made during emulation:
 *
 * - guest is in 64-bit mode
 *   - default address size is 64-bits
 *   - default operand size is 32-bits
 *
 * - operand size override is not supported
 *
 * - address size override is not supported
 */
static int
emulate_movx(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	     mem_region_read_t memread, mem_region_write_t memwrite,
	     void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0xB6:
		/*
		 * MOV and zero extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B6/r		movzx r/m8, r32
		 * REX.W + 0F B6/r	movzx r/m8, r64
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);
		if (error)
			break;

		/* get the second operand */
		reg = gpr_map[vie->reg];

		if (vie->rex_w)
			size = 8;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);
		break;
	case 0xBE:
		/*
		 * MOV and sign extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F BE/r		movsx r/m8, r32
		 * REX.W + 0F BE/r	movsx r/m8, r64
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);
		if (error)
			break;

		/* get the second operand */
		reg = gpr_map[vie->reg];

		if (vie->rex_w)
			size = 8;

		/* sign extend byte */
		val = (int8_t)val;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);
		break;
	default:
		break;
	}

	return (error);
}
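/*
 * Example: if the byte at 'gpa' is 0x80 then movzx writes 0x00000080
 * to the destination register, while movsx casts through int8_t and
 * writes 0xffffff80 (or 0xffffffffffffff80 with REX.W).
 */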
static int
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val1, val2;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x23:
		/*
		 * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 23/r		and r32, r/m32
		 * REX.W + 23/r	and r64, r/m64
		 */
		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		val1 &= val2;
		error = vie_update_register(vm, vcpuid, reg, val1, size);
		break;
	case 0x81:
		/*
		 * AND mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81 /4		and r/m32, imm32
		 * REX.W + 81 /4	and r/m64, imm32 sign-extended to 64
		 *
		 * Currently, only the AND operation of the 0x81 opcode
		 * is implemented (ModRM:reg = b100).
		 */
		if ((vie->reg & 7) != 4)
			break;

		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 &= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	default:
		break;
	}

	return (error);
}
static int
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	   mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	uint64_t val1;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x83:
		/*
		 * OR mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 83 /1		or r/m32, imm8 sign-extended to 32
		 * REX.W + 83 /1	or r/m64, imm8 sign-extended to 64
		 *
		 * Currently, only the OR operation of the 0x83 opcode
		 * is implemented (ModRM:reg = b001).
		 */
		if ((vie->reg & 7) != 1)
			break;

		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 |= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	default:
		break;
	}

	return (error);
}
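/*
 * Example: "orq $-128, (%rax)" encodes as REX.W + 83 /1 with the single
 * immediate byte 0x80; decode_immediate() sign-extends it to
 * 0xffffffffffffff80, so the read-modify-write above sets bits 63:7 of
 * the memory operand.
 */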
int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
			mem_region_read_t memread, mem_region_write_t memwrite,
			void *memarg)
{
	int error;

	if (!vie->decoded)
		return (EINVAL);

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOVSX:
	case VIE_OP_TYPE_MOVZX:
		error = emulate_movx(vm, vcpuid, gpa, vie,
				     memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_OR:
		error = emulate_or(vm, vcpuid, gpa, vie,
				   memread, memwrite, memarg);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
int
vie_alignment_check(int cpl, int size, uint64_t cr0, uint64_t rf, uint64_t gla)
{
	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("%s: invalid size %d", __func__, size));
	KASSERT(cpl >= 0 && cpl <= 3, ("%s: invalid cpl %d", __func__, cpl));

	if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rf & PSL_AC) == 0)
		return (0);

	return ((gla & (size - 1)) ? 1 : 0);
}
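/*
 * Example: with CR0.AM and RFLAGS.AC both set at cpl 3, a 4-byte access
 * to gla 0x1002 returns 1 (0x1002 & 3 != 0) and the caller injects #AC;
 * the same access at cpl 0 returns 0 because alignment checking only
 * applies to user mode.
 */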
int
vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
{
	uint64_t mask;

	if (cpu_mode != CPU_MODE_64BIT)
		return (0);

	/*
	 * The value of bit 47 in the 'gla' should be replicated in the
	 * most significant 16 bits.
	 */
	mask = ~((1UL << 48) - 1);
	if (gla & (1UL << 47))
		return ((gla & mask) != mask);
	else
		return ((gla & mask) != 0);
}
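/*
 * Example: 0x00007fffffffffff and 0xffff800000000000 are canonical
 * (bits 63:48 replicate bit 47), but 0x0000800000000000 is not, so
 * this function would return 1 for it.
 */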
uint64_t
vie_size2mask(int size)
{
	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("vie_size2mask: invalid size %d", size));
	return (size2mask[size]);
}
int
vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t offset, int length, int addrsize,
    int prot, uint64_t *gla)
{
	uint64_t low_limit, high_limit, segbase;
	int glasize, type;

	KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
	    ("%s: invalid segment %d", __func__, seg));
	KASSERT(length == 1 || length == 2 || length == 4 || length == 8,
	    ("%s: invalid operand size %d", __func__, length));
	KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0,
	    ("%s: invalid prot %#x", __func__, prot));

	if (cpu_mode == CPU_MODE_64BIT) {
		KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address "
		    "size %d for cpu_mode %d", __func__, addrsize, cpu_mode));
		glasize = 8;
	} else {
		KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address "
		    "size %d for cpu mode %d", __func__, addrsize, cpu_mode));
		glasize = 4;

		/*
		 * If the segment selector is loaded with a NULL selector
		 * then the descriptor is unusable and attempting to use
		 * it results in a #GP(0).
		 */
		if (SEG_DESC_UNUSABLE(desc))
			return (-1);

		/*
		 * The processor generates a #NP exception when a segment
		 * register is loaded with a selector that points to a
		 * descriptor that is not present. If this was the case then
		 * it would have been checked before the VM-exit.
		 */
		KASSERT(SEG_DESC_PRESENT(desc), ("segment %d not present: %#x",
		    seg, desc->access));

		/*
		 * The descriptor type must indicate a code/data segment.
		 */
		type = SEG_DESC_TYPE(desc);
		KASSERT(type >= 16 && type <= 31, ("segment %d has invalid "
		    "descriptor type %#x", seg, type));

		if (prot & PROT_READ) {
			/* #GP on a read access to an exec-only code segment */
			if ((type & 0xA) == 0x8)
				return (-1);
		}

		if (prot & PROT_WRITE) {
			/*
			 * #GP on a write access to a code segment or a
			 * read-only data segment.
			 */
			if (type & 0x8)			/* code segment */
				return (-1);

			if ((type & 0xA) == 0)		/* read-only data seg */
				return (-1);
		}

		/*
		 * 'desc->limit' is fully expanded taking granularity into
		 * account.
		 */
		if ((type & 0xC) == 0x4) {
			/* expand-down data segment */
			low_limit = desc->limit + 1;
			high_limit = SEG_DESC_DEF32(desc) ? 0xffffffff : 0xffff;
		} else {
			/* code segment or expand-up data segment */
			low_limit = 0;
			high_limit = desc->limit;
		}

		offset &= vie_size2mask(addrsize);
		if (offset < low_limit || offset > high_limit)
			return (-1);
	}

	/*
	 * In 64-bit mode all segments except %fs and %gs have a segment
	 * base address of 0.
	 */
	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
	    seg != VM_REG_GUEST_GS) {
		segbase = 0;
	} else {
		segbase = desc->base;
	}

	/*
	 * Truncate 'offset' to the effective address size before adding
	 * it to the segment base.
	 */
	offset &= vie_size2mask(addrsize);
	*gla = (segbase + offset) & vie_size2mask(glasize);
	return (0);
}
#ifdef _KERNEL
void
vie_init(struct vie *vie)
{

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
}
static int
pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
{
	int error_code = 0;

	if (pte & PG_V)
		error_code |= PGEX_P;
	if (prot & VM_PROT_WRITE)
		error_code |= PGEX_W;
	if (usermode)
		error_code |= PGEX_U;
	if (rsvd)
		error_code |= PGEX_RSV;
	if (prot & VM_PROT_EXECUTE)
		error_code |= PGEX_I;

	return (error_code);
}
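/*
 * Example: a user-mode write that hits a present but read-only mapping
 * produces PGEX_P | PGEX_W | PGEX_U (error code 0x7), exactly what the
 * MMU would push for the #PF.
 */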
static void
ptp_release(void **cookie)
{
	if (*cookie != NULL) {
		vm_gpa_release(*cookie);
		*cookie = NULL;
	}
}
static void *
ptp_hold(struct vm *vm, vm_paddr_t ptpphys, size_t len, void **cookie)
{
	void *ptr;

	ptp_release(cookie);
	ptr = vm_gpa_hold(vm, ptpphys, len, VM_PROT_RW, cookie);
	return (ptr);
}
int
vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa)
{
	int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
	u_int retries;
	uint64_t *ptpbase, ptpphys, pte, pgsize;
	uint32_t *ptpbase32, pte32;
	void *cookie;

	usermode = (paging->cpl == 3 ? 1 : 0);
	writable = prot & VM_PROT_WRITE;
	cookie = NULL;
	retval = 0;
	retries = 0;
restart:
	ptpphys = paging->cr3;		/* root of the page tables */
	ptp_release(&cookie);
	if (retries++ > 0)
		maybe_yield();

	if (vie_canonical_check(paging->cpu_mode, gla)) {
		/*
		 * XXX assuming a non-stack reference otherwise a stack fault
		 * should be generated.
		 */
		vm_inject_gp(vm, vcpuid);
		goto fault;
	}

	if (paging->paging_mode == PAGING_MODE_FLAT) {
		*gpa = gla;
		goto done;
	}

	if (paging->paging_mode == PAGING_MODE_32) {
		nlevels = 2;
		while (--nlevels >= 0) {
			/* Zero out the lower 12 bits. */
			ptpphys &= ~0xfff;

			ptpbase32 = ptp_hold(vm, ptpphys, PAGE_SIZE, &cookie);

			if (ptpbase32 == NULL)
				goto error;

			ptpshift = PAGE_SHIFT + nlevels * 10;
			ptpindex = (gla >> ptpshift) & 0x3FF;
			pgsize = 1UL << ptpshift;

			pte32 = ptpbase32[ptpindex];

			if ((pte32 & PG_V) == 0 ||
			    (usermode && (pte32 & PG_U) == 0) ||
			    (writable && (pte32 & PG_RW) == 0)) {
				pfcode = pf_error_code(usermode, prot, 0,
				    pte32);
				vm_inject_pf(vm, vcpuid, pfcode, gla);
				goto fault;
			}

			/*
			 * Emulate the x86 MMU's management of the accessed
			 * and dirty flags. While the accessed flag is set
			 * at every level of the page table, the dirty flag
			 * is only set at the last level providing the guest
			 * physical address.
			 */
			if ((pte32 & PG_A) == 0) {
				if (atomic_cmpset_32(&ptpbase32[ptpindex],
				    pte32, pte32 | PG_A) == 0) {
					goto restart;
				}
			}

			/* XXX must be ignored if CR4.PSE=0 */
			if (nlevels > 0 && (pte32 & PG_PS) != 0)
				break;

			ptpphys = pte32;
		}

		/* Set the dirty bit in the page table entry if necessary */
		if (writable && (pte32 & PG_M) == 0) {
			if (atomic_cmpset_32(&ptpbase32[ptpindex],
			    pte32, pte32 | PG_M) == 0) {
				goto restart;
			}
		}

		/* Zero out the lower 'ptpshift' bits */
		pte32 >>= ptpshift; pte32 <<= ptpshift;
		*gpa = pte32 | (gla & (pgsize - 1));
		goto done;
	}
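/*
 * Example: with 2-level paging the first iteration above uses ptpshift
 * 22 (4MB PDEs) and the second ptpshift 12 (4KB PTEs).  A gla of
 * 0x00c01234 indexes PDE 3 and PTE 1, and the resulting gpa is the PTE
 * frame ORed with the low 12 bits (0x234) of the gla.
 */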
	if (paging->paging_mode == PAGING_MODE_PAE) {
		/* Zero out the lower 5 bits and the upper 32 bits */
		ptpphys &= 0xffffffe0UL;

		ptpbase = ptp_hold(vm, ptpphys, sizeof(*ptpbase) * 4, &cookie);
		if (ptpbase == NULL)
			goto error;

		ptpindex = (gla >> 30) & 0x3;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);
			goto fault;
		}

		ptpphys = pte;

		nlevels = 2;
	} else
		nlevels = 4;

	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		ptpbase = ptp_hold(vm, ptpphys, PAGE_SIZE, &cookie);
		if (ptpbase == NULL)
			goto error;

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0 ||
		    (usermode && (pte & PG_U) == 0) ||
		    (writable && (pte & PG_RW) == 0)) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);
			goto fault;
		}

		/* Set the accessed bit in the page table entry */
		if ((pte & PG_A) == 0) {
			if (atomic_cmpset_64(&ptpbase[ptpindex],
			    pte, pte | PG_A) == 0) {
				goto restart;
			}
		}

		if (nlevels > 0 && (pte & PG_PS) != 0) {
			if (pgsize > 1 * GB) {
				pfcode = pf_error_code(usermode, prot, 1, pte);
				vm_inject_pf(vm, vcpuid, pfcode, gla);
				goto fault;
			}
			break;
		}

		ptpphys = pte;
	}

	/* Set the dirty bit in the page table entry if necessary */
	if (writable && (pte & PG_M) == 0) {
		if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_M) == 0)
			goto restart;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));
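/*
 * Example: in 4-level mode the loop above visits the PML4, PDPT, PD and
 * PT with ptpshift values 39, 30, 21 and 12, extracting a 9-bit index
 * (gla >> ptpshift) & 0x1FF at each level.  A PG_PS hit at ptpshift 21
 * terminates the walk early with a 2MB pgsize.
 */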
done:
	ptp_release(&cookie);
	return (retval);

error:
	retval = -1;
	goto done;

fault:
	retval = 1;
	goto done;
}
int
vmm_fetch_instruction(struct vm *vm, int cpuid, struct vm_guest_paging *paging,
    uint64_t rip, int inst_length, struct vie *vie)
{
	int n, error, prot;
	uint64_t gpa, off;
	void *hpa, *cookie;

	/*
	 * XXX cache previously fetched instructions using 'rip' as the tag
	 */

	prot = VM_PROT_READ | VM_PROT_EXECUTE;
	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	/* Copy the instruction into 'vie' */
	while (vie->num_valid < inst_length) {
		error = vmm_gla2gpa(vm, cpuid, paging, rip, prot, &gpa);
		if (error)
			return (error);

		off = gpa & PAGE_MASK;
		n = min(inst_length - vie->num_valid, PAGE_SIZE - off);

		if ((hpa = vm_gpa_hold(vm, gpa, n, prot, &cookie)) == NULL)
			break;

		bcopy(hpa, &vie->inst[vie->num_valid], n);

		vm_gpa_release(cookie);

		rip += n;
		vie->num_valid += n;
	}

	if (vie->num_valid == inst_length)
		return (0);
	else
		return (-1);
}
static int
vie_peek(struct vie *vie, uint8_t *x)
{

	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	} else
		return (-1);
}
static void
vie_advance(struct vie *vie)
{

	vie->num_processed++;
}
static int
decode_rex(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	if (x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;

		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;

		vie_advance(vie);
	}

	return (0);
}
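/*
 * Example: the prefix byte 0x4D is 0100WRXB with W=1, R=1, X=0, B=1,
 * i.e. a 64-bit operand size with the ModRM 'reg' and 'rm' fields
 * extended to reach %r8-%r15.
 */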
static int
decode_two_byte_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = two_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);
	return (0);
}
static int
decode_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);

	if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE)
		return (decode_two_byte_opcode(vie));

	return (0);
}
static int
decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->mod = (x >> 6) & 0x3;
	vie->rm = (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is don't care in
		 * these two cases.
		 */
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	/* SIB */
	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
		goto done;

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			/*
			 * Table 2-7. RIP-Relative Addressing
			 *
			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
			 * whereas in compatibility mode it just implies disp32.
			 */
			if (cpu_mode == CPU_MODE_64BIT)
				vie->base_register = VM_REG_GUEST_RIP;
			else
				vie->base_register = VM_REG_LAST;
		}
		break;
	}

done:
	vie_advance(vie);

	return (0);
}
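/*
 * Example: in 64-bit mode the ModRM byte 0x05 (mod=0, reg=0, rm=5)
 * selects RIP-relative addressing with a disp32, so 'base_register' is
 * set to VM_REG_GUEST_RIP; the same byte in compatibility mode means a
 * bare disp32 with no base register.
 */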
static int
decode_sib(struct vie *vie)
{
	uint8_t x;

	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case when base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Documented in:
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Documented in:
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;

	vie_advance(vie);

	return (0);
}
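/*
 * Example: with no REX prefix the SIB byte 0x91 (ss=2, index=2, base=1)
 * describes the effective address %rcx + 4 * %rdx; 'scale' is 1 << 2.
 */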
static int
decode_displacement(struct vie *vie)
{
	int n, i;
	uint8_t x;

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */

	return (0);
}
static int
decode_immediate(struct vie *vie)
{
	int i, n;
	uint8_t x;

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM)
		vie->imm_bytes = 4;
	else if (vie->op.op_flags & VIE_OP_F_IMM8)
		vie->imm_bytes = 1;

	if ((n = vie->imm_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_immediate: invalid imm_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->immediate = u.signed8;		/* sign-extended */
	else
		vie->immediate = u.signed32;		/* sign-extended */

	return (0);
}
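/*
 * Example: the immediate byte 0xF0 decodes through u.signed8 as -16,
 * so vie->immediate becomes 0xfffffffffffffff0; the AND/OR emulation
 * above relies on this pre-computed sign extension.
 */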
/*
 * Verify that all the bytes in the instruction buffer were consumed.
 */
static int
verify_inst_length(struct vie *vie)
{

	if (vie->num_processed == vie->num_valid)
		return (0);
	else
		return (-1);
}
/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches with our instruction decoding.
 */
static int
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{
	int error;
	uint64_t base, idx;

	/* Skip 'gla' verification */
	if (gla == VIE_INVALID_GLA)
		return (0);

	base = 0;
	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
				error, vie->base_register);
			return (-1);
		}

		/*
		 * RIP-relative addressing starts from the following
		 * instruction
		 */
		if (vie->base_register == VM_REG_GUEST_RIP)
			base += vie->num_valid;
	}

	idx = 0;
	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
				error, vie->index_register);
			return (-1);
		}
	}

	if (base + vie->scale * idx + vie->displacement != gla) {
		printf("verify_gla mismatch: "
		       "base(0x%0lx), scale(%d), index(0x%0lx), "
		       "disp(0x%0lx), gla(0x%0lx)\n",
		       base, vie->scale, idx, vie->displacement, gla);
		return (-1);
	}

	return (0);
}
int
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
    enum vm_cpu_mode cpu_mode, struct vie *vie)
{

	if (cpu_mode == CPU_MODE_64BIT) {
		if (decode_rex(vie))
			return (-1);
	}

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie, cpu_mode))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

	if (verify_inst_length(vie))
		return (-1);

	if (verify_gla(vm, cpuid, gla, vie))
		return (-1);

	vie->decoded = 1;	/* success */

	return (0);
}
#endif	/* _KERNEL */