/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#else /* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>

#include <machine/vmm.h>

#include <assert.h>
#include <vmmapi.h>
#define KASSERT(exp, msg) assert((exp))
#endif /* _KERNEL */

#include <machine/vmm_instruction_emul.h>
#include <x86/psl.h>
#include <x86/specialreg.h>
/* struct vie_op.op_type */
enum {
    VIE_OP_TYPE_NONE = 0,
    VIE_OP_TYPE_MOV,
    VIE_OP_TYPE_MOVSX,
    VIE_OP_TYPE_MOVZX,
    VIE_OP_TYPE_AND,
    VIE_OP_TYPE_OR,
    VIE_OP_TYPE_TWO_BYTE,
    VIE_OP_TYPE_LAST
};

/* struct vie_op.op_flags */
#define VIE_OP_F_IMM     (1 << 0)  /* 16/32-bit immediate operand */
#define VIE_OP_F_IMM8    (1 << 1)  /* 8-bit immediate operand */
#define VIE_OP_F_MOFFSET (1 << 2)  /* 16/32/64-bit immediate moffset */
static const struct vie_op two_byte_opcodes[256] = {
    [0xB6] = { .op_byte = 0xB6, .op_type = VIE_OP_TYPE_MOVZX },
    [0xBE] = { .op_byte = 0xBE, .op_type = VIE_OP_TYPE_MOVSX },
};

static const struct vie_op one_byte_opcodes[256] = {
    [0x0F] = { .op_byte = 0x0F, .op_type = VIE_OP_TYPE_TWO_BYTE },
    [0x88] = { .op_byte = 0x88, .op_type = VIE_OP_TYPE_MOV },
    [0x89] = { .op_byte = 0x89, .op_type = VIE_OP_TYPE_MOV },
    [0x8A] = { .op_byte = 0x8A, .op_type = VIE_OP_TYPE_MOV },
    [0x8B] = { .op_byte = 0x8B, .op_type = VIE_OP_TYPE_MOV },
    /* XXX Group 11 extended opcode - not just MOV */
    [0xC6] = { .op_byte = 0xC6, .op_type = VIE_OP_TYPE_MOV,
               .op_flags = VIE_OP_F_IMM8 },
    [0xC7] = { .op_byte = 0xC7, .op_type = VIE_OP_TYPE_MOV,
               .op_flags = VIE_OP_F_IMM },
    [0x23] = { .op_byte = 0x23, .op_type = VIE_OP_TYPE_AND },
    /* XXX Group 1 extended opcode - not just AND */
    [0x81] = { .op_byte = 0x81, .op_type = VIE_OP_TYPE_AND,
               .op_flags = VIE_OP_F_IMM },
    /* XXX Group 1 extended opcode - not just OR */
    [0x83] = { .op_byte = 0x83, .op_type = VIE_OP_TYPE_OR,
               .op_flags = VIE_OP_F_IMM8 },
};
/* struct vie.mod */
#define VIE_MOD_INDIRECT        0
#define VIE_MOD_INDIRECT_DISP8  1
#define VIE_MOD_INDIRECT_DISP32 2
#define VIE_MOD_DIRECT          3

/* struct vie.rm */
#define VIE_RM_SIB    4
#define VIE_RM_DISP32 5

#define GB (1024 * 1024 * 1024)

static enum vm_reg_name gpr_map[16] = {
    VM_REG_GUEST_RAX, VM_REG_GUEST_RCX, VM_REG_GUEST_RDX, VM_REG_GUEST_RBX,
    VM_REG_GUEST_RSP, VM_REG_GUEST_RBP, VM_REG_GUEST_RSI, VM_REG_GUEST_RDI,
    VM_REG_GUEST_R8,  VM_REG_GUEST_R9,  VM_REG_GUEST_R10, VM_REG_GUEST_R11,
    VM_REG_GUEST_R12, VM_REG_GUEST_R13, VM_REG_GUEST_R14, VM_REG_GUEST_R15
};

static uint64_t size2mask[] = {
    [1] = 0xff,
    [2] = 0xffff,
    [4] = 0xffffffff,
    [8] = 0xffffffffffffffff,
};
static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
    int error;

    error = vm_get_register(vm, vcpuid, reg, rval);

    return (error);
}
static void
vie_calc_bytereg(struct vie *vie, enum vm_reg_name *reg, int *lhbr)
{
    *lhbr = 0;
    *reg = gpr_map[vie->reg];

    /*
     * 64-bit mode imposes limitations on accessing legacy high byte
     * registers (lhbr).
     *
     * The legacy high-byte registers cannot be addressed if the REX
     * prefix is present. In this case the values 4, 5, 6 and 7 of the
     * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
     *
     * If the REX prefix is not present then the values 4, 5, 6 and 7
     * of the 'ModRM:reg' field address the legacy high-byte registers,
     * %ah, %ch, %dh and %bh respectively.
     */
    if (!vie->rex_present) {
        if (vie->reg & 0x4) {
            *lhbr = 1;
            *reg = gpr_map[vie->reg & 0x3];
        }
    }
}
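/*
 * For example, 'ModRM:reg' = 5 with no REX prefix selects %ch
 * (lhbr = 1, gpr_map[5 & 0x3] = %rcx), while the same encoding with a
 * REX prefix selects %bpl (gpr_map[5] = %rbp).
 */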
static int
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
{
    uint64_t val;
    int error, lhbr;
    enum vm_reg_name reg;

    vie_calc_bytereg(vie, &reg, &lhbr);
    error = vm_get_register(vm, vcpuid, reg, &val);

    /*
     * To obtain the value of a legacy high byte register shift the
     * base register right by 8 bits (%ah = %rax >> 8).
     */
    if (lhbr)
        *rval = val >> 8;
    else
        *rval = val;
    return (error);
}
static int
vie_write_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t byte)
{
    uint64_t origval, val, mask;
    int error, lhbr;
    enum vm_reg_name reg;

    vie_calc_bytereg(vie, &reg, &lhbr);
    error = vm_get_register(vm, vcpuid, reg, &origval);
    if (error == 0) {
        val = byte;
        mask = 0xff;
        if (lhbr) {
            /*
             * Shift left by 8 to store 'byte' in a legacy high
             * byte register.
             */
            val <<= 8;
            mask <<= 8;
        }
        val |= origval & ~mask;
        error = vm_set_register(vm, vcpuid, reg, val);
    }
    return (error);
}
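/*
 * For example, writing byte 0x7f to %ah when %rax = 0x1234 uses
 * val = 0x7f00 and mask = 0xff00, so the register is updated to
 * (0x7f00 | (0x1234 & ~0xff00)) = 0x7f34.
 */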
int
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
    uint64_t val, int size)
{
    int error;
    uint64_t origval;

    switch (size) {
    case 1:
    case 2:
        error = vie_read_register(vm, vcpuid, reg, &origval);
        if (error)
            return (error);
        val &= size2mask[size];
        val |= origval & ~size2mask[size];
        break;
    case 4:
        val &= 0xffffffffUL;
        break;
    case 8:
        break;
    default:
        return (EINVAL);
    }

    error = vm_set_register(vm, vcpuid, reg, val);

    return (error);
}
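/*
 * For example, a 2-byte update of a register holding 0x11223344 with
 * val = 0xaabb preserves the upper bytes and stores 0x1122aabb, while
 * a 4-byte update zero-extends, matching the behavior of 32-bit
 * destination writes in 64-bit mode.
 */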
static int
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
    int error, size;
    enum vm_reg_name reg;
    uint8_t byte;
    uint64_t val;

    size = vie->opsize;
    error = EINVAL;

    switch (vie->op.op_byte) {
    case 0x88:
        /*
         * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
         * 88/r:        mov r/m8, r8
         * REX + 88/r:  mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
         */
        size = 1;    /* override for byte operation */
        error = vie_read_bytereg(vm, vcpuid, vie, &byte);
        if (error == 0)
            error = memwrite(vm, vcpuid, gpa, byte, size, arg);
        break;
    case 0x89:
        /*
         * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
         * 89/r:        mov r/m16, r16
         * 89/r:        mov r/m32, r32
         * REX.W + 89/r mov r/m64, r64
         */
        reg = gpr_map[vie->reg];
        error = vie_read_register(vm, vcpuid, reg, &val);
        if (error == 0) {
            val &= size2mask[size];
            error = memwrite(vm, vcpuid, gpa, val, size, arg);
        }
        break;
    case 0x8A:
        /*
         * MOV byte from mem (ModRM:r/m) to reg (ModRM:reg)
         * 8A/r:        mov r8, r/m8
         * REX + 8A/r:  mov r8, r/m8
         */
        size = 1;    /* override for byte operation */
        error = memread(vm, vcpuid, gpa, &val, size, arg);
        if (error == 0)
            error = vie_write_bytereg(vm, vcpuid, vie, val);
        break;
    case 0x8B:
        /*
         * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
         * 8B/r:        mov r16, r/m16
         * 8B/r:        mov r32, r/m32
         * REX.W 8B/r:  mov r64, r/m64
         */
        error = memread(vm, vcpuid, gpa, &val, size, arg);
        if (error == 0) {
            reg = gpr_map[vie->reg];
            error = vie_update_register(vm, vcpuid, reg, val, size);
        }
        break;
    case 0xC6:
        /*
         * MOV from imm8 to mem (ModRM:r/m)
         * C6/0         mov r/m8, imm8
         * REX + C6/0   mov r/m8, imm8
         */
        size = 1;    /* override for byte operation */
        error = memwrite(vm, vcpuid, gpa, vie->immediate, size, arg);
        break;
    case 0xC7:
        /*
         * MOV from imm16/imm32 to mem (ModRM:r/m)
         * C7/0         mov r/m16, imm16
         * C7/0         mov r/m32, imm32
         * REX.W + C7/0 mov r/m64, imm32 (sign-extended to 64-bits)
         */
        val = vie->immediate & size2mask[size];
        error = memwrite(vm, vcpuid, gpa, val, size, arg);
        break;
    default:
        break;
    }

    return (error);
}
static int
emulate_movx(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite,
    void *arg)
{
    int error, size;
    enum vm_reg_name reg;
    uint64_t val;

    size = vie->opsize;
    error = EINVAL;

    switch (vie->op.op_byte) {
    case 0xB6:
        /*
         * MOV and zero extend byte from mem (ModRM:r/m) to
         * reg (ModRM:reg).
         *
         * 0F B6/r              movzx r16, r/m8
         * 0F B6/r              movzx r32, r/m8
         * REX.W + 0F B6/r      movzx r64, r/m8
         */

        /* get the first operand */
        error = memread(vm, vcpuid, gpa, &val, 1, arg);
        if (error)
            break;

        /* get the second operand */
        reg = gpr_map[vie->reg];

        /* zero-extend byte */
        val = (uint8_t)val;

        /* write the result */
        error = vie_update_register(vm, vcpuid, reg, val, size);
        break;
    case 0xBE:
        /*
         * MOV and sign extend byte from mem (ModRM:r/m) to
         * reg (ModRM:reg).
         *
         * 0F BE/r              movsx r16, r/m8
         * 0F BE/r              movsx r32, r/m8
         * REX.W + 0F BE/r      movsx r64, r/m8
         */

        /* get the first operand */
        error = memread(vm, vcpuid, gpa, &val, 1, arg);
        if (error)
            break;

        /* get the second operand */
        reg = gpr_map[vie->reg];

        /* sign extend byte */
        val = (int8_t)val;

        /* write the result */
        error = vie_update_register(vm, vcpuid, reg, val, size);
        break;
    default:
        break;
    }
    return (error);
}
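/*
 * For example, if the byte read from memory is 0x80, MOVSX with a
 * 2-byte operand size stores 0xff80 (sign-extended) in the destination
 * register, whereas MOVZX stores 0x0080 (zero-extended).
 */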
static int
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
    int error, size;
    enum vm_reg_name reg;
    uint64_t val1, val2;

    size = vie->opsize;
    error = EINVAL;

    switch (vie->op.op_byte) {
    case 0x23:
        /*
         * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
         * result in reg.
         *
         * 23/r         and r16, r/m16
         * 23/r         and r32, r/m32
         * REX.W + 23/r and r64, r/m64
         */

        /* get the first operand */
        reg = gpr_map[vie->reg];
        error = vie_read_register(vm, vcpuid, reg, &val1);
        if (error)
            break;

        /* get the second operand */
        error = memread(vm, vcpuid, gpa, &val2, size, arg);
        if (error)
            break;

        /* perform the operation and write the result */
        val1 &= val2;
        error = vie_update_register(vm, vcpuid, reg, val1, size);
        break;
    case 0x81:
        /*
         * AND mem (ModRM:r/m) with immediate and store the
         * result in mem.
         *
         * 81 /4                and r/m16, imm16
         * 81 /4                and r/m32, imm32
         * REX.W + 81 /4        and r/m64, imm32 sign-extended to 64
         *
         * Currently, only the AND operation of the 0x81 opcode
         * is implemented (ModRM:reg = b100).
         */
        if ((vie->reg & 7) != 4)
            break;

        /* get the first operand */
        error = memread(vm, vcpuid, gpa, &val1, size, arg);
        if (error)
            break;

        /*
         * perform the operation with the pre-fetched immediate
         * operand and write the result
         */
        val1 &= vie->immediate;
        error = memwrite(vm, vcpuid, gpa, val1, size, arg);
        break;
    default:
        break;
    }
    return (error);
}
static int
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
    int error, size;
    uint64_t val1;

    size = vie->opsize;
    error = EINVAL;

    switch (vie->op.op_byte) {
    case 0x83:
        /*
         * OR mem (ModRM:r/m) with immediate and store the
         * result in mem.
         *
         * 83 /1        OR r/m16, imm8 sign-extended to 16
         * 83 /1        OR r/m32, imm8 sign-extended to 32
         * REX.W + 83/1 OR r/m64, imm8 sign-extended to 64
         *
         * Currently, only the OR operation of the 0x83 opcode
         * is implemented (ModRM:reg = b001).
         */
        if ((vie->reg & 7) != 1)
            break;

        /* get the first operand */
        error = memread(vm, vcpuid, gpa, &val1, size, arg);
        if (error)
            break;

        /*
         * perform the operation with the pre-fetched immediate
         * operand and write the result
         */
        val1 |= vie->immediate;
        error = memwrite(vm, vcpuid, gpa, val1, size, arg);
        break;
    default:
        break;
    }
    return (error);
}
int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite,
    void *memarg)
{
    int error;

    if (!vie->decoded)
        return (EINVAL);

    switch (vie->op.op_type) {
    case VIE_OP_TYPE_MOV:
        error = emulate_mov(vm, vcpuid, gpa, vie,
            memread, memwrite, memarg);
        break;
    case VIE_OP_TYPE_MOVSX:
    case VIE_OP_TYPE_MOVZX:
        error = emulate_movx(vm, vcpuid, gpa, vie,
            memread, memwrite, memarg);
        break;
    case VIE_OP_TYPE_AND:
        error = emulate_and(vm, vcpuid, gpa, vie,
            memread, memwrite, memarg);
        break;
    case VIE_OP_TYPE_OR:
        error = emulate_or(vm, vcpuid, gpa, vie,
            memread, memwrite, memarg);
        break;
    default:
        error = EINVAL;
        break;
    }

    return (error);
}
int
vie_alignment_check(int cpl, int size, uint64_t cr0, uint64_t rf, uint64_t gla)
{
    KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
        ("%s: invalid size %d", __func__, size));
    KASSERT(cpl >= 0 && cpl <= 3, ("%s: invalid cpl %d", __func__, cpl));

    if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rf & PSL_AC) == 0)
        return (0);

    return ((gla & (size - 1)) ? 1 : 0);
}
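/*
 * For example, with cpl = 3, CR0.AM = 1 and EFLAGS.AC = 1, a 4-byte
 * access to gla 0x1002 returns 1 (0x1002 & 3 = 2, so the access would
 * raise #AC), while the same access to 0x1004 returns 0.
 */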
int
vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
{
    uint64_t mask;

    if (cpu_mode != CPU_MODE_64BIT)
        return (0);

    /*
     * The value of bit 47 in the 'gla' should be replicated in the
     * most significant 16 bits.
     */
    mask = ~((1UL << 48) - 1);
    if (gla & (1UL << 47))
        return ((gla & mask) != mask);
    else
        return ((gla & mask) != 0);
}
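/*
 * For example, 0xffff800000000000 is canonical (bit 47 is 1 and the
 * upper 16 bits are all 1s) while 0x0000800000000000 is not (bit 47 is
 * 1 but the upper 16 bits are 0s), so the latter returns 1.
 */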
uint64_t
vie_size2mask(int size)
{
    KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
        ("vie_size2mask: invalid size %d", size));
    return (size2mask[size]);
}
int
vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t offset, int length, int addrsize,
    int prot, uint64_t *gla)
{
    uint64_t firstoff, low_limit, high_limit, segbase;
    int glasize, type;

    KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
        ("%s: invalid segment %d", __func__, seg));
    KASSERT(length == 1 || length == 2 || length == 4 || length == 8,
        ("%s: invalid operand size %d", __func__, length));
    KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0,
        ("%s: invalid prot %#x", __func__, prot));

    firstoff = offset;
    if (cpu_mode == CPU_MODE_64BIT) {
        KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address "
            "size %d for cpu_mode %d", __func__, addrsize, cpu_mode));
        glasize = 8;
    } else {
        KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address "
            "size %d for cpu mode %d", __func__, addrsize, cpu_mode));
        glasize = 4;

        /*
         * If the segment selector is loaded with a NULL selector
         * then the descriptor is unusable and attempting to use
         * it results in a #GP(0).
         */
        if (SEG_DESC_UNUSABLE(desc->access))
            return (-1);

        /*
         * The processor generates a #NP exception when a segment
         * register is loaded with a selector that points to a
         * descriptor that is not present. If this was the case then
         * it would have been checked before the VM-exit.
         */
        KASSERT(SEG_DESC_PRESENT(desc->access),
            ("segment %d not present: %#x", seg, desc->access));

        /*
         * The descriptor type must indicate a code/data segment.
         */
        type = SEG_DESC_TYPE(desc->access);
        KASSERT(type >= 16 && type <= 31, ("segment %d has invalid "
            "descriptor type %#x", seg, type));

        if (prot & PROT_READ) {
            /* #GP on a read access to an exec-only code segment */
            if ((type & 0xA) == 0x8)
                return (-1);
        }

        if (prot & PROT_WRITE) {
            /*
             * #GP on a write access to a code segment or a
             * read-only data segment.
             */
            if (type & 0x8)            /* code segment */
                return (-1);

            if ((type & 0xA) == 0)     /* read-only data seg */
                return (-1);
        }

        /*
         * 'desc->limit' is fully expanded taking granularity into
         * account.
         */
        if ((type & 0xC) == 0x4) {
            /* expand-down data segment */
            low_limit = desc->limit + 1;
            high_limit = SEG_DESC_DEF32(desc->access) ?
                0xffffffff : 0xffff;
        } else {
            /* code segment or expand-up data segment */
            low_limit = 0;
            high_limit = desc->limit;
        }

        while (length > 0) {
            offset &= vie_size2mask(addrsize);
            if (offset < low_limit || offset > high_limit)
                return (-1);
            offset++;
            length--;
        }
    }

    /*
     * In 64-bit mode all segments except %fs and %gs have a segment
     * base address of 0.
     */
    if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
        seg != VM_REG_GUEST_GS) {
        segbase = 0;
    } else {
        segbase = desc->base;
    }

    /*
     * Truncate 'firstoff' to the effective address size before adding
     * it to the segment base.
     */
    firstoff &= vie_size2mask(addrsize);
    *gla = (segbase + firstoff) & vie_size2mask(glasize);
    return (0);
}
void
vie_init(struct vie *vie)
{

    bzero(vie, sizeof(struct vie));

    vie->base_register = VM_REG_LAST;
    vie->index_register = VM_REG_LAST;
}
#ifdef _KERNEL
static int
pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
{
    int error_code = 0;

    if (pte & PG_V)
        error_code |= PGEX_P;
    if (prot & VM_PROT_WRITE)
        error_code |= PGEX_W;
    if (usermode)
        error_code |= PGEX_U;
    if (rsvd)
        error_code |= PGEX_RSV;
    if (prot & VM_PROT_EXECUTE)
        error_code |= PGEX_I;

    return (error_code);
}
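/*
 * For example, a user-mode write that faults on a present, read-only
 * page yields PGEX_P | PGEX_W | PGEX_U.
 */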
static void
ptp_release(void **cookie)
{
    if (*cookie != NULL) {
        vm_gpa_release(*cookie);
        *cookie = NULL;
    }
}
static void *
ptp_hold(struct vm *vm, vm_paddr_t ptpphys, size_t len, void **cookie)
{
    void *ptr;

    ptp_release(cookie);
    ptr = vm_gpa_hold(vm, ptpphys, len, VM_PROT_RW, cookie);
    return (ptr);
}
int
vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa)
{
    int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
    uint64_t *ptpbase, ptpphys, pte, pgsize;
    uint32_t *ptpbase32, pte32;
    void *cookie;

    usermode = (paging->cpl == 3 ? 1 : 0);
    writable = prot & VM_PROT_WRITE;
    cookie = NULL;
    retval = 0;
restart:
    ptpphys = paging->cr3;        /* root of the page tables */
    ptp_release(&cookie);

    if (vie_canonical_check(paging->cpu_mode, gla)) {
        /*
         * XXX assuming a non-stack reference otherwise a stack fault
         * should be generated.
         */
        vm_inject_gp(vm, vcpuid);
        goto fault;
    }

    if (paging->paging_mode == PAGING_MODE_FLAT) {
        *gpa = gla;
        goto done;
    }

    if (paging->paging_mode == PAGING_MODE_32) {
        nlevels = 2;
        while (--nlevels >= 0) {
            /* Zero out the lower 12 bits. */
            ptpphys &= ~0xfff;

            ptpbase32 = ptp_hold(vm, ptpphys, PAGE_SIZE, &cookie);

            if (ptpbase32 == NULL)
                goto error;

            ptpshift = PAGE_SHIFT + nlevels * 10;
            ptpindex = (gla >> ptpshift) & 0x3FF;
            pgsize = 1UL << ptpshift;
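            /*
             * For example, the first iteration (nlevels = 1) uses
             * ptpshift = 22 and the 10-bit index in gla bits 31:22
             * (a 4MB superpage if PG_PS is set); the last iteration
             * (nlevels = 0) uses ptpshift = 12 and gla bits 21:12,
             * mapping a 4KB page.
             */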
            pte32 = ptpbase32[ptpindex];

            if ((pte32 & PG_V) == 0 ||
                (usermode && (pte32 & PG_U) == 0) ||
                (writable && (pte32 & PG_RW) == 0)) {
                pfcode = pf_error_code(usermode, prot, 0,
                    pte32);
                vm_inject_pf(vm, vcpuid, pfcode, gla);
                goto fault;
            }

            /*
             * Emulate the x86 MMU's management of the accessed
             * and dirty flags. While the accessed flag is set
             * at every level of the page table, the dirty flag
             * is only set at the last level providing the guest
             * physical address.
             */
            if ((pte32 & PG_A) == 0) {
                if (atomic_cmpset_32(&ptpbase32[ptpindex],
                    pte32, pte32 | PG_A) == 0) {
                    goto restart;
                }
            }

            /* XXX must be ignored if CR4.PSE=0 */
            if (nlevels > 0 && (pte32 & PG_PS) != 0)
                break;

            ptpphys = pte32;
        }

        /* Set the dirty bit in the page table entry if necessary */
        if (writable && (pte32 & PG_M) == 0) {
            if (atomic_cmpset_32(&ptpbase32[ptpindex],
                pte32, pte32 | PG_M) == 0) {
                goto restart;
            }
        }

        /* Zero out the lower 'ptpshift' bits */
        pte32 >>= ptpshift; pte32 <<= ptpshift;
        *gpa = pte32 | (gla & (pgsize - 1));
        goto done;
    }
    if (paging->paging_mode == PAGING_MODE_PAE) {
        /* Zero out the lower 5 bits and the upper 32 bits */
        ptpphys &= 0xffffffe0UL;

        ptpbase = ptp_hold(vm, ptpphys, sizeof(*ptpbase) * 4, &cookie);
        if (ptpbase == NULL)
            goto error;

        ptpindex = (gla >> 30) & 0x3;

        pte = ptpbase[ptpindex];

        if ((pte & PG_V) == 0) {
            pfcode = pf_error_code(usermode, prot, 0, pte);
            vm_inject_pf(vm, vcpuid, pfcode, gla);
            goto fault;
        }

        ptpphys = pte;

        nlevels = 2;
    } else
        nlevels = 4;
    while (--nlevels >= 0) {
        /* Zero out the lower 12 bits and the upper 12 bits */
        ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

        ptpbase = ptp_hold(vm, ptpphys, PAGE_SIZE, &cookie);
        if (ptpbase == NULL)
            goto error;

        ptpshift = PAGE_SHIFT + nlevels * 9;
        ptpindex = (gla >> ptpshift) & 0x1FF;
        pgsize = 1UL << ptpshift;
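        /*
         * For example, nlevels = 2 gives ptpshift = 30 (a 1GB page if
         * PG_PS is set), nlevels = 1 gives ptpshift = 21 (a 2MB page)
         * and nlevels = 0 gives ptpshift = 12 (a 4KB page); each level
         * consumes 9 bits of 'gla'.
         */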
        pte = ptpbase[ptpindex];

        if ((pte & PG_V) == 0 ||
            (usermode && (pte & PG_U) == 0) ||
            (writable && (pte & PG_RW) == 0)) {
            pfcode = pf_error_code(usermode, prot, 0, pte);
            vm_inject_pf(vm, vcpuid, pfcode, gla);
            goto fault;
        }

        /* Set the accessed bit in the page table entry */
        if ((pte & PG_A) == 0) {
            if (atomic_cmpset_64(&ptpbase[ptpindex],
                pte, pte | PG_A) == 0) {
                goto restart;
            }
        }

        if (nlevels > 0 && (pte & PG_PS) != 0) {
            if (pgsize > 1 * GB) {
                pfcode = pf_error_code(usermode, prot, 1, pte);
                vm_inject_pf(vm, vcpuid, pfcode, gla);
                goto fault;
            }
            break;
        }

        ptpphys = pte;
    }

    /* Set the dirty bit in the page table entry if necessary */
    if (writable && (pte & PG_M) == 0) {
        if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_M) == 0)
            goto restart;
    }

    /* Zero out the lower 'ptpshift' bits and the upper 12 bits */
    pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
    *gpa = pte | (gla & (pgsize - 1));
done:
    ptp_release(&cookie);
    return (retval);
error:
    retval = -1;
    goto done;
fault:
    retval = 1;
    goto done;
}
int
vmm_fetch_instruction(struct vm *vm, int cpuid, struct vm_guest_paging *paging,
    uint64_t rip, int inst_length, struct vie *vie)
{
    int n, error, prot;
    uint64_t gpa, off;
    void *hpa, *cookie;

    /*
     * XXX cache previously fetched instructions using 'rip' as the tag
     */

    prot = VM_PROT_READ | VM_PROT_EXECUTE;
    if (inst_length > VIE_INST_SIZE)
        panic("vmm_fetch_instruction: invalid length %d", inst_length);

    /* Copy the instruction into 'vie' */
    while (vie->num_valid < inst_length) {
        error = vmm_gla2gpa(vm, cpuid, paging, rip, prot, &gpa);
        if (error)
            return (error);

        off = gpa & PAGE_MASK;
        n = min(inst_length - vie->num_valid, PAGE_SIZE - off);

        if ((hpa = vm_gpa_hold(vm, gpa, n, prot, &cookie)) == NULL)
            break;

        bcopy(hpa, &vie->inst[vie->num_valid], n);

        vm_gpa_release(cookie);

        rip += n;
        vie->num_valid += n;
    }

    if (vie->num_valid == inst_length)
        return (0);
    else
        return (-1);
}
static int
vie_peek(struct vie *vie, uint8_t *x)
{

    if (vie->num_processed < vie->num_valid) {
        *x = vie->inst[vie->num_processed];
        return (0);
    } else
        return (-1);
}
static void
vie_advance(struct vie *vie)
{

    vie->num_processed++;
}
static int
decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)
{
    uint8_t x;

    while (1) {
        if (vie_peek(vie, &x))
            return (-1);

        if (x == 0x66)
            vie->opsize_override = 1;
        else if (x == 0x67)
            vie->addrsize_override = 1;
        else
            break;

        vie_advance(vie);
    }

    /*
     * From section 2.2.1, "REX Prefixes", Intel SDM Vol 2:
     * - Only one REX prefix is allowed per instruction.
     * - The REX prefix must immediately precede the opcode byte or the
     *   escape opcode byte.
     * - If an instruction has a mandatory prefix (0x66, 0xF2 or 0xF3)
     *   the mandatory prefix must come before the REX prefix.
     */
    if (cpu_mode == CPU_MODE_64BIT && x >= 0x40 && x <= 0x4F) {
        vie->rex_present = 1;
        vie->rex_w = x & 0x8 ? 1 : 0;
        vie->rex_r = x & 0x4 ? 1 : 0;
        vie->rex_x = x & 0x2 ? 1 : 0;
        vie->rex_b = x & 0x1 ? 1 : 0;
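        /*
         * For example, prefix 0x48 sets only REX.W (64-bit operand
         * size) while 0x44 sets only REX.R (extending ModRM:reg to
         * select %r8-%r15).
         */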
        vie_advance(vie);
    }

    /*
     * Section "Operand-Size And Address-Size Attributes", Intel SDM, Vol 1
     */
    if (cpu_mode == CPU_MODE_64BIT) {
        /*
         * Default address size is 64-bits and default operand size
         * is 32-bits.
         */
        vie->addrsize = vie->addrsize_override ? 4 : 8;
        if (vie->rex_w)
            vie->opsize = 8;
        else if (vie->opsize_override)
            vie->opsize = 2;
        else
            vie->opsize = 4;
    } else if (cs_d) {
        /* Default address and operand sizes are 32-bits */
        vie->addrsize = vie->addrsize_override ? 2 : 4;
        vie->opsize = vie->opsize_override ? 2 : 4;
    } else {
        /* Default address and operand sizes are 16-bits */
        vie->addrsize = vie->addrsize_override ? 4 : 2;
        vie->opsize = vie->opsize_override ? 4 : 2;
    }
    return (0);
}
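/*
 * For example, in 64-bit mode a plain MOV uses a 4-byte operand and an
 * 8-byte address; a 0x66 prefix shrinks the operand to 2 bytes, a 0x67
 * prefix shrinks the address to 4 bytes, and REX.W widens the operand
 * to 8 bytes (taking precedence over 0x66 in the code above).
 */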
static int
decode_two_byte_opcode(struct vie *vie)
{
    uint8_t x;

    if (vie_peek(vie, &x))
        return (-1);

    vie->op = two_byte_opcodes[x];

    if (vie->op.op_type == VIE_OP_TYPE_NONE)
        return (-1);

    vie_advance(vie);
    return (0);
}
static int
decode_opcode(struct vie *vie)
{
    uint8_t x;

    if (vie_peek(vie, &x))
        return (-1);

    vie->op = one_byte_opcodes[x];

    if (vie->op.op_type == VIE_OP_TYPE_NONE)
        return (-1);

    vie_advance(vie);

    if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE)
        return (decode_two_byte_opcode(vie));

    return (0);
}
static int
decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
{
    uint8_t x;

    if (cpu_mode == CPU_MODE_REAL)
        return (-1);

    if (vie_peek(vie, &x))
        return (-1);

    vie->mod = (x >> 6) & 0x3;
    vie->rm = (x >> 0) & 0x7;
    vie->reg = (x >> 3) & 0x7;
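    /*
     * For example, ModRM byte 0x51 decomposes into mod = 1 (indirect
     * addressing with disp8), reg = 2 and rm = 1.
     */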
    /*
     * A direct addressing mode makes no sense in the context of an EPT
     * fault. There has to be a memory access involved to cause the
     * EPT fault.
     */
    if (vie->mod == VIE_MOD_DIRECT)
        return (-1);

    if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
        (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
        /*
         * Table 2-5: Special Cases of REX Encodings
         *
         * mod=0, r/m=5 is used in the compatibility mode to
         * indicate a disp32 without a base register.
         *
         * mod!=3, r/m=4 is used in the compatibility mode to
         * indicate that the SIB byte is present.
         *
         * The 'b' bit in the REX prefix is don't care in
         * these two cases.
         */
    } else {
        vie->rm |= (vie->rex_b << 3);
    }

    vie->reg |= (vie->rex_r << 3);

    /* SIB */
    if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
        goto done;

    vie->base_register = gpr_map[vie->rm];

    switch (vie->mod) {
    case VIE_MOD_INDIRECT_DISP8:
        vie->disp_bytes = 1;
        break;
    case VIE_MOD_INDIRECT_DISP32:
        vie->disp_bytes = 4;
        break;
    case VIE_MOD_INDIRECT:
        if (vie->rm == VIE_RM_DISP32) {
            vie->disp_bytes = 4;
            /*
             * Table 2-7. RIP-Relative Addressing
             *
             * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
             * whereas in compatibility mode it just implies disp32.
             */
            if (cpu_mode == CPU_MODE_64BIT)
                vie->base_register = VM_REG_GUEST_RIP;
            else
                vie->base_register = VM_REG_LAST;
        }
        break;
    }

done:
    vie_advance(vie);

    return (0);
}
static int
decode_sib(struct vie *vie)
{
    uint8_t x;

    /* Proceed only if SIB byte is present */
    if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
        goto done;

    if (vie_peek(vie, &x))
        return (-1);

    /* De-construct the SIB byte */
    vie->ss = (x >> 6) & 0x3;
    vie->index = (x >> 3) & 0x7;
    vie->base = (x >> 0) & 0x7;

    /* Apply the REX prefix modifiers */
    vie->index |= vie->rex_x << 3;
    vie->base |= vie->rex_b << 3;
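    /*
     * For example, SIB byte 0x4c with rex_x = rex_b = 0 decomposes into
     * ss = 1, index = 1 (%rcx) and base = 4 (%rsp), addressing
     * %rsp + 2 * %rcx.
     */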
    switch (vie->mod) {
    case VIE_MOD_INDIRECT_DISP8:
        vie->disp_bytes = 1;
        break;
    case VIE_MOD_INDIRECT_DISP32:
        vie->disp_bytes = 4;
        break;
    }

    if (vie->mod == VIE_MOD_INDIRECT &&
        (vie->base == 5 || vie->base == 13)) {
        /*
         * Special case when base register is unused if mod = 0
         * and base = %rbp or %r13.
         *
         * Documented in:
         * Table 2-3: 32-bit Addressing Forms with the SIB Byte
         * Table 2-5: Special Cases of REX Encodings
         */
        vie->disp_bytes = 4;
    } else {
        vie->base_register = gpr_map[vie->base];
    }

    /*
     * All encodings of 'index' are valid except for %rsp (4).
     *
     * Documented in:
     * Table 2-3: 32-bit Addressing Forms with the SIB Byte
     * Table 2-5: Special Cases of REX Encodings
     */
    if (vie->index != 4)
        vie->index_register = gpr_map[vie->index];

    /* 'scale' makes sense only in the context of an index register */
    if (vie->index_register < VM_REG_LAST)
        vie->scale = 1 << vie->ss;

done:
    vie_advance(vie);

    return (0);
}
static int
decode_displacement(struct vie *vie)
{
    int n, i;
    uint8_t x;

    union {
        char buf[4];
        int8_t signed8;
        int32_t signed32;
    } u;

    if ((n = vie->disp_bytes) == 0)
        return (0);

    if (n != 1 && n != 4)
        panic("decode_displacement: invalid disp_bytes %d", n);

    for (i = 0; i < n; i++) {
        if (vie_peek(vie, &x))
            return (-1);

        u.buf[i] = x;
        vie_advance(vie);
    }

    if (n == 1)
        vie->displacement = u.signed8;   /* sign-extended */
    else
        vie->displacement = u.signed32;  /* sign-extended */

    return (0);
}
static int
decode_immediate(struct vie *vie)
{
    int i, n;
    uint8_t x;
    union {
        char buf[8];
        int8_t signed8;
        int16_t signed16;
        int32_t signed32;
        int64_t signed64;
    } u;

    /* Figure out immediate operand size (if any) */
    if (vie->op.op_flags & VIE_OP_F_MOFFSET) {
        /*
         * Section 2.2.1.4, "Direct Memory-Offset MOVs", Intel SDM:
         * The memory offset size follows the address-size of the
         * instruction. Although this is treated as an immediate
         * value during instruction decoding it is interpreted as
         * a segment offset by the instruction emulation.
         */
        vie->imm_bytes = vie->addrsize;
    } else if (vie->op.op_flags & VIE_OP_F_IMM) {
        /*
         * Section 2.2.1.5 "Immediates", Intel SDM:
         * In 64-bit mode the typical size of immediate operands
         * remains 32-bits. When the operand size is 64-bits, the
         * processor sign-extends all immediates to 64-bits prior
         * to their use.
         */
        if (vie->opsize == 4 || vie->opsize == 8)
            vie->imm_bytes = 4;
        else
            vie->imm_bytes = 2;
    } else if (vie->op.op_flags & VIE_OP_F_IMM8) {
        vie->imm_bytes = 1;
    }

    if ((n = vie->imm_bytes) == 0)
        return (0);

    KASSERT(n == 1 || n == 2 || n == 4 || n == 8,
        ("%s: invalid number of immediate bytes: %d", __func__, n));

    for (i = 0; i < n; i++) {
        if (vie_peek(vie, &x))
            return (-1);

        u.buf[i] = x;
        vie_advance(vie);
    }

    /* sign-extend the immediate value before use */
    if (n == 1)
        vie->immediate = u.signed8;
    else if (n == 2)
        vie->immediate = u.signed16;
    else if (n == 4)
        vie->immediate = u.signed32;
    else
        vie->immediate = u.signed64;

    if (vie->op.op_flags & VIE_OP_F_MOFFSET) {
        /*
         * If the immediate value is going to be interpreted as a
         * segment offset then undo the sign-extension above.
         */
        vie->immediate &= size2mask[n];
    }

    return (0);
}
/*
 * Verify that all the bytes in the instruction buffer were consumed.
 */
static int
verify_inst_length(struct vie *vie)
{

    if (vie->num_processed == vie->num_valid)
        return (0);
    else
        return (-1);
}
/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches our instruction decoding.
 */
static int
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{
    int error;
    uint64_t base, idx, gla2;

    /* Skip 'gla' verification */
    if (gla == VIE_INVALID_GLA)
        return (0);

    base = 0;
    if (vie->base_register != VM_REG_LAST) {
        error = vm_get_register(vm, cpuid, vie->base_register, &base);
        if (error) {
            printf("verify_gla: error %d getting base reg %d\n",
                error, vie->base_register);
            return (-1);
        }

        /*
         * RIP-relative addressing starts from the following
         * instruction
         */
        if (vie->base_register == VM_REG_GUEST_RIP)
            base += vie->num_valid;
    }

    idx = 0;
    if (vie->index_register != VM_REG_LAST) {
        error = vm_get_register(vm, cpuid, vie->index_register, &idx);
        if (error) {
            printf("verify_gla: error %d getting index reg %d\n",
                error, vie->index_register);
            return (-1);
        }
    }

    /* XXX assuming that the base address of the segment is 0 */
    gla2 = base + vie->scale * idx + vie->displacement;
    gla2 &= size2mask[vie->addrsize];
    if (gla != gla2) {
        printf("verify_gla mismatch: "
            "base(0x%0lx), scale(%d), index(0x%0lx), "
            "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n",
            base, vie->scale, idx, vie->displacement, gla, gla2);
        return (-1);
    }

    return (0);
}
int
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
    enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)
{

    if (decode_prefixes(vie, cpu_mode, cs_d))
        return (-1);

    if (decode_opcode(vie))
        return (-1);

    if (decode_modrm(vie, cpu_mode))
        return (-1);

    if (decode_sib(vie))
        return (-1);

    if (decode_displacement(vie))
        return (-1);

    if (decode_immediate(vie))
        return (-1);

    if (verify_inst_length(vie))
        return (-1);

    if (verify_gla(vm, cpuid, gla, vie))
        return (-1);

    vie->decoded = 1;    /* success */

    return (0);
}
#endif /* _KERNEL */