/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/systm.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/_iovec.h>

#include <machine/vmm.h>

#include <assert.h>

#define	KASSERT(exp,msg)	assert((exp))
#endif	/* _KERNEL */

#include <machine/vmm_instruction_emul.h>
#include <x86/psl.h>
#include <x86/specialreg.h>

/* struct vie_op.op_type */
enum {
	VIE_OP_TYPE_NONE = 0, VIE_OP_TYPE_MOV, VIE_OP_TYPE_MOVSX,
	VIE_OP_TYPE_MOVZX, VIE_OP_TYPE_AND, VIE_OP_TYPE_OR, VIE_OP_TYPE_SUB,
	VIE_OP_TYPE_CMP, VIE_OP_TYPE_TWO_BYTE, VIE_OP_TYPE_PUSH,
	VIE_OP_TYPE_POP, VIE_OP_TYPE_LAST
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)  /* 16/32-bit immediate operand */
#define	VIE_OP_F_IMM8		(1 << 1)  /* 8-bit immediate operand */
#define	VIE_OP_F_MOFFSET	(1 << 2)  /* 16/32/64-bit immediate moffset */
#define	VIE_OP_F_NO_MODRM	(1 << 3)

static const struct vie_op two_byte_opcodes[256] = {
	[0xB6] = { .op_byte = 0xB6, .op_type = VIE_OP_TYPE_MOVZX },
	[0xB7] = { .op_byte = 0xB7, .op_type = VIE_OP_TYPE_MOVZX },
	[0xBE] = { .op_byte = 0xBE, .op_type = VIE_OP_TYPE_MOVSX },
};

static const struct vie_op one_byte_opcodes[256] = {
	[0x0F] = { .op_byte = 0x0F, .op_type = VIE_OP_TYPE_TWO_BYTE },
	[0x2B] = { .op_byte = 0x2B, .op_type = VIE_OP_TYPE_SUB },
	[0x3B] = { .op_byte = 0x3B, .op_type = VIE_OP_TYPE_CMP },
	[0x88] = { .op_byte = 0x88, .op_type = VIE_OP_TYPE_MOV },
	[0x89] = { .op_byte = 0x89, .op_type = VIE_OP_TYPE_MOV },
	[0x8A] = { .op_byte = 0x8A, .op_type = VIE_OP_TYPE_MOV },
	[0x8B] = { .op_byte = 0x8B, .op_type = VIE_OP_TYPE_MOV },
	[0xA1] = { .op_byte = 0xA1, .op_type = VIE_OP_TYPE_MOV,
		   .op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM },
	[0xA3] = { .op_byte = 0xA3, .op_type = VIE_OP_TYPE_MOV,
		   .op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM },
	/* XXX Group 11 extended opcode - not just MOV */
	[0xC6] = { .op_byte = 0xC6, .op_type = VIE_OP_TYPE_MOV,
		   .op_flags = VIE_OP_F_IMM8 },
	[0xC7] = { .op_byte = 0xC7, .op_type = VIE_OP_TYPE_MOV,
		   .op_flags = VIE_OP_F_IMM },
	[0x23] = { .op_byte = 0x23, .op_type = VIE_OP_TYPE_AND },
	/* XXX Group 1 extended opcode - not just AND */
	[0x81] = { .op_byte = 0x81, .op_type = VIE_OP_TYPE_AND,
		   .op_flags = VIE_OP_F_IMM },
	/* XXX Group 1 extended opcode - not just OR */
	[0x83] = { .op_byte = 0x83, .op_type = VIE_OP_TYPE_OR,
		   .op_flags = VIE_OP_F_IMM8 },
	/* XXX Group 1A extended opcode - not just POP */
	[0x8F] = { .op_byte = 0x8F, .op_type = VIE_OP_TYPE_POP },
	/* XXX Group 5 extended opcode - not just PUSH */
	[0xFF] = { .op_byte = 0xFF, .op_type = VIE_OP_TYPE_PUSH },
};

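/*
 * Note (added for exposition): decode_opcode() below uses the opcode byte
 * fetched from the guest to index directly into these 256-entry tables;
 * entries left zero-initialized decode as VIE_OP_TYPE_NONE and make the
 * decode fail. For example, a two-byte "movzx" such as 0F B6/r first hits
 * one_byte_opcodes[0x0F] (VIE_OP_TYPE_TWO_BYTE) and is then resolved via
 * two_byte_opcodes[0xB6].
 */
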
#define	VIE_MOD_INDIRECT	0
#define	VIE_MOD_INDIRECT_DISP8	1
#define	VIE_MOD_INDIRECT_DISP32	2
#define	VIE_MOD_DIRECT		3

#define	VIE_RM_SIB		4
#define	VIE_RM_DISP32		5

#define	GB			(1024 * 1024 * 1024)

static enum vm_reg_name gpr_map[16] = {
	VM_REG_GUEST_RAX, VM_REG_GUEST_RCX, VM_REG_GUEST_RDX, VM_REG_GUEST_RBX,
	VM_REG_GUEST_RSP, VM_REG_GUEST_RBP, VM_REG_GUEST_RSI, VM_REG_GUEST_RDI,
	VM_REG_GUEST_R8, VM_REG_GUEST_R9, VM_REG_GUEST_R10, VM_REG_GUEST_R11,
	VM_REG_GUEST_R12, VM_REG_GUEST_R13, VM_REG_GUEST_R14, VM_REG_GUEST_R15
};

static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};

static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
	int error;

	error = vm_get_register(vm, vcpuid, reg, rval);

	return (error);
}

static void
vie_calc_bytereg(struct vie *vie, enum vm_reg_name *reg, int *lhbr)
{
	*lhbr = 0;
	*reg = gpr_map[vie->reg];

	/*
	 * 64-bit mode imposes limitations on accessing legacy high byte
	 * registers (lhbr).
	 *
	 * The legacy high-byte registers cannot be addressed if the REX
	 * prefix is present. In this case the values 4, 5, 6 and 7 of the
	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
	 *
	 * If the REX prefix is not present then the values 4, 5, 6 and 7
	 * of the 'ModRM:reg' field address the legacy high-byte registers,
	 * %ah, %ch, %dh and %bh respectively.
	 */
	if (!vie->rex_present) {
		if (vie->reg & 0x4) {
			*lhbr = 1;
			*reg = gpr_map[vie->reg & 0x3];
		}
	}
}

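/*
 * Worked example (added for exposition): ModRM:reg = 5 selects %ch when no
 * REX prefix is present (gpr_map[5 & 3] = %rcx with lhbr = 1) but selects
 * %bpl when any REX prefix is present (gpr_map[5] = %rbp, lhbr = 0).
 */
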
static int
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
{
	uint64_t val;
	int error, lhbr;
	enum vm_reg_name reg;

	vie_calc_bytereg(vie, &reg, &lhbr);
	error = vm_get_register(vm, vcpuid, reg, &val);

	/*
	 * To obtain the value of a legacy high byte register shift the
	 * base register right by 8 bits (%ah = %rax >> 8).
	 */
	if (lhbr)
		*rval = val >> 8;
	else
		*rval = val;
	return (error);
}

static int
vie_write_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t byte)
{
	uint64_t origval, val, mask;
	int error, lhbr;
	enum vm_reg_name reg;

	vie_calc_bytereg(vie, &reg, &lhbr);
	error = vm_get_register(vm, vcpuid, reg, &origval);
	if (error == 0) {
		val = byte;
		mask = 0xff;
		if (lhbr) {
			/*
			 * Shift left by 8 to store 'byte' in a legacy high
			 * byte register.
			 */
			val <<= 8;
			mask <<= 8;
		}
		val |= origval & ~mask;
		error = vm_set_register(vm, vcpuid, reg, val);
	}
	return (error);
}

int
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
    uint64_t val, int size)
{
	int error;
	uint64_t origval;

	switch (size) {
	case 1:
	case 2:
		error = vie_read_register(vm, vcpuid, reg, &origval);
		if (error)
			return (error);
		val &= size2mask[size];
		val |= origval & ~size2mask[size];
		break;
	case 4:
		val &= 0xffffffffUL;
		break;
	case 8:
		break;
	default:
		return (EINVAL);
	}

	error = vm_set_register(vm, vcpuid, reg, val);
	return (error);
}

#define	RFLAGS_STATUS_BITS    (PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_V)

/*
 * Return the status flags that would result from doing (x - y).
 */
#define	GETCC(sz)							\
static u_long								\
getcc##sz(uint##sz##_t x, uint##sz##_t y)				\
{									\
	u_long rflags;							\
									\
	__asm __volatile("sub %2,%1; pushfq; popq %0" :			\
	    "=r" (rflags), "+r" (x) : "m" (y));				\
	return (rflags);						\
}

GETCC(8);
GETCC(16);
GETCC(32);
GETCC(64);

static u_long
getcc(int opsize, uint64_t x, uint64_t y)
{
	KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
	    ("getcc: invalid operand size %d", opsize));

	if (opsize == 1)
		return (getcc8(x, y));
	else if (opsize == 2)
		return (getcc16(x, y));
	else if (opsize == 4)
		return (getcc32(x, y));
	else
		return (getcc64(x, y));
}

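/*
 * Example (added for exposition): getcc(4, 1, 1) executes a 32-bit "sub"
 * of 1 - 1 on the host and returns rflags with PSL_Z set and PSL_C, PSL_N
 * and PSL_V clear, exactly the status bits the guest instruction would
 * have produced.
 */
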
static int
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint8_t byte;
	uint64_t val;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x88:
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 88/r:	mov r/m8, r8
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;	/* override for byte operation */
		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
		if (error == 0)
			error = memwrite(vm, vcpuid, gpa, byte, size, arg);
		break;
	case 0x89:
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:	mov r/m16, r16
		 * 89/r:	mov r/m32, r32
		 * REX.W + 89/r	mov r/m64, r64
		 */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0x8A:
		/*
		 * MOV byte from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8A/r:	mov r8, r/m8
		 * REX + 8A/r:	mov r8, r/m8
		 */
		size = 1;	/* override for byte operation */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0)
			error = vie_write_bytereg(vm, vcpuid, vie, val);
		break;
	case 0x8B:
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8B/r:	mov r16, r/m16
		 * 8B/r:	mov r32, r/m32
		 * REX.W 8B/r:	mov r64, r/m64
		 */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = gpr_map[vie->reg];
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xA1:
		/*
		 * MOV from seg:moffset to AX/EAX/RAX
		 * A1:		mov AX, moffs16
		 * A1:		mov EAX, moffs32
		 * REX.W + A1:	mov RAX, moffs64
		 */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = VM_REG_GUEST_RAX;
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xA3:
		/*
		 * MOV from AX/EAX/RAX to seg:moffset
		 * A3:		mov moffs16, AX
		 * A3:		mov moffs32, EAX
		 * REX.W + A3:	mov moffs64, RAX
		 */
		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0xC6:
		/*
		 * MOV from imm8 to mem (ModRM:r/m)
		 * C6/0		mov r/m8, imm8
		 * REX + C6/0	mov r/m8, imm8
		 */
		size = 1;	/* override for byte operation */
		error = memwrite(vm, vcpuid, gpa, vie->immediate, size, arg);
		break;
	case 0xC7:
		/*
		 * MOV from imm16/imm32 to mem (ModRM:r/m)
		 * C7/0		mov r/m16, imm16
		 * C7/0		mov r/m32, imm32
		 * REX.W + C7/0	mov r/m64, imm32 (sign-extended to 64-bits)
		 */
		val = vie->immediate & size2mask[size];
		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		break;
	default:
		break;
	}

	return (error);
}

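/*
 * Worked example (added for exposition): a guest "mov %ebx,(%rax)" that
 * faults on an MMIO address decodes to op_byte 0x89 with vie->opsize = 4,
 * so the 0x89 case above reads the full %rbx via gpr_map, masks it with
 * size2mask[4] and hands the low 32 bits to the memwrite callback for the
 * faulting gpa.
 */
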
static int
emulate_movx(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite,
    void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0xB6:
		/*
		 * MOV and zero extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B6/r		movzx r16, r/m8
		 * 0F B6/r		movzx r32, r/m8
		 * REX.W + 0F B6/r	movzx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);
		if (error)
			break;

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* zero-extend byte */
		val = (uint8_t)val;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);
		break;
	case 0xB7:
		/*
		 * MOV and zero extend word from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B7/r		movzx r32, r/m16
		 * REX.W + 0F B7/r	movzx r64, r/m16
		 */
		error = memread(vm, vcpuid, gpa, &val, 2, arg);
		if (error)
			break;

		reg = gpr_map[vie->reg];

		/* zero-extend word */
		val = (uint16_t)val;

		error = vie_update_register(vm, vcpuid, reg, val, size);
		break;
	case 0xBE:
		/*
		 * MOV and sign extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F BE/r		movsx r16, r/m8
		 * 0F BE/r		movsx r32, r/m8
		 * REX.W + 0F BE/r	movsx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);
		if (error)
			break;

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* sign extend byte */
		val = (int8_t)val;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);
		break;
	default:
		break;
	}
	return (error);
}

static int
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t result, rflags, rflags2, val1, val2;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x23:
		/*
		 * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 23/r		and r16, r/m16
		 * 23/r		and r32, r/m32
		 * REX.W + 23/r	and r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		result = val1 & val2;
		error = vie_update_register(vm, vcpuid, reg, result, size);
		break;
	case 0x81:
		/*
		 * AND/OR mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81 /i		op r/m16, imm16
		 * 81 /i		op r/m32, imm32
		 * REX.W + 81 /i	op r/m64, imm32 sign-extended to 64
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		switch (vie->reg & 7) {
		case 0x4:
			/* modrm:reg == b100, AND */
			result = val1 & vie->immediate;
			break;
		case 0x1:
			/* modrm:reg == b001, OR */
			result = val1 | vie->immediate;
			break;
		default:
			error = EINVAL;
			break;
		}
		if (error)
			break;

		error = memwrite(vm, vcpuid, gpa, result, size, arg);
		break;
	default:
		break;
	}
	if (error)
		return (error);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	if (error)
		return (error);

	/*
	 * OF and CF are cleared; the SF, ZF and PF flags are set according
	 * to the result; AF is undefined.
	 *
	 * The updated status flags are obtained by subtracting 0 from 'result'.
	 */
	rflags2 = getcc(size, result, 0);
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
	return (error);
}

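/*
 * Example (added for exposition): an "and" of 0xff00 with 0x00ff at size 2
 * yields 0; getcc(2, 0, 0) then reports PSL_Z (and PSL_PF) set, and because
 * rflags2 is masked with (PSL_PF | PSL_Z | PSL_N) after all status bits were
 * cleared, OF and CF end up cleared as the instruction requires.
 */
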
static int
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	uint64_t val1, result, rflags, rflags2;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x83:
		/*
		 * OR mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 83 /1		OR r/m16, imm8 sign-extended to 16
		 * 83 /1		OR r/m32, imm8 sign-extended to 32
		 * REX.W + 83/1		OR r/m64, imm8 sign-extended to 64
		 *
		 * Currently, only the OR operation of the 0x83 opcode
		 * is implemented (ModRM:reg = b001).
		 */
		if ((vie->reg & 7) != 1)
			break;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		result = val1 | vie->immediate;
		error = memwrite(vm, vcpuid, gpa, result, size, arg);
		break;
	default:
		break;
	}
	if (error)
		return (error);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	if (error)
		return (error);

	/*
	 * OF and CF are cleared; the SF, ZF and PF flags are set according
	 * to the result; AF is undefined.
	 *
	 * The updated status flags are obtained by subtracting 0 from 'result'.
	 */
	rflags2 = getcc(size, result, 0);
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
	return (error);
}

static int
emulate_cmp(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	uint64_t op1, op2, rflags, rflags2;
	enum vm_reg_name reg;

	size = vie->opsize;
	switch (vie->op.op_byte) {
	case 0x3B:
		/*
		 * 3B/r		CMP r16, r/m16
		 * 3B/r		CMP r32, r/m32
		 * REX.W + 3B/r	CMP r64, r/m64
		 *
		 * Compare first operand (reg) with second operand (r/m) and
		 * set status flags in EFLAGS register. The comparison is
		 * performed by subtracting the second operand from the first
		 * operand and then setting the status flags.
		 */

		/* Get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &op1);
		if (error)
			return (error);

		/* Get the second operand */
		error = memread(vm, vcpuid, gpa, &op2, size, arg);
		if (error)
			return (error);

		break;
	default:
		return (EINVAL);
	}
	rflags2 = getcc(size, op1, op2);
	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	if (error)
		return (error);
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & RFLAGS_STATUS_BITS;

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
	return (error);
}

static int
emulate_sub(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	uint64_t nval, rflags, rflags2, val1, val2;
	enum vm_reg_name reg;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x2B:
		/*
		 * SUB r/m from r and store the result in r
		 *
		 * 2B/r		SUB r16, r/m16
		 * 2B/r		SUB r32, r/m32
		 * REX.W + 2B/r	SUB r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		nval = val1 - val2;
		error = vie_update_register(vm, vcpuid, reg, nval, size);
		break;
	default:
		break;
	}

	if (!error) {
		rflags2 = getcc(size, val1, val2);
		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
		    &rflags);
		if (error)
			return (error);

		rflags &= ~RFLAGS_STATUS_BITS;
		rflags |= rflags2 & RFLAGS_STATUS_BITS;
		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
		    rflags, 8);
	}

	return (error);
}

static int
emulate_stack_op(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
#ifdef _KERNEL
	struct vm_copyinfo copyinfo[2];
#else
	struct iovec copyinfo[2];
#endif
	struct seg_desc ss_desc;
	uint64_t cr0, rflags, rsp, stack_gla, val;
	int error, size, stackaddrsize, pushop;

	val = 0;
	size = vie->opsize;
	pushop = (vie->op.op_type == VIE_OP_TYPE_PUSH) ? 1 : 0;

	/*
	 * From "Address-Size Attributes for Stack Accesses", Intel SDM, Vol 1
	 */
	if (paging->cpu_mode == CPU_MODE_REAL) {
		stackaddrsize = 2;
	} else if (paging->cpu_mode == CPU_MODE_64BIT) {
		/*
		 * "Stack Manipulation Instructions in 64-bit Mode", SDM, Vol 3
		 * - Stack pointer size is always 64-bits.
		 * - PUSH/POP of 32-bit values is not possible in 64-bit mode.
		 * - 16-bit PUSH/POP is supported by using the operand size
		 *   override prefix (66H).
		 */
		stackaddrsize = 8;
		size = vie->opsize_override ? 2 : 8;
	} else {
		/*
		 * In protected or compatibility mode the 'B' flag in the
		 * stack-segment descriptor determines the size of the
		 * stack pointer.
		 */
		error = vm_get_seg_desc(vm, vcpuid, VM_REG_GUEST_SS, &ss_desc);
		KASSERT(error == 0, ("%s: error %d getting SS descriptor",
		    __func__, error));
		if (SEG_DESC_DEF32(ss_desc.access))
			stackaddrsize = 4;
		else
			stackaddrsize = 2;
	}

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSP, &rsp);
	KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error));

	if (pushop)
		rsp -= size;

	if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
	    rsp, size, stackaddrsize, pushop ? PROT_WRITE : PROT_READ,
	    &stack_gla)) {
		vm_inject_ss(vm, vcpuid, 0);
		return (0);
	}

	if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
		vm_inject_ss(vm, vcpuid, 0);
		return (0);
	}

	if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) {
		vm_inject_ac(vm, vcpuid, 0);
		return (0);
	}

	error = vm_copy_setup(vm, vcpuid, paging, stack_gla, size,
	    pushop ? PROT_WRITE : PROT_READ, copyinfo, nitems(copyinfo));
	if (error == -1) {
		/*
		 * XXX cannot return a negative error value here because it
		 * ends up being the return value of the VM_RUN() ioctl and
		 * is interpreted as a pseudo-error (for e.g. ERESTART).
		 */
		return (EFAULT);
	} else if (error == 1) {
		/* Resume guest execution to handle page fault */
		return (0);
	}

	if (pushop) {
		error = memread(vm, vcpuid, mmio_gpa, &val, size, arg);
		if (error == 0)
			vm_copyout(vm, vcpuid, &val, copyinfo, size);
	} else {
		vm_copyin(vm, vcpuid, copyinfo, &val, size);
		error = memwrite(vm, vcpuid, mmio_gpa, val, size, arg);
		rsp += size;
	}

	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));

	if (error == 0) {
		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSP, rsp,
		    stackaddrsize);
		KASSERT(error == 0, ("error %d updating rsp", error));
	}
	return (error);
}

static int
emulate_push(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
	int error;

	/*
	 * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
	 *
	 * PUSH is part of the group 5 extended opcodes and is identified
	 * by ModRM:reg = b110.
	 */
	if ((vie->reg & 7) != 6)
		return (EINVAL);

	error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
	    memwrite, arg);
	return (error);
}

static int
emulate_pop(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
	int error;

	/*
	 * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
	 *
	 * POP is part of the group 1A extended opcodes and is identified
	 * by ModRM:reg = b000.
	 */
	if ((vie->reg & 7) != 0)
		return (EINVAL);

	error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
	    memwrite, arg);
	return (error);
}

int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)
{
	int error;

	if (!vie->decoded)
		return (EINVAL);

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_POP:
		error = emulate_pop(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_PUSH:
		error = emulate_push(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_CMP:
		error = emulate_cmp(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOVSX:
	case VIE_OP_TYPE_MOVZX:
		error = emulate_movx(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_OR:
		error = emulate_or(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_SUB:
		error = emulate_sub(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

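/*
 * Illustrative sketch (not part of the original source): a minimal pair of
 * MMIO callbacks for vmm_emulate_instruction(), assuming the
 * mem_region_read_t/mem_region_write_t signatures declared in
 * vmm_instruction_emul.h. The 'myreg_*' names are hypothetical.
 */
#if 0
static int
myreg_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *rval, int size,
    void *arg)
{
	*rval = 0;	/* value the guest observes for this register */
	return (0);
}

static int
myreg_write(void *vm, int vcpuid, uint64_t gpa, uint64_t wval, int size,
    void *arg)
{
	return (0);	/* discard guest writes in this sketch */
}
#endif
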
int
vie_alignment_check(int cpl, int size, uint64_t cr0, uint64_t rf, uint64_t gla)
{
	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("%s: invalid size %d", __func__, size));
	KASSERT(cpl >= 0 && cpl <= 3, ("%s: invalid cpl %d", __func__, cpl));

	if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rf & PSL_AC) == 0)
		return (0);

	return ((gla & (size - 1)) ? 1 : 0);
}

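/*
 * Example (added for exposition): with CPL 3 and both CR0.AM and RFLAGS.AC
 * set, a 4-byte access to gla 0x1002 fails the check because
 * (0x1002 & 3) != 0, and the caller injects #AC.
 */
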
int
vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
{
	uint64_t mask;

	if (cpu_mode != CPU_MODE_64BIT)
		return (0);

	/*
	 * The value of the bit 47 in the 'gla' should be replicated in the
	 * most significant 16 bits.
	 */
	mask = ~((1UL << 48) - 1);
	if (gla & (1UL << 47))
		return ((gla & mask) != mask);
	else
		return ((gla & mask) != 0);
}

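/*
 * Example (added for exposition): 0x00007fffffffffff and 0xffff800000000000
 * are both canonical (bits 63:48 replicate bit 47), while 0x0000800000000000
 * is not and fails the check.
 */
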
uint64_t
vie_size2mask(int size)
{
	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("vie_size2mask: invalid size %d", size));
	return (size2mask[size]);
}

int
vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t offset, int length, int addrsize,
    int prot, uint64_t *gla)
{
	uint64_t firstoff, low_limit, high_limit, segbase;
	int glasize, type;

	KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
	    ("%s: invalid segment %d", __func__, seg));
	KASSERT(length == 1 || length == 2 || length == 4 || length == 8,
	    ("%s: invalid operand size %d", __func__, length));
	KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0,
	    ("%s: invalid prot %#x", __func__, prot));

	firstoff = offset;
	if (cpu_mode == CPU_MODE_64BIT) {
		KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address "
		    "size %d for cpu_mode %d", __func__, addrsize, cpu_mode));
		glasize = 8;
	} else {
		KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address "
		    "size %d for cpu mode %d", __func__, addrsize, cpu_mode));
		glasize = 4;
		/*
		 * If the segment selector is loaded with a NULL selector
		 * then the descriptor is unusable and attempting to use
		 * it results in a #GP(0).
		 */
		if (SEG_DESC_UNUSABLE(desc->access))
			return (-1);

		/*
		 * The processor generates a #NP exception when a segment
		 * register is loaded with a selector that points to a
		 * descriptor that is not present. If this was the case then
		 * it would have been checked before the VM-exit.
		 */
		KASSERT(SEG_DESC_PRESENT(desc->access),
		    ("segment %d not present: %#x", seg, desc->access));

		/*
		 * The descriptor type must indicate a code/data segment.
		 */
		type = SEG_DESC_TYPE(desc->access);
		KASSERT(type >= 16 && type <= 31, ("segment %d has invalid "
		    "descriptor type %#x", seg, type));

		if (prot & PROT_READ) {
			/* #GP on a read access to an exec-only code segment */
			if ((type & 0xA) == 0x8)
				return (-1);
		}

		if (prot & PROT_WRITE) {
			/*
			 * #GP on a write access to a code segment or a
			 * read-only data segment.
			 */
			if (type & 0x8)			/* code segment */
				return (-1);

			if ((type & 0xA) == 0)		/* read-only data seg */
				return (-1);
		}

		/*
		 * 'desc->limit' is fully expanded taking granularity into
		 * account.
		 */
		if ((type & 0xC) == 0x4) {
			/* expand-down data segment */
			low_limit = desc->limit + 1;
			high_limit = SEG_DESC_DEF32(desc->access) ?
			    0xffffffff : 0xffff;
		} else {
			/* code segment or expand-up data segment */
			low_limit = 0;
			high_limit = desc->limit;
		}

		while (length > 0) {
			offset &= vie_size2mask(addrsize);
			if (offset < low_limit || offset > high_limit)
				return (-1);
			offset++;
			length--;
		}
	}

	/*
	 * In 64-bit mode all segments except %fs and %gs have a segment
	 * base address of 0.
	 */
	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
	    seg != VM_REG_GUEST_GS) {
		segbase = 0;
	} else {
		segbase = desc->base;
	}

	/*
	 * Truncate 'firstoff' to the effective address size before adding
	 * it to the segment base.
	 */
	firstoff &= vie_size2mask(addrsize);
	*gla = (segbase + firstoff) & vie_size2mask(glasize);
	return (0);
}

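/*
 * Worked example (added for exposition): with addrsize 2 an offset of
 * 0xffff and length 2 wraps to offset 0 inside the limit-check loop, so
 * each byte of the access is checked against the segment limits
 * individually; the final gla is likewise truncated to the effective
 * address size before the segment base is added.
 */
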
void
vie_init(struct vie *vie)
{

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
}

#ifdef _KERNEL
static int
pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
{
	int error_code = 0;

	if (pte & PG_V)
		error_code |= PGEX_P;
	if (prot & VM_PROT_WRITE)
		error_code |= PGEX_W;
	if (usermode)
		error_code |= PGEX_U;
	if (rsvd)
		error_code |= PGEX_RSV;
	if (prot & VM_PROT_EXECUTE)
		error_code |= PGEX_I;

	return (error_code);
}

static void
ptp_release(void **cookie)
{
	if (*cookie != NULL) {
		vm_gpa_release(*cookie);
		*cookie = NULL;
	}
}

static void *
ptp_hold(struct vm *vm, vm_paddr_t ptpphys, size_t len, void **cookie)
{
	void *ptr;

	ptp_release(cookie);
	ptr = vm_gpa_hold(vm, ptpphys, len, VM_PROT_RW, cookie);
	return (ptr);
}

int
vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa)
{
	int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
	uint64_t *ptpbase, ptpphys, pte, pgsize;
	uint32_t *ptpbase32, pte32;
	void *cookie;

	usermode = (paging->cpl == 3 ? 1 : 0);
	writable = prot & VM_PROT_WRITE;
	cookie = NULL;
	retval = 0;
restart:
	ptpphys = paging->cr3;		/* root of the page tables */
	ptp_release(&cookie);

	if (vie_canonical_check(paging->cpu_mode, gla)) {
		/*
		 * XXX assuming a non-stack reference otherwise a stack fault
		 * should be generated.
		 */
		vm_inject_gp(vm, vcpuid);
		goto fault;
	}

	if (paging->paging_mode == PAGING_MODE_FLAT) {
		*gpa = gla;
		goto done;
	}

	if (paging->paging_mode == PAGING_MODE_32) {
		nlevels = 2;
		while (--nlevels >= 0) {
			/* Zero out the lower 12 bits. */
			ptpphys &= ~0xfff;

			ptpbase32 = ptp_hold(vm, ptpphys, PAGE_SIZE, &cookie);

			if (ptpbase32 == NULL)
				goto error;

			ptpshift = PAGE_SHIFT + nlevels * 10;
			ptpindex = (gla >> ptpshift) & 0x3FF;
			pgsize = 1UL << ptpshift;

			pte32 = ptpbase32[ptpindex];

			if ((pte32 & PG_V) == 0 ||
			    (usermode && (pte32 & PG_U) == 0) ||
			    (writable && (pte32 & PG_RW) == 0)) {
				pfcode = pf_error_code(usermode, prot, 0,
				    pte32);
				vm_inject_pf(vm, vcpuid, pfcode, gla);
				goto fault;
			}

			/*
			 * Emulate the x86 MMU's management of the accessed
			 * and dirty flags. While the accessed flag is set
			 * at every level of the page table, the dirty flag
			 * is only set at the last level providing the guest
			 * physical address.
			 */
			if ((pte32 & PG_A) == 0) {
				if (atomic_cmpset_32(&ptpbase32[ptpindex],
				    pte32, pte32 | PG_A) == 0) {
					goto restart;
				}
			}

			/* XXX must be ignored if CR4.PSE=0 */
			if (nlevels > 0 && (pte32 & PG_PS) != 0)
				break;

			ptpphys = pte32;
		}

		/* Set the dirty bit in the page table entry if necessary */
		if (writable && (pte32 & PG_M) == 0) {
			if (atomic_cmpset_32(&ptpbase32[ptpindex],
			    pte32, pte32 | PG_M) == 0) {
				goto restart;
			}
		}

		/* Zero out the lower 'ptpshift' bits */
		pte32 >>= ptpshift; pte32 <<= ptpshift;
		*gpa = pte32 | (gla & (pgsize - 1));
		goto done;
	}

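	/*
	 * Example (added for exposition): in the two-level 32-bit walk
	 * above, nlevels = 1 gives ptpshift 22 (page-directory index from
	 * gla bits 31:22) and nlevels = 0 gives ptpshift 12 (page-table
	 * index from bits 21:12); bits 11:0 of the gla pass through into
	 * the gpa for a 4KB page.
	 */
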
	if (paging->paging_mode == PAGING_MODE_PAE) {
		/* Zero out the lower 5 bits and the upper 32 bits */
		ptpphys &= 0xffffffe0UL;

		ptpbase = ptp_hold(vm, ptpphys, sizeof(*ptpbase) * 4, &cookie);
		if (ptpbase == NULL)
			goto error;

		ptpindex = (gla >> 30) & 0x3;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);
			goto fault;
		}

		ptpphys = pte;

		nlevels = 2;
	} else
		nlevels = 4;
	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		ptpbase = ptp_hold(vm, ptpphys, PAGE_SIZE, &cookie);
		if (ptpbase == NULL)
			goto error;

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0 ||
		    (usermode && (pte & PG_U) == 0) ||
		    (writable && (pte & PG_RW) == 0)) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);
			goto fault;
		}

		/* Set the accessed bit in the page table entry */
		if ((pte & PG_A) == 0) {
			if (atomic_cmpset_64(&ptpbase[ptpindex],
			    pte, pte | PG_A) == 0) {
				goto restart;
			}
		}

		if (nlevels > 0 && (pte & PG_PS) != 0) {
			if (pgsize > 1 * GB) {
				pfcode = pf_error_code(usermode, prot, 1, pte);
				vm_inject_pf(vm, vcpuid, pfcode, gla);
				goto fault;
			}
			break;
		}

		ptpphys = pte;
	}

	/* Set the dirty bit in the page table entry if necessary */
	if (writable && (pte & PG_M) == 0) {
		if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_M) == 0)
			goto restart;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));
done:
	ptp_release(&cookie);
	return (retval);
error:
	retval = -1;
	goto done;
fault:
	retval = 1;
	goto done;
}

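/*
 * Example (added for exposition): in 4-level long mode the walk above
 * visits ptpshift 39, 30, 21 and 12, i.e. gla bits 47:39 (PML4),
 * 38:30 (PDPT), 29:21 (PD) and 20:12 (PT), each masked with 0x1FF; a
 * PG_PS entry at ptpshift 21 maps a 2MB page, and any PG_PS mapping
 * larger than 1GB is rejected as a reserved-bit fault.
 */
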
int
vmm_fetch_instruction(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t rip, int inst_length, struct vie *vie)
{
	struct vm_copyinfo copyinfo[2];
	int error, prot;

	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	prot = PROT_READ | PROT_EXEC;
	error = vm_copy_setup(vm, vcpuid, paging, rip, inst_length, prot,
	    copyinfo, nitems(copyinfo));
	if (error == 0) {
		vm_copyin(vm, vcpuid, copyinfo, vie->inst, inst_length);
		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
		vie->num_valid = inst_length;
	}
	return (error);
}

static int
vie_peek(struct vie *vie, uint8_t *x)
{

	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	} else
		return (-1);
}

static void
vie_advance(struct vie *vie)
{

	vie->num_processed++;
}

static int
decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)
{
	uint8_t x;

	while (1) {
		if (vie_peek(vie, &x))
			return (-1);

		if (x == 0x66)
			vie->opsize_override = 1;
		else if (x == 0x67)
			vie->addrsize_override = 1;
		else
			break;

		vie_advance(vie);
	}

	/*
	 * From section 2.2.1, "REX Prefixes", Intel SDM Vol 2:
	 * - Only one REX prefix is allowed per instruction.
	 * - The REX prefix must immediately precede the opcode byte or the
	 *   escape opcode byte.
	 * - If an instruction has a mandatory prefix (0x66, 0xF2 or 0xF3)
	 *   the mandatory prefix must come before the REX prefix.
	 */
	if (cpu_mode == CPU_MODE_64BIT && x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;
		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;
		vie_advance(vie);
	}

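	/*
	 * Example (added for exposition): prefix byte 0x48 sets REX.W and
	 * selects 64-bit operands, while 0x44 sets REX.R so that ModRM:reg
	 * reaches %r8-%r15; any byte in 0x40-0x4F also disables the legacy
	 * high-byte registers in vie_calc_bytereg() above.
	 */
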
	/*
	 * Section "Operand-Size And Address-Size Attributes", Intel SDM, Vol 1
	 */
	if (cpu_mode == CPU_MODE_64BIT) {
		/*
		 * Default address size is 64-bits and default operand size
		 * is 32-bits.
		 */
		vie->addrsize = vie->addrsize_override ? 4 : 8;
		if (vie->rex_w)
			vie->opsize = 8;
		else if (vie->opsize_override)
			vie->opsize = 2;
		else
			vie->opsize = 4;
	} else if (cs_d) {
		/* Default address and operand sizes are 32-bits */
		vie->addrsize = vie->addrsize_override ? 2 : 4;
		vie->opsize = vie->opsize_override ? 2 : 4;
	} else {
		/* Default address and operand sizes are 16-bits */
		vie->addrsize = vie->addrsize_override ? 4 : 2;
		vie->opsize = vie->opsize_override ? 4 : 2;
	}
	return (0);
}

static int
decode_two_byte_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = two_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);
	return (0);
}

static int
decode_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);

	if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE)
		return (decode_two_byte_opcode(vie));

	return (0);
}

static int
decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
{
	uint8_t x;

	if (cpu_mode == CPU_MODE_REAL)
		return (-1);

	if (vie->op.op_flags & VIE_OP_F_NO_MODRM)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	vie->mod = (x >> 6) & 0x3;
	vie->rm =  (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is don't care in
		 * these two cases.
		 */
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	/* SIB */
	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
		goto done;

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			/*
			 * Table 2-7. RIP-Relative Addressing
			 *
			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
			 * whereas in compatibility mode it just implies disp32.
			 */
			if (cpu_mode == CPU_MODE_64BIT)
				vie->base_register = VM_REG_GUEST_RIP;
			else
				vie->base_register = VM_REG_LAST;
		}
		break;
	}

done:
	vie_advance(vie);

	return (0);
}

static int
decode_sib(struct vie *vie)
{
	uint8_t x;

	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case when base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Documented in:
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Documented in:
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;

	vie_advance(vie);

	return (0);
}

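/*
 * Worked example (added for exposition): SIB byte 0x58 with no REX prefix
 * deconstructs to ss = 1, index = 3 (%rbx) and base = 0 (%rax), so the
 * effective address is %rax + %rbx * 2 plus any displacement.
 */
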
static int
decode_displacement(struct vie *vie)
{
	int n, i;
	uint8_t x;

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */

	return (0);
}

static int
decode_immediate(struct vie *vie)
{
	int i, n;
	uint8_t x;
	union {
		char	buf[4];
		int8_t	signed8;
		int16_t	signed16;
		int32_t	signed32;
	} u;

	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM) {
		/*
		 * Section 2.2.1.5 "Immediates", Intel SDM:
		 * In 64-bit mode the typical size of immediate operands
		 * remains 32-bits. When the operand size is 64-bits, the
		 * processor sign-extends all immediates to 64-bits prior
		 * to their use.
		 */
		if (vie->opsize == 4 || vie->opsize == 8)
			vie->imm_bytes = 4;
		else
			vie->imm_bytes = 2;
	} else if (vie->op.op_flags & VIE_OP_F_IMM8) {
		vie->imm_bytes = 1;
	}

	if ((n = vie->imm_bytes) == 0)
		return (0);

	KASSERT(n == 1 || n == 2 || n == 4,
	    ("%s: invalid number of immediate bytes: %d", __func__, n));

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	/* sign-extend the immediate value before use */
	if (n == 1)
		vie->immediate = u.signed8;
	else if (n == 2)
		vie->immediate = u.signed16;
	else
		vie->immediate = u.signed32;

	return (0);
}

static int
decode_moffset(struct vie *vie)
{
	int i, n;
	uint8_t x;
	union {
		char	buf[8];
		uint64_t u64;
	} u;

	if ((vie->op.op_flags & VIE_OP_F_MOFFSET) == 0)
		return (0);

	/*
	 * Section 2.2.1.4, "Direct Memory-Offset MOVs", Intel SDM:
	 * The memory offset size follows the address-size of the instruction.
	 */
	n = vie->addrsize;
	KASSERT(n == 2 || n == 4 || n == 8, ("invalid moffset bytes: %d", n));

	u.u64 = 0;
	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}
	vie->displacement = u.u64;
	return (0);
}

/*
 * Verify that all the bytes in the instruction buffer were consumed.
 */
static int
verify_inst_length(struct vie *vie)
{

	if (vie->num_processed == vie->num_valid)
		return (0);
	else
		return (-1);
}

/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches with our instruction decoding.
 */
static int
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{
	int error;
	uint64_t base, idx, gla2;

	/* Skip 'gla' verification */
	if (gla == VIE_INVALID_GLA)
		return (0);

	base = 0;
	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
			    error, vie->base_register);
			return (-1);
		}

		/*
		 * RIP-relative addressing starts from the following
		 * instruction
		 */
		if (vie->base_register == VM_REG_GUEST_RIP)
			base += vie->num_valid;
	}

	idx = 0;
	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
			    error, vie->index_register);
			return (-1);
		}
	}

	/* XXX assuming that the base address of the segment is 0 */
	gla2 = base + vie->scale * idx + vie->displacement;
	gla2 &= size2mask[vie->addrsize];
	if (gla != gla2) {
		printf("verify_gla mismatch: "
		    "base(0x%0lx), scale(%d), index(0x%0lx), "
		    "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n",
		    base, vie->scale, idx, vie->displacement, gla, gla2);
		return (-1);
	}

	return (0);
}

int
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
    enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)
{

	if (decode_prefixes(vie, cpu_mode, cs_d))
		return (-1);

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie, cpu_mode))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

	if (decode_moffset(vie))
		return (-1);

	if (verify_inst_length(vie))
		return (-1);

	if (verify_gla(vm, cpuid, gla, vie))
		return (-1);

	vie->decoded = 1;	/* success */

	return (0);
}

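/*
 * Illustrative usage (not part of the original source): the expected
 * decode/emulate sequence for an instruction that faulted on 'gpa',
 * assuming the surrounding variables and the hypothetical 'myreg_*'
 * callbacks sketched earlier.
 */
#if 0
	vie_init(&vie);
	if (vmm_fetch_instruction(vm, vcpuid, &paging, rip, inst_length,
	    &vie) != 0)
		return (EFAULT);
	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, &vie))
		return (EFAULT);
	error = vmm_emulate_instruction(vm, vcpuid, gpa, &vie, &paging,
	    myreg_read, myreg_write, NULL);
#endif
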
#endif	/* _KERNEL */