/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/systm.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/_iovec.h>

#include <machine/vmm.h>

#include <assert.h>
#define	KASSERT(exp,msg)	assert((exp))
#endif	/* _KERNEL */

#include <machine/vmm_instruction_emul.h>
#include <x86/psl.h>
#include <x86/specialreg.h>

/* struct vie_op.op_type */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_MOVSX,
	VIE_OP_TYPE_MOVZX,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_OR,
	VIE_OP_TYPE_SUB,
	VIE_OP_TYPE_TWO_BYTE,
	VIE_OP_TYPE_POP,
	VIE_OP_TYPE_PUSH,
	VIE_OP_TYPE_CMP,
	VIE_OP_TYPE_GROUP1,
	VIE_OP_TYPE_STOS,
	VIE_OP_TYPE_MOVS,
	VIE_OP_TYPE_BITTEST,
	VIE_OP_TYPE_LAST
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)  /* 16/32-bit immediate operand */
#define	VIE_OP_F_IMM8		(1 << 1)  /* 8-bit immediate operand */
#define	VIE_OP_F_MOFFSET	(1 << 2)  /* 16/32/64-bit immediate moffset */
#define	VIE_OP_F_NO_MODRM	(1 << 3)
#define	VIE_OP_F_NO_GLA_VERIFICATION (1 << 4)

static const struct vie_op two_byte_opcodes[256] = {
	[0xB6] = {
		.op_byte = 0xB6,
		.op_type = VIE_OP_TYPE_MOVZX,
	},
	[0xB7] = {
		.op_byte = 0xB7,
		.op_type = VIE_OP_TYPE_MOVZX,
	},
	[0xBA] = {
		.op_byte = 0xBA,
		.op_type = VIE_OP_TYPE_BITTEST,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0xBE] = {
		.op_byte = 0xBE,
		.op_type = VIE_OP_TYPE_MOVSX,
	},
};

static const struct vie_op one_byte_opcodes[256] = {
	[0x0F] = {
		.op_byte = 0x0F,
		.op_type = VIE_OP_TYPE_TWO_BYTE
	},
	[0x0B] = {
		.op_byte = 0x0B,
		.op_type = VIE_OP_TYPE_OR,
	},
	[0x2B] = {
		.op_byte = 0x2B,
		.op_type = VIE_OP_TYPE_SUB,
	},
	[0x39] = {
		.op_byte = 0x39,
		.op_type = VIE_OP_TYPE_CMP,
	},
	[0x3B] = {
		.op_byte = 0x3B,
		.op_type = VIE_OP_TYPE_CMP,
	},
	[0x88] = {
		.op_byte = 0x88,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x89] = {
		.op_byte = 0x89,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8A] = {
		.op_byte = 0x8A,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8B] = {
		.op_byte = 0x8B,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0xA1] = {
		.op_byte = 0xA1,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
	},
	[0xA3] = {
		.op_byte = 0xA3,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
	},
	[0xA4] = {
		.op_byte = 0xA4,
		.op_type = VIE_OP_TYPE_MOVS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xA5] = {
		.op_byte = 0xA5,
		.op_type = VIE_OP_TYPE_MOVS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xAA] = {
		.op_byte = 0xAA,
		.op_type = VIE_OP_TYPE_STOS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xAB] = {
		.op_byte = 0xAB,
		.op_type = VIE_OP_TYPE_STOS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xC6] = {
		/* XXX Group 11 extended opcode - not just MOV */
		.op_byte = 0xC6,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0xC7] = {
		.op_byte = 0xC7,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x23] = {
		.op_byte = 0x23,
		.op_type = VIE_OP_TYPE_AND,
	},
	[0x80] = {
		/* Group 1 extended opcode */
		.op_byte = 0x80,
		.op_type = VIE_OP_TYPE_GROUP1,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0x81] = {
		/* Group 1 extended opcode */
		.op_byte = 0x81,
		.op_type = VIE_OP_TYPE_GROUP1,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x83] = {
		/* Group 1 extended opcode */
		.op_byte = 0x83,
		.op_type = VIE_OP_TYPE_GROUP1,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0x8F] = {
		/* XXX Group 1A extended opcode - not just POP */
		.op_byte = 0x8F,
		.op_type = VIE_OP_TYPE_POP,
	},
	[0xFF] = {
		/* XXX Group 5 extended opcode - not just PUSH */
		.op_byte = 0xFF,
		.op_type = VIE_OP_TYPE_PUSH,
	},
};

/* struct vie.mod */
#define	VIE_MOD_INDIRECT		0
#define	VIE_MOD_INDIRECT_DISP8		1
#define	VIE_MOD_INDIRECT_DISP32		2
#define	VIE_MOD_DIRECT			3

/* struct vie.rm */
#define	VIE_RM_SIB			4
#define	VIE_RM_DISP32			5

#define	GB				(1024 * 1024 * 1024)

static enum vm_reg_name gpr_map[16] = {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15
};

static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};
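
/*
 * Example of how the table above is used (illustrative, not from the
 * original sources): a 2-byte write of 0x1234 to %rax through
 * vie_update_register() computes
 *
 *	val = (0x1234 & size2mask[2]) | (origval & ~size2mask[2]);
 *
 * replacing only the low word and preserving the upper 48 bits of the
 * original register value, while 4-byte writes zero-extend per the
 * usual x86 rules.
 */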

static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
	int error;

	error = vm_get_register(vm, vcpuid, reg, rval);

	return (error);
}

static void
vie_calc_bytereg(struct vie *vie, enum vm_reg_name *reg, int *lhbr)
{
	*lhbr = 0;
	*reg = gpr_map[vie->reg];

	/*
	 * 64-bit mode imposes limitations on accessing legacy high byte
	 * registers (lhbr).
	 *
	 * The legacy high-byte registers cannot be addressed if the REX
	 * prefix is present. In this case the values 4, 5, 6 and 7 of the
	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
	 *
	 * If the REX prefix is not present then the values 4, 5, 6 and 7
	 * of the 'ModRM:reg' field address the legacy high-byte registers,
	 * %ah, %ch, %dh and %bh respectively.
	 */
	if (!vie->rex_present) {
		if (vie->reg & 0x4) {
			*lhbr = 1;
			*reg = gpr_map[vie->reg & 0x3];
		}
	}
}

static int
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
{
	uint64_t val;
	int error, lhbr;
	enum vm_reg_name reg;

	vie_calc_bytereg(vie, &reg, &lhbr);
	error = vm_get_register(vm, vcpuid, reg, &val);

	/*
	 * To obtain the value of a legacy high byte register shift the
	 * base register right by 8 bits (%ah = %rax >> 8).
	 */
	if (lhbr)
		*rval = val >> 8;
	else
		*rval = val;
	return (error);
}

static int
vie_write_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t byte)
{
	uint64_t origval, val, mask;
	int error, lhbr;
	enum vm_reg_name reg;

	vie_calc_bytereg(vie, &reg, &lhbr);
	error = vm_get_register(vm, vcpuid, reg, &origval);
	if (error == 0) {
		val = byte;
		mask = 0xff;
		if (lhbr) {
			/*
			 * Shift left by 8 to store 'byte' in a legacy high
			 * byte register.
			 */
			val <<= 8;
			mask <<= 8;
		}
		val |= origval & ~mask;
		error = vm_set_register(vm, vcpuid, reg, val);
	}
	return (error);
}

static int
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
    uint64_t val, int size)
{
	int error;
	uint64_t origval;

	switch (size) {
	case 1:
	case 2:
		error = vie_read_register(vm, vcpuid, reg, &origval);
		if (error)
			return (error);
		val &= size2mask[size];
		val |= origval & ~size2mask[size];
		break;
	case 4:
		val &= 0xffffffffUL;
		break;
	case 8:
		break;
	default:
		return (EINVAL);
	}

	error = vm_set_register(vm, vcpuid, reg, val);
	return (error);
}

#define	RFLAGS_STATUS_BITS    (PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_V)

/*
 * Return the status flags that would result from doing (x - y).
 */
#define	GETCC(sz)							\
static u_long								\
getcc##sz(uint##sz##_t x, uint##sz##_t y)				\
{									\
	u_long rflags;							\
									\
	__asm __volatile("sub %2,%1; pushfq; popq %0" :			\
	    "=r" (rflags), "+r" (x) : "m" (y));				\
	return (rflags);						\
} struct __hack

GETCC(8);
GETCC(16);
GETCC(32);
GETCC(64);

static u_long
getcc(int opsize, uint64_t x, uint64_t y)
{
	KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
	    ("getcc: invalid operand size %d", opsize));

	if (opsize == 1)
		return (getcc8(x, y));
	else if (opsize == 2)
		return (getcc16(x, y));
	else if (opsize == 4)
		return (getcc32(x, y));
	else
		return (getcc64(x, y));
}
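
/*
 * Illustrative example (not in the original sources): getcc(1, 0x01, 0x02)
 * executes an 8-bit "sub $0x02" on 0x01, so the borrow sets PSL_C and the
 * 8-bit result 0xff sets PSL_N in the returned flags, exactly what a guest
 * executing the same subtraction would observe in %rflags.
 */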

static int
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint8_t byte;
	uint64_t val;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x88:
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 88/r:	mov r/m8, r8
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;	/* override for byte operation */
		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
		if (error == 0)
			error = memwrite(vm, vcpuid, gpa, byte, size, arg);
		break;
	case 0x89:
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:	mov r/m16, r16
		 * 89/r:	mov r/m32, r32
		 * REX.W + 89/r	mov r/m64, r64
		 */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0x8A:
		/*
		 * MOV byte from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8A/r:	mov r8, r/m8
		 * REX + 8A/r:	mov r8, r/m8
		 */
		size = 1;	/* override for byte operation */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0)
			error = vie_write_bytereg(vm, vcpuid, vie, val);
		break;
	case 0x8B:
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8B/r:	mov r16, r/m16
		 * 8B/r:	mov r32, r/m32
		 * REX.W 8B/r:	mov r64, r/m64
		 */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = gpr_map[vie->reg];
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xA1:
		/*
		 * MOV from seg:moffset to AX/EAX/RAX
		 * A1:		mov AX, moffs16
		 * A1:		mov EAX, moffs32
		 * REX.W + A1:	mov RAX, moffs64
		 */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = VM_REG_GUEST_RAX;
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xA3:
		/*
		 * MOV from AX/EAX/RAX to seg:moffset
		 * A3:		mov moffs16, AX
		 * A3:		mov moffs32, EAX
		 * REX.W + A3:	mov moffs64, RAX
		 */
		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0xC6:
		/*
		 * MOV from imm8 to mem (ModRM:r/m)
		 * C6/0		mov r/m8, imm8
		 * REX + C6/0	mov r/m8, imm8
		 */
		size = 1;	/* override for byte operation */
		error = memwrite(vm, vcpuid, gpa, vie->immediate, size, arg);
		break;
	case 0xC7:
		/*
		 * MOV from imm16/imm32 to mem (ModRM:r/m)
		 * C7/0		mov r/m16, imm16
		 * C7/0		mov r/m32, imm32
		 * REX.W + C7/0	mov r/m64, imm32 (sign-extended to 64-bits)
		 */
		val = vie->immediate & size2mask[size];
		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		break;
	default:
		break;
	}

	return (error);
}
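
/*
 * A worked example of the flow above (illustrative only): a guest store
 * "mov %eax,(%rbx)" that faults on an MMIO page decodes to op_byte 0x89
 * with vie->opsize = 4, so emulate_mov() reads %rax, masks it with
 * size2mask[4] and hands the low 32 bits to the registered memwrite()
 * callback for the faulting guest physical address.
 */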

static int
emulate_movx(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite,
    void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0xB6:
		/*
		 * MOV and zero extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B6/r		movzx r16, r/m8
		 * 0F B6/r		movzx r32, r/m8
		 * REX.W + 0F B6/r	movzx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);
		if (error)
			break;

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* zero-extend byte */
		val = (uint8_t)val;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);
		break;
	case 0xB7:
		/*
		 * MOV and zero extend word from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B7/r		movzx r32, r/m16
		 * REX.W + 0F B7/r	movzx r64, r/m16
		 */
		error = memread(vm, vcpuid, gpa, &val, 2, arg);
		if (error)
			return (error);

		reg = gpr_map[vie->reg];

		/* zero-extend word */
		val = (uint16_t)val;

		error = vie_update_register(vm, vcpuid, reg, val, size);
		break;
	case 0xBE:
		/*
		 * MOV and sign extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F BE/r		movsx r16, r/m8
		 * 0F BE/r		movsx r32, r/m8
		 * REX.W + 0F BE/r	movsx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);
		if (error)
			break;

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* sign extend byte */
		val = (int8_t)val;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);
		break;
	default:
		break;
	}
	return (error);
}
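
/*
 * Example of the difference between the two extensions (illustrative):
 * reading the byte 0x80 from memory with "movzx" (0F B6) leaves 0x80 in
 * the destination register, while "movsx" (0F BE) yields
 * 0xffffffffffffff80 for a 64-bit destination.
 */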

/*
 * Helper function to calculate and validate a linear address.
 */
static int
get_gla(void *vm, int vcpuid, struct vie *vie, struct vm_guest_paging *paging,
    int opsize, int addrsize, int prot, enum vm_reg_name seg,
    enum vm_reg_name gpr, uint64_t *gla, int *fault)
{
	struct seg_desc desc;
	uint64_t cr0, val, rflags;
	int error;

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	error = vm_get_seg_desc(vm, vcpuid, seg, &desc);
	KASSERT(error == 0, ("%s: error %d getting segment descriptor %d",
	    __func__, error, seg));

	error = vie_read_register(vm, vcpuid, gpr, &val);
	KASSERT(error == 0, ("%s: error %d getting register %d", __func__,
	    error, gpr));

	if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
	    addrsize, prot, gla)) {
		if (seg == VM_REG_GUEST_SS)
			vm_inject_ss(vm, vcpuid, 0);
		else
			vm_inject_gp(vm, vcpuid);
		goto guest_fault;
	}

	if (vie_canonical_check(paging->cpu_mode, *gla)) {
		if (seg == VM_REG_GUEST_SS)
			vm_inject_ss(vm, vcpuid, 0);
		else
			vm_inject_gp(vm, vcpuid);
		goto guest_fault;
	}

	if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla)) {
		vm_inject_ac(vm, vcpuid, 0);
		goto guest_fault;
	}

	*fault = 0;
	return (0);

guest_fault:
	*fault = 1;
	return (0);
}

static int
emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
#ifdef _KERNEL
	struct vm_copyinfo copyinfo[2];
#else
	struct iovec copyinfo[2];
#endif
	uint64_t dstaddr, srcaddr, dstgpa, srcgpa, val;
	uint64_t rcx, rdi, rsi, rflags;
	int error, fault, opsize, seg, repeat;

	opsize = (vie->op.op_byte == 0xA4) ? 1 : vie->opsize;
	val = 0;
	error = 0;

	/*
	 * XXX although the MOVS instruction is only supposed to be used with
	 * the "rep" prefix, some guests like FreeBSD will use "repnz" instead.
	 *
	 * Empirically the "repnz" prefix has identical behavior to "rep"
	 * and the zero flag does not make a difference.
	 */
	repeat = vie->repz_present | vie->repnz_present;

	if (repeat) {
		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
		KASSERT(!error, ("%s: error %d getting rcx", __func__, error));

		/*
		 * The count register is %rcx, %ecx or %cx depending on the
		 * address size of the instruction.
		 */
		if ((rcx & vie_size2mask(vie->addrsize)) == 0) {
			error = 0;
			goto done;
		}
	}

	/*
	 *	Source		Destination	Comments
	 *	--------------------------------------------
	 * (1)  memory		memory		n/a
	 * (2)  memory		mmio		emulated
	 * (3)  mmio		memory		emulated
	 * (4)  mmio		mmio		emulated
	 *
	 * At this point we don't have sufficient information to distinguish
	 * between (2), (3) and (4). We use 'vm_copy_setup()' to tease this
	 * out because it will succeed only when operating on regular memory.
	 *
	 * XXX the emulation doesn't properly handle the case where 'gpa'
	 * is straddling the boundary between the normal memory and MMIO.
	 */

	seg = vie->segment_override ? vie->segment_register : VM_REG_GUEST_DS;
	error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
	    PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr, &fault);
	if (error || fault)
		goto done;

	error = vm_copy_setup(vm, vcpuid, paging, srcaddr, opsize, PROT_READ,
	    copyinfo, nitems(copyinfo), &fault);
	if (error == 0) {
		if (fault)
			goto done;	/* Resume guest to handle fault */

		/*
		 * case (2): read from system memory and write to mmio.
		 */
		vm_copyin(vm, vcpuid, copyinfo, &val, opsize);
		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
		error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
		if (error)
			goto done;
	} else {
		/*
		 * 'vm_copy_setup()' is expected to fail for cases (3) and (4)
		 * if 'srcaddr' is in the mmio space.
		 */

		error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
		    PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr,
		    &fault);
		if (error || fault)
			goto done;

		error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
		    PROT_WRITE, copyinfo, nitems(copyinfo), &fault);
		if (error == 0) {
			if (fault)
				goto done;    /* Resume guest to handle fault */

			/*
			 * case (3): read from MMIO and write to system memory.
			 *
			 * A MMIO read can have side-effects so we
			 * commit to it only after vm_copy_setup() is
			 * successful. If a page-fault needs to be
			 * injected into the guest then it will happen
			 * before the MMIO read is attempted.
			 */
			error = memread(vm, vcpuid, gpa, &val, opsize, arg);
			if (error)
				goto done;

			vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
			vm_copy_teardown(vm, vcpuid, copyinfo,
			    nitems(copyinfo));
		} else {
			/*
			 * Case (4): read from and write to mmio.
			 *
			 * Commit to the MMIO read/write (with potential
			 * side-effects) only after we are sure that the
			 * instruction is not going to be restarted due
			 * to address translation faults.
			 */
			error = vm_gla2gpa(vm, vcpuid, paging, srcaddr,
			    PROT_READ, &srcgpa, &fault);
			if (error || fault)
				goto done;

			error = vm_gla2gpa(vm, vcpuid, paging, dstaddr,
			    PROT_WRITE, &dstgpa, &fault);
			if (error || fault)
				goto done;

			error = memread(vm, vcpuid, srcgpa, &val, opsize, arg);
			if (error)
				goto done;

			error = memwrite(vm, vcpuid, dstgpa, val, opsize, arg);
			if (error)
				goto done;
		}
	}

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSI, &rsi);
	KASSERT(error == 0, ("%s: error %d getting rsi", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
	KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	if (rflags & PSL_D) {
		rsi -= opsize;
		rdi -= opsize;
	} else {
		rsi += opsize;
		rdi += opsize;
	}

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSI, rsi,
	    vie->addrsize);
	KASSERT(error == 0, ("%s: error %d updating rsi", __func__, error));

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
	    vie->addrsize);
	KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));

	if (repeat) {
		rcx = rcx - 1;
		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
		    rcx, vie->addrsize);
		KASSERT(!error, ("%s: error %d updating rcx", __func__, error));

		/*
		 * Repeat the instruction if the count register is not zero.
		 */
		if ((rcx & vie_size2mask(vie->addrsize)) != 0)
			vm_restart_instruction(vm, vcpuid);
	}
done:
	KASSERT(error == 0 || error == EFAULT, ("%s: unexpected error %d",
	    __func__, error));
	return (error);
}
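
/*
 * Note on the "rep" handling above: only a single iteration is emulated
 * per VM-exit. The count register is decremented by one and, if its
 * (address-size truncated) value is still non-zero, the instruction is
 * restarted, so a long "rep movs" touching MMIO completes one element
 * at a time across successive exits.
 */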

static int
emulate_stos(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
	int error, opsize, repeat;
	uint64_t val;
	uint64_t rcx, rdi, rflags;

	opsize = (vie->op.op_byte == 0xAA) ? 1 : vie->opsize;
	repeat = vie->repz_present | vie->repnz_present;

	if (repeat) {
		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
		KASSERT(!error, ("%s: error %d getting rcx", __func__, error));

		/*
		 * The count register is %rcx, %ecx or %cx depending on the
		 * address size of the instruction.
		 */
		if ((rcx & vie_size2mask(vie->addrsize)) == 0)
			return (0);
	}

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
	KASSERT(!error, ("%s: error %d getting rax", __func__, error));

	error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
	if (error)
		return (error);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
	KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	if (rflags & PSL_D)
		rdi -= opsize;
	else
		rdi += opsize;

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
	    vie->addrsize);
	KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));

	if (repeat) {
		rcx = rcx - 1;
		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
		    rcx, vie->addrsize);
		KASSERT(!error, ("%s: error %d updating rcx", __func__, error));

		/*
		 * Repeat the instruction if the count register is not zero.
		 */
		if ((rcx & vie_size2mask(vie->addrsize)) != 0)
			vm_restart_instruction(vm, vcpuid);
	}

	return (0);
}

static int
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t result, rflags, rflags2, val1, val2;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x23:
		/*
		 * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 23/r		and r16, r/m16
		 * 23/r		and r32, r/m32
		 * REX.W + 23/r	and r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		result = val1 & val2;
		error = vie_update_register(vm, vcpuid, reg, result, size);
		break;
	case 0x81:
	case 0x83:
		/*
		 * AND mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81 /4		and r/m16, imm16
		 * 81 /4		and r/m32, imm32
		 * REX.W + 81 /4	and r/m64, imm32 sign-extended to 64
		 *
		 * 83 /4		and r/m16, imm8 sign-extended to 16
		 * 83 /4		and r/m32, imm8 sign-extended to 32
		 * REX.W + 83/4		and r/m64, imm8 sign-extended to 64
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		result = val1 & vie->immediate;
		error = memwrite(vm, vcpuid, gpa, result, size, arg);
		break;
	default:
		break;
	}
	if (error)
		return (error);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	if (error)
		return (error);

	/*
	 * OF and CF are cleared; the SF, ZF and PF flags are set according
	 * to the result; AF is undefined.
	 *
	 * The updated status flags are obtained by subtracting 0 from 'result'.
	 */
	rflags2 = getcc(size, result, 0);
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
	return (error);
}

static int
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t result, rflags, rflags2, val1, val2;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x0B:
		/*
		 * OR reg (ModRM:reg) and mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 0b/r		or r16, r/m16
		 * 0b/r		or r32, r/m32
		 * REX.W + 0b/r	or r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		result = val1 | val2;
		error = vie_update_register(vm, vcpuid, reg, result, size);
		break;
	case 0x81:
	case 0x83:
		/*
		 * OR mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81 /1		or r/m16, imm16
		 * 81 /1		or r/m32, imm32
		 * REX.W + 81 /1	or r/m64, imm32 sign-extended to 64
		 *
		 * 83 /1		or r/m16, imm8 sign-extended to 16
		 * 83 /1		or r/m32, imm8 sign-extended to 32
		 * REX.W + 83/1		or r/m64, imm8 sign-extended to 64
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		result = val1 | vie->immediate;
		error = memwrite(vm, vcpuid, gpa, result, size, arg);
		break;
	default:
		break;
	}
	if (error)
		return (error);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	if (error)
		return (error);

	/*
	 * OF and CF are cleared; the SF, ZF and PF flags are set according
	 * to the result; AF is undefined.
	 *
	 * The updated status flags are obtained by subtracting 0 from 'result'.
	 */
	rflags2 = getcc(size, result, 0);
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
	return (error);
}

static int
emulate_cmp(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	uint64_t regop, memop, op1, op2, rflags, rflags2;
	enum vm_reg_name reg;

	size = vie->opsize;
	switch (vie->op.op_byte) {
	case 0x39:
	case 0x3B:
		/*
		 * 39/r		CMP r/m16, r16
		 * 39/r		CMP r/m32, r32
		 * REX.W 39/r	CMP r/m64, r64
		 *
		 * 3B/r		CMP r16, r/m16
		 * 3B/r		CMP r32, r/m32
		 * REX.W + 3B/r	CMP r64, r/m64
		 *
		 * Compare the first operand with the second operand and
		 * set status flags in EFLAGS register. The comparison is
		 * performed by subtracting the second operand from the first
		 * operand and then setting the status flags.
		 */

		/* Get the register operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &regop);
		if (error)
			return (error);

		/* Get the memory operand */
		error = memread(vm, vcpuid, gpa, &memop, size, arg);
		if (error)
			return (error);

		if (vie->op.op_byte == 0x3B) {
			op1 = regop;
			op2 = memop;
		} else {
			op1 = memop;
			op2 = regop;
		}
		rflags2 = getcc(size, op1, op2);
		break;
	case 0x80:
	case 0x81:
	case 0x83:
		/*
		 * 80 /7		cmp r/m8, imm8
		 * REX + 80 /7		cmp r/m8, imm8
		 *
		 * 81 /7		cmp r/m16, imm16
		 * 81 /7		cmp r/m32, imm32
		 * REX.W + 81 /7	cmp r/m64, imm32 sign-extended to 64
		 *
		 * 83 /7		cmp r/m16, imm8 sign-extended to 16
		 * 83 /7		cmp r/m32, imm8 sign-extended to 32
		 * REX.W + 83 /7	cmp r/m64, imm8 sign-extended to 64
		 *
		 * Compare mem (ModRM:r/m) with immediate and set
		 * status flags according to the results. The
		 * comparison is performed by subtracting the
		 * immediate from the first operand and then setting
		 * the status flags.
		 */
		if (vie->op.op_byte == 0x80)
			size = 1;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &op1, size, arg);
		if (error)
			return (error);

		rflags2 = getcc(size, op1, vie->immediate);
		break;
	default:
		return (EINVAL);
	}
	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	if (error)
		return (error);
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & RFLAGS_STATUS_BITS;

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
	return (error);
}
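
/*
 * For instance (illustrative): comparing equal operands yields
 * rflags2 = getcc(size, x, x) with PSL_Z set and PSL_C clear, and only
 * the RFLAGS_STATUS_BITS portion of %rflags is replaced; the remaining
 * bits (interrupt enable, direction, etc.) are left untouched.
 */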

static int
emulate_sub(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	uint64_t nval, rflags, rflags2, val1, val2;
	enum vm_reg_name reg;

	size = vie->opsize;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x2B:
		/*
		 * SUB r/m from r and store the result in r
		 *
		 * 2B/r		SUB r16, r/m16
		 * 2B/r		SUB r32, r/m32
		 * REX.W + 2B/r	SUB r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		nval = val1 - val2;
		error = vie_update_register(vm, vcpuid, reg, nval, size);
		break;
	default:
		break;
	}

	if (!error) {
		rflags2 = getcc(size, val1, val2);
		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
		    &rflags);
		if (error)
			return (error);

		rflags &= ~RFLAGS_STATUS_BITS;
		rflags |= rflags2 & RFLAGS_STATUS_BITS;
		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
		    rflags, 8);
	}

	return (error);
}

static int
emulate_stack_op(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
#ifdef _KERNEL
	struct vm_copyinfo copyinfo[2];
#else
	struct iovec copyinfo[2];
#endif
	struct seg_desc ss_desc;
	uint64_t cr0, rflags, rsp, stack_gla, val;
	int error, fault, size, stackaddrsize, pushop;

	val = 0;
	size = vie->opsize;
	pushop = (vie->op.op_type == VIE_OP_TYPE_PUSH) ? 1 : 0;

	/*
	 * From "Address-Size Attributes for Stack Accesses", Intel SDM, Vol 1
	 */
	if (paging->cpu_mode == CPU_MODE_REAL) {
		stackaddrsize = 2;
	} else if (paging->cpu_mode == CPU_MODE_64BIT) {
		/*
		 * "Stack Manipulation Instructions in 64-bit Mode", SDM, Vol 3
		 * - Stack pointer size is always 64-bits.
		 * - PUSH/POP of 32-bit values is not possible in 64-bit mode.
		 * - 16-bit PUSH/POP is supported by using the operand size
		 *   override prefix (66H).
		 */
		stackaddrsize = 8;
		size = vie->opsize_override ? 2 : 8;
	} else {
		/*
		 * In protected or compatibility mode the 'B' flag in the
		 * stack-segment descriptor determines the size of the
		 * stack pointer.
		 */
		error = vm_get_seg_desc(vm, vcpuid, VM_REG_GUEST_SS, &ss_desc);
		KASSERT(error == 0, ("%s: error %d getting SS descriptor",
		    __func__, error));
		if (SEG_DESC_DEF32(ss_desc.access))
			stackaddrsize = 4;
		else
			stackaddrsize = 2;
	}

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSP, &rsp);
	KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error));
	if (pushop)
		rsp -= size;

	if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
	    rsp, size, stackaddrsize, pushop ? PROT_WRITE : PROT_READ,
	    &stack_gla)) {
		vm_inject_ss(vm, vcpuid, 0);
		return (0);
	}

	if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
		vm_inject_ss(vm, vcpuid, 0);
		return (0);
	}

	if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) {
		vm_inject_ac(vm, vcpuid, 0);
		return (0);
	}

	error = vm_copy_setup(vm, vcpuid, paging, stack_gla, size,
	    pushop ? PROT_WRITE : PROT_READ, copyinfo, nitems(copyinfo),
	    &fault);
	if (error || fault)
		return (error);

	if (pushop) {
		error = memread(vm, vcpuid, mmio_gpa, &val, size, arg);
		if (error == 0)
			vm_copyout(vm, vcpuid, &val, copyinfo, size);
	} else {
		vm_copyin(vm, vcpuid, copyinfo, &val, size);
		error = memwrite(vm, vcpuid, mmio_gpa, val, size, arg);
		rsp += size;
	}
	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));

	if (error == 0) {
		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSP, rsp,
		    stackaddrsize);
		KASSERT(error == 0, ("error %d updating rsp", error));
	}
	return (error);
}

static int
emulate_push(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
	int error;

	/*
	 * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
	 *
	 * PUSH is part of the group 5 extended opcodes and is identified
	 * by ModRM:reg = b110.
	 */
	if ((vie->reg & 7) != 6)
		return (EINVAL);

	error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
	    memwrite, arg);
	return (error);
}

static int
emulate_pop(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
	int error;

	/*
	 * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
	 *
	 * POP is part of the group 1A extended opcodes and is identified
	 * by ModRM:reg = b000.
	 */
	if ((vie->reg & 7) != 0)
		return (EINVAL);

	error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
	    memwrite, arg);
	return (error);
}

static int
emulate_group1(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)
{
	int error;

	switch (vie->reg & 7) {
	case 0x1:	/* OR */
		error = emulate_or(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case 0x4:	/* AND */
		error = emulate_and(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case 0x7:	/* CMP */
		error = emulate_cmp(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static int
emulate_bittest(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *memarg)
{
	uint64_t val, rflags;
	int error, bitmask, bitoff;

	/*
	 * 0F BA is a Group 8 extended opcode.
	 *
	 * Currently we only emulate the 'Bit Test' instruction which is
	 * identified by a ModR/M:reg encoding of 100b.
	 */
	if ((vie->reg & 7) != 4)
		return (EINVAL);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	error = memread(vm, vcpuid, gpa, &val, vie->opsize, memarg);
	if (error)
		return (error);

	/*
	 * Intel SDM, Vol 2, Table 3-2:
	 * "Range of Bit Positions Specified by Bit Offset Operands"
	 */
	bitmask = vie->opsize * 8 - 1;
	bitoff = vie->immediate & bitmask;

	/* Copy the bit into the Carry flag in %rflags */
	if (val & (1UL << bitoff))
		rflags |= PSL_C;
	else
		rflags &= ~PSL_C;

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
	KASSERT(error == 0, ("%s: error %d updating rflags", __func__, error));

	return (0);
}
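
/*
 * Example of the masking above (illustrative): "bt $35, (mem)" with a
 * 4-byte operand size has bitmask = 31, so bitoff = 35 & 31 = 3 and bit
 * 3 of the memory operand is copied into PSL_C.
 */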

int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)
{
	int error;

	if (!vie->decoded)
		return (EINVAL);

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_GROUP1:
		error = emulate_group1(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_POP:
		error = emulate_pop(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_PUSH:
		error = emulate_push(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_CMP:
		error = emulate_cmp(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOVSX:
	case VIE_OP_TYPE_MOVZX:
		error = emulate_movx(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOVS:
		error = emulate_movs(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_STOS:
		error = emulate_stos(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_OR:
		error = emulate_or(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_SUB:
		error = emulate_sub(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_BITTEST:
		error = emulate_bittest(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

int
vie_alignment_check(int cpl, int size, uint64_t cr0, uint64_t rf, uint64_t gla)
{
	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("%s: invalid size %d", __func__, size));
	KASSERT(cpl >= 0 && cpl <= 3, ("%s: invalid cpl %d", __func__, cpl));

	if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rf & PSL_AC) == 0)
		return (0);

	return ((gla & (size - 1)) ? 1 : 0);
}
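
/*
 * For example (illustrative): a 4-byte access at gla 0x1002 from CPL 3
 * with CR0.AM and RFLAGS.AC both set returns 1 (0x1002 & 3 != 0) and the
 * caller injects #AC; the same access at 0x1004 is aligned and passes.
 */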

int
vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
{
	uint64_t mask;

	if (cpu_mode != CPU_MODE_64BIT)
		return (0);

	/*
	 * The value of the bit 47 in the 'gla' should be replicated in the
	 * most significant 16 bits.
	 */
	mask = ~((1UL << 48) - 1);
	if (gla & (1UL << 47))
		return ((gla & mask) != mask);
	else
		return ((gla & mask) != 0);
}
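
/*
 * For example (illustrative): 0xffff800000000000 and 0x00007fffffffffff
 * are both canonical because bits 63:48 replicate bit 47, while
 * 0x0000800000000000 is not and fails the check above.
 */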

uint64_t
vie_size2mask(int size)
{
	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("vie_size2mask: invalid size %d", size));
	return (size2mask[size]);
}

int
vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t offset, int length, int addrsize,
    int prot, uint64_t *gla)
{
	uint64_t firstoff, low_limit, high_limit, segbase;
	int glasize, type;

	KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
	    ("%s: invalid segment %d", __func__, seg));
	KASSERT(length == 1 || length == 2 || length == 4 || length == 8,
	    ("%s: invalid operand size %d", __func__, length));
	KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0,
	    ("%s: invalid prot %#x", __func__, prot));

	firstoff = offset;
	if (cpu_mode == CPU_MODE_64BIT) {
		KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address "
		    "size %d for cpu_mode %d", __func__, addrsize, cpu_mode));
		glasize = 8;
	} else {
		KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address "
		    "size %d for cpu mode %d", __func__, addrsize, cpu_mode));
		glasize = 4;
		/*
		 * If the segment selector is loaded with a NULL selector
		 * then the descriptor is unusable and attempting to use
		 * it results in a #GP(0).
		 */
		if (SEG_DESC_UNUSABLE(desc->access))
			return (-1);

		/*
		 * The processor generates a #NP exception when a segment
		 * register is loaded with a selector that points to a
		 * descriptor that is not present. If this was the case then
		 * it would have been checked before the VM-exit.
		 */
		KASSERT(SEG_DESC_PRESENT(desc->access),
		    ("segment %d not present: %#x", seg, desc->access));

		/*
		 * The descriptor type must indicate a code/data segment.
		 */
		type = SEG_DESC_TYPE(desc->access);
		KASSERT(type >= 16 && type <= 31, ("segment %d has invalid "
		    "descriptor type %#x", seg, type));

		if (prot & PROT_READ) {
			/* #GP on a read access to an exec-only code segment */
			if ((type & 0xA) == 0x8)
				return (-1);
		}

		if (prot & PROT_WRITE) {
			/*
			 * #GP on a write access to a code segment or a
			 * read-only data segment.
			 */
			if (type & 0x8)			/* code segment */
				return (-1);

			if ((type & 0xA) == 0)		/* read-only data seg */
				return (-1);
		}

		/*
		 * 'desc->limit' is fully expanded taking granularity into
		 * account.
		 */
		if ((type & 0xC) == 0x4) {
			/* expand-down data segment */
			low_limit = desc->limit + 1;
			high_limit = SEG_DESC_DEF32(desc->access) ?
			    0xffffffff : 0xffff;
		} else {
			/* code segment or expand-up data segment */
			low_limit = 0;
			high_limit = desc->limit;
		}

		while (length > 0) {
			offset &= vie_size2mask(addrsize);
			if (offset < low_limit || offset > high_limit)
				return (-1);
			offset++;
			length--;
		}
	}

	/*
	 * In 64-bit mode all segments except %fs and %gs have a segment
	 * base address of 0.
	 */
	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
	    seg != VM_REG_GUEST_GS) {
		segbase = 0;
	} else {
		segbase = desc->base;
	}

	/*
	 * Truncate 'firstoff' to the effective address size before adding
	 * it to the segment base.
	 */
	firstoff &= vie_size2mask(addrsize);
	*gla = (segbase + firstoff) & vie_size2mask(glasize);
	return (0);
}

#ifdef _KERNEL
void
vie_init(struct vie *vie, const char *inst_bytes, int inst_length)
{
	KASSERT(inst_length >= 0 && inst_length <= VIE_INST_SIZE,
	    ("%s: invalid instruction length (%d)", __func__, inst_length));

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
	vie->segment_register = VM_REG_LAST;

	if (inst_length) {
		bcopy(inst_bytes, vie->inst, inst_length);
		vie->num_valid = inst_length;
	}
}

static int
pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
{
	int error_code = 0;

	if (pte & PG_V)
		error_code |= PGEX_P;
	if (prot & VM_PROT_WRITE)
		error_code |= PGEX_W;
	if (usermode)
		error_code |= PGEX_U;
	if (rsvd)
		error_code |= PGEX_RSV;
	if (prot & VM_PROT_EXECUTE)
		error_code |= PGEX_I;

	return (error_code);
}
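
/*
 * Example encoding (illustrative): a CPL 3 write that hits a present,
 * read-only page is reported with pf_error_code(1, VM_PROT_WRITE, 0, pte)
 * = PGEX_P | PGEX_W | PGEX_U = 0x7, matching what the real MMU would push
 * for that fault.
 */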

static void
ptp_release(void **cookie)
{
	if (*cookie != NULL) {
		vm_gpa_release(*cookie);
		*cookie = NULL;
	}
}

static void *
ptp_hold(struct vm *vm, int vcpu, vm_paddr_t ptpphys, size_t len, void **cookie)
{
	void *ptr;

	ptp_release(cookie);
	ptr = vm_gpa_hold(vm, vcpu, ptpphys, len, VM_PROT_RW, cookie);
	return (ptr);
}

int
vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *guest_fault)
{
	int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
	uint64_t *ptpbase, ptpphys, pte, pgsize;
	uint32_t *ptpbase32, pte32;
	void *cookie;

	*guest_fault = 0;

	usermode = (paging->cpl == 3 ? 1 : 0);
	writable = prot & VM_PROT_WRITE;
	cookie = NULL;
	retval = 0;
restart:
	ptpphys = paging->cr3;		/* root of the page tables */
	ptp_release(&cookie);

	if (vie_canonical_check(paging->cpu_mode, gla)) {
		/*
		 * XXX assuming a non-stack reference otherwise a stack fault
		 * should be generated.
		 */
		vm_inject_gp(vm, vcpuid);
		goto fault;
	}

	if (paging->paging_mode == PAGING_MODE_FLAT) {
		*gpa = gla;
		goto done;
	}

	if (paging->paging_mode == PAGING_MODE_32) {
		nlevels = 2;
		while (--nlevels >= 0) {
			/* Zero out the lower 12 bits. */
			ptpphys &= ~0xfff;

			ptpbase32 = ptp_hold(vm, vcpuid, ptpphys, PAGE_SIZE,
			    &cookie);

			if (ptpbase32 == NULL)
				goto error;

			ptpshift = PAGE_SHIFT + nlevels * 10;
			ptpindex = (gla >> ptpshift) & 0x3FF;
			pgsize = 1UL << ptpshift;

			pte32 = ptpbase32[ptpindex];

			if ((pte32 & PG_V) == 0 ||
			    (usermode && (pte32 & PG_U) == 0) ||
			    (writable && (pte32 & PG_RW) == 0)) {
				pfcode = pf_error_code(usermode, prot, 0,
				    pte32);
				vm_inject_pf(vm, vcpuid, pfcode, gla);
				goto fault;
			}

			/*
			 * Emulate the x86 MMU's management of the accessed
			 * and dirty flags. While the accessed flag is set
			 * at every level of the page table, the dirty flag
			 * is only set at the last level providing the guest
			 * physical address.
			 */
			if ((pte32 & PG_A) == 0) {
				if (atomic_cmpset_32(&ptpbase32[ptpindex],
				    pte32, pte32 | PG_A) == 0) {
					goto restart;
				}
			}

			/* XXX must be ignored if CR4.PSE=0 */
			if (nlevels > 0 && (pte32 & PG_PS) != 0)
				break;

			ptpphys = pte32;
		}

		/* Set the dirty bit in the page table entry if necessary */
		if (writable && (pte32 & PG_M) == 0) {
			if (atomic_cmpset_32(&ptpbase32[ptpindex],
			    pte32, pte32 | PG_M) == 0) {
				goto restart;
			}
		}

		/* Zero out the lower 'ptpshift' bits */
		pte32 >>= ptpshift; pte32 <<= ptpshift;
		*gpa = pte32 | (gla & (pgsize - 1));
		goto done;
	}

	if (paging->paging_mode == PAGING_MODE_PAE) {
		/* Zero out the lower 5 bits and the upper 32 bits */
		ptpphys &= 0xffffffe0UL;

		ptpbase = ptp_hold(vm, vcpuid, ptpphys, sizeof(*ptpbase) * 4,
		    &cookie);
		if (ptpbase == NULL)
			goto error;

		ptpindex = (gla >> 30) & 0x3;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);
			goto fault;
		}

		ptpphys = pte;

		nlevels = 2;
	} else
		nlevels = 4;
	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		ptpbase = ptp_hold(vm, vcpuid, ptpphys, PAGE_SIZE, &cookie);
		if (ptpbase == NULL)
			goto error;

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0 ||
		    (usermode && (pte & PG_U) == 0) ||
		    (writable && (pte & PG_RW) == 0)) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);
			goto fault;
		}

		/* Set the accessed bit in the page table entry */
		if ((pte & PG_A) == 0) {
			if (atomic_cmpset_64(&ptpbase[ptpindex],
			    pte, pte | PG_A) == 0) {
				goto restart;
			}
		}

		if (nlevels > 0 && (pte & PG_PS) != 0) {
			if (pgsize > 1 * GB) {
				pfcode = pf_error_code(usermode, prot, 1, pte);
				vm_inject_pf(vm, vcpuid, pfcode, gla);
				goto fault;
			}
			break;
		}

		ptpphys = pte;
	}

	/* Set the dirty bit in the page table entry if necessary */
	if (writable && (pte & PG_M) == 0) {
		if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_M) == 0)
			goto restart;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));
done:
	ptp_release(&cookie);
	KASSERT(retval == 0 || retval == EFAULT, ("%s: unexpected retval %d",
	    __func__, retval));
	return (retval);
error:
	retval = EFAULT;
	goto done;
fault:
	*guest_fault = 1;
	goto done;
}
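
/*
 * A sample long-mode walk (illustrative): with 4-level paging a load at
 * gla 0x7f0000201000 probes the levels with ptpshift values 39, 30, 21
 * and 12; if a 2MB PG_PS mapping is found at nlevels == 1 (ptpshift 21),
 * the result is *gpa = (pte & ~0x1fffff) | (gla & 0x1fffff).
 */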

int
vmm_fetch_instruction(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t rip, int inst_length, struct vie *vie, int *faultptr)
{
	struct vm_copyinfo copyinfo[2];
	int error, prot;

	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	prot = PROT_READ | PROT_EXEC;
	error = vm_copy_setup(vm, vcpuid, paging, rip, inst_length, prot,
	    copyinfo, nitems(copyinfo), faultptr);
	if (error || *faultptr)
		return (error);

	vm_copyin(vm, vcpuid, copyinfo, vie->inst, inst_length);
	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
	vie->num_valid = inst_length;
	return (0);
}

static int
vie_peek(struct vie *vie, uint8_t *x)
{

	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	} else
		return (-1);
}

static void
vie_advance(struct vie *vie)
{

	vie->num_processed++;
}

static bool
segment_override(uint8_t x, int *seg)
{

	switch (x) {
	case 0x2E:
		*seg = VM_REG_GUEST_CS;
		break;
	case 0x36:
		*seg = VM_REG_GUEST_SS;
		break;
	case 0x3E:
		*seg = VM_REG_GUEST_DS;
		break;
	case 0x26:
		*seg = VM_REG_GUEST_ES;
		break;
	case 0x64:
		*seg = VM_REG_GUEST_FS;
		break;
	case 0x65:
		*seg = VM_REG_GUEST_GS;
		break;
	default:
		return (false);
	}
	return (true);
}

static int
decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)
{
	uint8_t x;

	while (1) {
		if (vie_peek(vie, &x))
			return (-1);

		if (x == 0x66)
			vie->opsize_override = 1;
		else if (x == 0x67)
			vie->addrsize_override = 1;
		else if (x == 0xF3)
			vie->repz_present = 1;
		else if (x == 0xF2)
			vie->repnz_present = 1;
		else if (segment_override(x, &vie->segment_register))
			vie->segment_override = 1;
		else
			break;

		vie_advance(vie);
	}

	/*
	 * From section 2.2.1, "REX Prefixes", Intel SDM Vol 2:
	 * - Only one REX prefix is allowed per instruction.
	 * - The REX prefix must immediately precede the opcode byte or the
	 *   escape opcode byte.
	 * - If an instruction has a mandatory prefix (0x66, 0xF2 or 0xF3)
	 *   the mandatory prefix must come before the REX prefix.
	 */
	if (cpu_mode == CPU_MODE_64BIT && x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;
		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;
		vie_advance(vie);
	}

	/*
	 * Section "Operand-Size And Address-Size Attributes", Intel SDM, Vol 1
	 */
	if (cpu_mode == CPU_MODE_64BIT) {
		/*
		 * Default address size is 64-bits and default operand size
		 * is 32-bits.
		 */
		vie->addrsize = vie->addrsize_override ? 4 : 8;
		if (vie->rex_w)
			vie->opsize = 8;
		else if (vie->opsize_override)
			vie->opsize = 2;
		else
			vie->opsize = 4;
	} else if (cs_d) {
		/* Default address and operand sizes are 32-bits */
		vie->addrsize = vie->addrsize_override ? 2 : 4;
		vie->opsize = vie->opsize_override ? 2 : 4;
	} else {
		/* Default address and operand sizes are 16-bits */
		vie->addrsize = vie->addrsize_override ? 4 : 2;
		vie->opsize = vie->opsize_override ? 4 : 2;
	}
	return (0);
}
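
/*
 * Putting the rules above together (illustrative): in 64-bit mode the
 * bytes "66 89 08" decode with opsize 2 ("mov %cx,(%rax)"), "89 08" with
 * opsize 4 and "48 89 08" (REX.W) with opsize 8, while a 0x67 prefix
 * would shrink the address size from 8 to 4 instead.
 */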

static int
decode_two_byte_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = two_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);
	return (0);
}

static int
decode_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);

	if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE)
		return (decode_two_byte_opcode(vie));

	return (0);
}

static int
decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
{
	uint8_t x;

	if (vie->op.op_flags & VIE_OP_F_NO_MODRM)
		return (0);

	if (cpu_mode == CPU_MODE_REAL)
		return (-1);

	if (vie_peek(vie, &x))
		return (-1);

	vie->mod = (x >> 6) & 0x3;
	vie->rm =  (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is don't care in
		 * this case.
		 */
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	/* SIB */
	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
		goto done;

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			/*
			 * Table 2-7. RIP-Relative Addressing
			 *
			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
			 * whereas in compatibility mode it just implies disp32.
			 */
			if (cpu_mode == CPU_MODE_64BIT)
				vie->base_register = VM_REG_GUEST_RIP;
			else
				vie->base_register = VM_REG_LAST;
		}
		break;
	}

done:
	vie_advance(vie);

	return (0);
}

static int
decode_sib(struct vie *vie)
{
	uint8_t x;

	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case when base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Documented in:
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Documented in:
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;

	vie_advance(vie);

	return (0);
}
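
/*
 * SIB decoding example (illustrative): for "mov %edx,(%rax,%rcx,4)" the
 * SIB byte is 0x88, giving ss = 2, index = 1 and base = 0, so the
 * effective address is %rax + %rcx * 4 with vie->scale == 4.
 */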

static int
decode_displacement(struct vie *vie)
{
	int n, i;
	uint8_t x;

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */

	return (0);
}

static int
decode_immediate(struct vie *vie)
{
	int i, n;
	uint8_t x;
	union {
		char	buf[4];
		int8_t	signed8;
		int16_t	signed16;
		int32_t	signed32;
	} u;

	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM) {
		/*
		 * Section 2.2.1.5 "Immediates", Intel SDM:
		 * In 64-bit mode the typical size of immediate operands
		 * remains 32-bits. When the operand size is 64-bits, the
		 * processor sign-extends all immediates to 64-bits prior
		 * to their use.
		 */
		if (vie->opsize == 4 || vie->opsize == 8)
			vie->imm_bytes = 4;
		else
			vie->imm_bytes = 2;
	} else if (vie->op.op_flags & VIE_OP_F_IMM8) {
		vie->imm_bytes = 1;
	}

	if ((n = vie->imm_bytes) == 0)
		return (0);

	KASSERT(n == 1 || n == 2 || n == 4,
	    ("%s: invalid number of immediate bytes: %d", __func__, n));

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	/* sign-extend the immediate value before use */
	if (n == 1)
		vie->immediate = u.signed8;
	else if (n == 2)
		vie->immediate = u.signed16;
	else
		vie->immediate = u.signed32;

	return (0);
}

static int
decode_moffset(struct vie *vie)
{
	int i, n;
	uint8_t x;
	union {
		char	buf[8];
		uint64_t u64;
	} u;

	if ((vie->op.op_flags & VIE_OP_F_MOFFSET) == 0)
		return (0);

	/*
	 * Section 2.2.1.4, "Direct Memory-Offset MOVs", Intel SDM:
	 * The memory offset size follows the address-size of the instruction.
	 */
	n = vie->addrsize;
	KASSERT(n == 2 || n == 4 || n == 8, ("invalid moffset bytes: %d", n));

	u.u64 = 0;
	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}
	vie->displacement = u.u64;
	return (0);
}

/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches with our instruction decoding.
 */
static int
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie,
    enum vm_cpu_mode cpu_mode)
{
	int error;
	uint64_t base, segbase, idx, gla2;
	enum vm_reg_name seg;
	struct seg_desc desc;

	/* Skip 'gla' verification */
	if (gla == VIE_INVALID_GLA)
		return (0);

	base = 0;
	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
			    error, vie->base_register);
			return (-1);
		}

		/*
		 * RIP-relative addressing starts from the following
		 * instruction
		 */
		if (vie->base_register == VM_REG_GUEST_RIP)
			base += vie->num_processed;
	}

	idx = 0;
	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
			    error, vie->index_register);
			return (-1);
		}
	}

	/*
	 * From "Specifying a Segment Selector", Intel SDM, Vol 1
	 *
	 * In 64-bit mode, segmentation is generally (but not
	 * completely) disabled. The exceptions are the FS and GS
	 * segments.
	 *
	 * In legacy IA-32 mode, when the ESP or EBP register is used
	 * as the base, the SS segment is the default segment. For
	 * other data references, except when relative to stack or
	 * string destination the DS segment is the default. These
	 * can be overridden to allow other segments to be accessed.
	 */
	if (vie->segment_override)
		seg = vie->segment_register;
	else if (vie->base_register == VM_REG_GUEST_RSP ||
	    vie->base_register == VM_REG_GUEST_RBP)
		seg = VM_REG_GUEST_SS;
	else
		seg = VM_REG_GUEST_DS;
	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
	    seg != VM_REG_GUEST_GS) {
		segbase = 0;
	} else {
		error = vm_get_seg_desc(vm, cpuid, seg, &desc);
		if (error) {
			printf("verify_gla: error %d getting segment"
			    " descriptor %d", error,
			    vie->segment_register);
			return (-1);
		}
		segbase = desc.base;
	}

	gla2 = segbase + base + vie->scale * idx + vie->displacement;
	gla2 &= size2mask[vie->addrsize];
	if (gla != gla2) {
		printf("verify_gla mismatch: segbase(0x%0lx), "
		    "base(0x%0lx), scale(%d), index(0x%0lx), "
		    "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n",
		    segbase, base, vie->scale, idx, vie->displacement,
		    gla, gla2);
		return (-1);
	}

	return (0);
}

int
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
    enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)
{

	if (decode_prefixes(vie, cpu_mode, cs_d))
		return (-1);

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie, cpu_mode))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

	if (decode_moffset(vie))
		return (-1);

	if ((vie->op.op_flags & VIE_OP_F_NO_GLA_VERIFICATION) == 0) {
		if (verify_gla(vm, cpuid, gla, vie, cpu_mode))
			return (-1);
	}

	vie->decoded = 1;	/* success */

	return (0);
}
#endif	/* _KERNEL */