/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/systm.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/_iovec.h>

#include <machine/vmm.h>

#include <assert.h>
#define	KASSERT(exp,msg) assert((exp))
#endif	/* _KERNEL */

#include <machine/vmm_instruction_emul.h>
#include <x86/specialreg.h>
/* struct vie_op.op_type */
	VIE_OP_TYPE_TWOB_GRP15,

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)  /* 16/32-bit immediate operand */
#define	VIE_OP_F_IMM8		(1 << 1)  /* 8-bit immediate operand */
#define	VIE_OP_F_MOFFSET	(1 << 2)  /* 16/32/64-bit immediate moffset */
#define	VIE_OP_F_NO_MODRM	(1 << 3)
#define	VIE_OP_F_NO_GLA_VERIFICATION (1 << 4)
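
/*
 * The op_flags above drive the decode stages below: VIE_OP_F_IMM and
 * VIE_OP_F_IMM8 make decode_immediate() consume a 16/32-bit or an 8-bit
 * immediate, VIE_OP_F_MOFFSET makes decode_moffset() consume a direct
 * memory offset, VIE_OP_F_NO_MODRM skips ModR/M parsing entirely and
 * VIE_OP_F_NO_GLA_VERIFICATION skips the verify_gla() cross-check.
 */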
static const struct vie_op two_byte_opcodes[256] = {
		.op_type = VIE_OP_TYPE_TWOB_GRP15,
		.op_type = VIE_OP_TYPE_MOVZX,
		.op_type = VIE_OP_TYPE_MOVZX,
		.op_type = VIE_OP_TYPE_BITTEST,
		.op_flags = VIE_OP_F_IMM8,
		.op_type = VIE_OP_TYPE_MOVSX,
};
static const struct vie_op one_byte_opcodes[256] = {
		.op_type = VIE_OP_TYPE_ADD,
		.op_type = VIE_OP_TYPE_TWO_BYTE
		.op_type = VIE_OP_TYPE_OR,
		.op_type = VIE_OP_TYPE_SUB,
		.op_type = VIE_OP_TYPE_CMP,
		.op_type = VIE_OP_TYPE_CMP,
		.op_type = VIE_OP_TYPE_MOV,
		.op_type = VIE_OP_TYPE_MOV,
		.op_type = VIE_OP_TYPE_MOV,
		.op_type = VIE_OP_TYPE_MOV,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
		.op_type = VIE_OP_TYPE_MOVS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
		.op_type = VIE_OP_TYPE_MOVS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
		.op_type = VIE_OP_TYPE_STOS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
		.op_type = VIE_OP_TYPE_STOS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
		/* XXX Group 11 extended opcode - not just MOV */
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM8,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM,
		.op_type = VIE_OP_TYPE_AND,
		/* Group 1 extended opcode */
		.op_type = VIE_OP_TYPE_GROUP1,
		.op_flags = VIE_OP_F_IMM8,
		/* Group 1 extended opcode */
		.op_type = VIE_OP_TYPE_GROUP1,
		.op_flags = VIE_OP_F_IMM,
		/* Group 1 extended opcode */
		.op_type = VIE_OP_TYPE_GROUP1,
		.op_flags = VIE_OP_F_IMM8,
		/* XXX Group 1A extended opcode - not just POP */
		.op_type = VIE_OP_TYPE_POP,
		/* XXX Group 3 extended opcode - not just TEST */
		.op_type = VIE_OP_TYPE_TEST,
		.op_flags = VIE_OP_F_IMM,
		/* XXX Group 5 extended opcode - not just PUSH */
		.op_type = VIE_OP_TYPE_PUSH,
};
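
/*
 * Opcode bytes without an entry in the tables above decode with op_type
 * VIE_OP_TYPE_NONE, which decode_opcode() rejects, so the corresponding
 * instructions cannot be emulated.
 */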
#define	VIE_MOD_INDIRECT	0
#define	VIE_MOD_INDIRECT_DISP8	1
#define	VIE_MOD_INDIRECT_DISP32	2
#define	VIE_MOD_DIRECT		3

#define	VIE_RM_DISP32		5

#define	GB			(1024 * 1024 * 1024)
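
/*
 * gpr_map translates the 4-bit register numbers encoded in ModR/M, SIB
 * and REX bytes (0 = %rax ... 15 = %r15) to VM_REG_GUEST_* names.
 */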
static enum vm_reg_name gpr_map[16] = {

static uint64_t size2mask[] = {
	[8] = 0xffffffffffffffff,
};
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)

	error = vm_get_register(vm, vcpuid, reg, rval);

vie_calc_bytereg(struct vie *vie, enum vm_reg_name *reg, int *lhbr)

	*reg = gpr_map[vie->reg];

	/*
	 * 64-bit mode imposes limitations on accessing legacy high byte
	 * registers (lhbr).
	 *
	 * The legacy high-byte registers cannot be addressed if the REX
	 * prefix is present. In this case the values 4, 5, 6 and 7 of the
	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
	 *
	 * If the REX prefix is not present then the values 4, 5, 6 and 7
	 * of the 'ModRM:reg' field address the legacy high-byte registers,
	 * %ah, %ch, %dh and %bh respectively.
	 */
	if (!vie->rex_present) {
		if (vie->reg & 0x4) {
			*reg = gpr_map[vie->reg & 0x3];
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)

	enum vm_reg_name reg;

	vie_calc_bytereg(vie, &reg, &lhbr);
	error = vm_get_register(vm, vcpuid, reg, &val);

	/*
	 * To obtain the value of a legacy high byte register shift the
	 * base register right by 8 bits (%ah = %rax >> 8).
	 */

vie_write_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t byte)

	uint64_t origval, val, mask;
	enum vm_reg_name reg;

	vie_calc_bytereg(vie, &reg, &lhbr);
	error = vm_get_register(vm, vcpuid, reg, &origval);

		/*
		 * Shift left by 8 to store 'byte' in a legacy high
		 * byte register.
		 */
		val |= origval & ~mask;
		error = vm_set_register(vm, vcpuid, reg, val);
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
    uint64_t val, int size)

		error = vie_read_register(vm, vcpuid, reg, &origval);

		val &= size2mask[size];
		val |= origval & ~size2mask[size];

	error = vm_set_register(vm, vcpuid, reg, val);

#define	RFLAGS_STATUS_BITS    (PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_V)
/*
 * Return the status flags that would result from doing (x - y).
 */
getcc##sz(uint##sz##_t x, uint##sz##_t y)			\
	__asm __volatile("sub %2,%1; pushfq; popq %0" :		\
	    "=r" (rflags), "+r" (x) : "m" (y));			\

getcc(int opsize, uint64_t x, uint64_t y)

	KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
	    ("getcc: invalid operand size %d", opsize));

	if (opsize == 1)
		return (getcc8(x, y));
	else if (opsize == 2)
		return (getcc16(x, y));
	else if (opsize == 4)
		return (getcc32(x, y));
	else
		return (getcc64(x, y));
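
/*
 * getcc() above and getaddflags()/getandflags() below share the same
 * technique: execute the sub/add/and natively on the host and capture
 * the resulting %rflags with "pushfq; popq", so the computed status
 * flags match real hardware bit-for-bit.
 */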
/*
 * Macro creation of functions getaddflags{8,16,32,64}
 */
#define	GETADDFLAGS(sz)						\
getaddflags##sz(uint##sz##_t x, uint##sz##_t y)			\
	__asm __volatile("add %2,%1; pushfq; popq %0" :		\
	    "=r" (rflags), "+r" (x) : "m" (y));			\

getaddflags(int opsize, uint64_t x, uint64_t y)

	KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
	    ("getaddflags: invalid operand size %d", opsize));

	if (opsize == 1)
		return (getaddflags8(x, y));
	else if (opsize == 2)
		return (getaddflags16(x, y));
	else if (opsize == 4)
		return (getaddflags32(x, y));
	else
		return (getaddflags64(x, y));
/*
 * Return the status flags that would result from doing (x & y).
 */
#define	GETANDFLAGS(sz)						\
getandflags##sz(uint##sz##_t x, uint##sz##_t y)			\
	__asm __volatile("and %2,%1; pushfq; popq %0" :		\
	    "=r" (rflags), "+r" (x) : "m" (y));			\

getandflags(int opsize, uint64_t x, uint64_t y)

	KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
	    ("getandflags: invalid operand size %d", opsize));

	if (opsize == 1)
		return (getandflags8(x, y));
	else if (opsize == 2)
		return (getandflags16(x, y));
	else if (opsize == 4)
		return (getandflags32(x, y));
	else
		return (getandflags64(x, y));
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)

	enum vm_reg_name reg;

	switch (vie->op.op_byte) {
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;	/* override for byte operation */
		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
		error = memwrite(vm, vcpuid, gpa, byte, size, arg);

		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:	mov r/m16, r16
		 * 89/r:	mov r/m32, r32
		 * REX.W + 89/r:	mov r/m64, r64
		 */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		val &= size2mask[size];
		error = memwrite(vm, vcpuid, gpa, val, size, arg);

		/*
		 * MOV byte from mem (ModRM:r/m) to reg (ModRM:reg)
		 * REX + 8A/r:	mov r8, r/m8
		 */
		size = 1;	/* override for byte operation */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		error = vie_write_bytereg(vm, vcpuid, vie, val);

		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8B/r:	mov r16, r/m16
		 * 8B/r:	mov r32, r/m32
		 * REX.W + 8B/r:	mov r64, r/m64
		 */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		reg = gpr_map[vie->reg];
		error = vie_update_register(vm, vcpuid, reg, val, size);

		/*
		 * MOV from seg:moffset to AX/EAX/RAX
		 * A1:		mov AX, moffs16
		 * A1:		mov EAX, moffs32
		 * REX.W + A1:	mov RAX, moffs64
		 */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		reg = VM_REG_GUEST_RAX;
		error = vie_update_register(vm, vcpuid, reg, val, size);

		/*
		 * MOV from AX/EAX/RAX to seg:moffset
		 * A3:		mov moffs16, AX
		 * A3:		mov moffs32, EAX
		 * REX.W + A3:	mov moffs64, RAX
		 */
		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
		val &= size2mask[size];
		error = memwrite(vm, vcpuid, gpa, val, size, arg);

		/*
		 * MOV from imm8 to mem (ModRM:r/m)
		 * C6/0:	mov r/m8, imm8
		 * REX + C6/0:	mov r/m8, imm8
		 */
		size = 1;	/* override for byte operation */
		error = memwrite(vm, vcpuid, gpa, vie->immediate, size, arg);

		/*
		 * MOV from imm16/imm32 to mem (ModRM:r/m)
		 * C7/0:	mov r/m16, imm16
		 * C7/0:	mov r/m32, imm32
		 * REX.W + C7/0:	mov r/m64, imm32 (sign-extended to 64-bits)
		 */
		val = vie->immediate & size2mask[size];
		error = memwrite(vm, vcpuid, gpa, val, size, arg);
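
/*
 * MOVZX/MOVSX: read the narrow source operand from memory, widen it to
 * the operand size (zero- or sign-extending as the opcode dictates) and
 * store it in the register selected by ModRM:reg.
 */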
emulate_movx(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite,
    void *arg)

	enum vm_reg_name reg;

	switch (vie->op.op_byte) {
		/*
		 * MOV and zero extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B6/r		movzx r16, r/m8
		 * 0F B6/r		movzx r32, r/m8
		 * REX.W + 0F B6/r	movzx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* zero-extend byte */
		val &= 0xff;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);

		/*
		 * MOV and zero extend word from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B7/r		movzx r32, r/m16
		 * REX.W + 0F B7/r	movzx r64, r/m16
		 */
		error = memread(vm, vcpuid, gpa, &val, 2, arg);

		reg = gpr_map[vie->reg];

		/* zero-extend word */
		val &= 0xffff;

		error = vie_update_register(vm, vcpuid, reg, val, size);

		/*
		 * MOV and sign extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F BE/r		movsx r16, r/m8
		 * 0F BE/r		movsx r32, r/m8
		 * REX.W + 0F BE/r	movsx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* sign extend byte */
		val = (int8_t)val;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);
/*
 * Helper function to calculate and validate a linear address.
 */
get_gla(void *vm, int vcpuid, struct vie *vie, struct vm_guest_paging *paging,
    int opsize, int addrsize, int prot, enum vm_reg_name seg,
    enum vm_reg_name gpr, uint64_t *gla, int *fault)

	struct seg_desc desc;
	uint64_t cr0, val, rflags;

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	error = vm_get_seg_desc(vm, vcpuid, seg, &desc);
	KASSERT(error == 0, ("%s: error %d getting segment descriptor %d",
	    __func__, error, seg));

	error = vie_read_register(vm, vcpuid, gpr, &val);
	KASSERT(error == 0, ("%s: error %d getting register %d", __func__,
	    error, gpr));

	if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
	    addrsize, prot, gla)) {
		if (seg == VM_REG_GUEST_SS)
			vm_inject_ss(vm, vcpuid, 0);
		else
			vm_inject_gp(vm, vcpuid);
	}

	if (vie_canonical_check(paging->cpu_mode, *gla)) {
		if (seg == VM_REG_GUEST_SS)
			vm_inject_ss(vm, vcpuid, 0);
		else
			vm_inject_gp(vm, vcpuid);
	}

	if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla)) {
		vm_inject_ac(vm, vcpuid, 0);
	}
emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)

#ifdef _KERNEL
	struct vm_copyinfo copyinfo[2];
#else
	struct iovec copyinfo[2];
#endif
	uint64_t dstaddr, srcaddr, dstgpa, srcgpa, val;
	uint64_t rcx, rdi, rsi, rflags;
	int error, fault, opsize, seg, repeat;

	opsize = (vie->op.op_byte == 0xA4) ? 1 : vie->opsize;

	/*
	 * XXX although the MOVS instruction is only supposed to be used with
	 * the "rep" prefix some guests like FreeBSD will use "repnz" instead.
	 *
	 * Empirically the "repnz" prefix has identical behavior to "rep"
	 * and the zero flag does not make a difference.
	 */
	repeat = vie->repz_present | vie->repnz_present;

	if (repeat) {
		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
		KASSERT(!error, ("%s: error %d getting rcx", __func__, error));

		/*
		 * The count register is %rcx, %ecx or %cx depending on the
		 * address size of the instruction.
		 */
		if ((rcx & vie_size2mask(vie->addrsize)) == 0) {
			error = 0;
			goto done;
		}
	}

	/*
	 *	Source		Destination	Comments
	 *	--------------------------------------------
	 * (1)	memory		memory		n/a
	 * (2)	memory		mmio		emulated
	 * (3)	mmio		memory		emulated
	 * (4)	mmio		mmio		emulated
	 *
	 * At this point we don't have sufficient information to distinguish
	 * between (2), (3) and (4). We use 'vm_copy_setup()' to tease this
	 * out because it will succeed only when operating on regular memory.
	 *
	 * XXX the emulation doesn't properly handle the case where 'gpa'
	 * is straddling the boundary between the normal memory and MMIO.
	 */

	seg = vie->segment_override ? vie->segment_register : VM_REG_GUEST_DS;
	error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
	    PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr, &fault);

	error = vm_copy_setup(vm, vcpuid, paging, srcaddr, opsize, PROT_READ,
	    copyinfo, nitems(copyinfo), &fault);
		if (fault)
			goto done;	/* Resume guest to handle fault */

		/*
		 * case (2): read from system memory and write to mmio.
		 */
		vm_copyin(vm, vcpuid, copyinfo, &val, opsize);
		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
		error = memwrite(vm, vcpuid, gpa, val, opsize, arg);

		/*
		 * 'vm_copy_setup()' is expected to fail for cases (3) and (4)
		 * if 'srcaddr' is in the mmio space.
		 */

		error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
		    PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr,
		    &fault);

		error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
		    PROT_WRITE, copyinfo, nitems(copyinfo), &fault);
			if (fault)
				goto done;	/* Resume guest to handle fault */

			/*
			 * case (3): read from MMIO and write to system memory.
			 *
			 * A MMIO read can have side-effects so we
			 * commit to it only after vm_copy_setup() is
			 * successful. If a page-fault needs to be
			 * injected into the guest then it will happen
			 * before the MMIO read is attempted.
			 */
			error = memread(vm, vcpuid, gpa, &val, opsize, arg);

			vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
			vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));

			/*
			 * Case (4): read from and write to mmio.
			 *
			 * Commit to the MMIO read/write (with potential
			 * side-effects) only after we are sure that the
			 * instruction is not going to be restarted due
			 * to address translation faults.
			 */
			error = vm_gla2gpa(vm, vcpuid, paging, srcaddr,
			    PROT_READ, &srcgpa, &fault);

			error = vm_gla2gpa(vm, vcpuid, paging, dstaddr,
			    PROT_WRITE, &dstgpa, &fault);

			error = memread(vm, vcpuid, srcgpa, &val, opsize, arg);

			error = memwrite(vm, vcpuid, dstgpa, val, opsize, arg);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSI, &rsi);
	KASSERT(error == 0, ("%s: error %d getting rsi", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
	KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	if (rflags & PSL_D) {
		rsi -= opsize;
		rdi -= opsize;
	} else {
		rsi += opsize;
		rdi += opsize;
	}

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSI, rsi,
	    vie->addrsize);
	KASSERT(error == 0, ("%s: error %d updating rsi", __func__, error));

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
	    vie->addrsize);
	KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));

	if (repeat) {
		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
		    rcx - 1, vie->addrsize);
		KASSERT(!error, ("%s: error %d updating rcx", __func__, error));

		/*
		 * Repeat the instruction if the count register is not zero.
		 */
		if ((rcx & vie_size2mask(vie->addrsize)) != 0)
			vm_restart_instruction(vm, vcpuid);
	}
done:
	KASSERT(error == 0 || error == EFAULT, ("%s: unexpected error %d",
emulate_stos(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)

	int error, opsize, repeat;
	uint64_t rcx, rdi, rflags;

	opsize = (vie->op.op_byte == 0xAA) ? 1 : vie->opsize;
	repeat = vie->repz_present | vie->repnz_present;

	if (repeat) {
		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
		KASSERT(!error, ("%s: error %d getting rcx", __func__, error));

		/*
		 * The count register is %rcx, %ecx or %cx depending on the
		 * address size of the instruction.
		 */
		if ((rcx & vie_size2mask(vie->addrsize)) == 0)
			return (0);
	}

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
	KASSERT(!error, ("%s: error %d getting rax", __func__, error));

	error = memwrite(vm, vcpuid, gpa, val, opsize, arg);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
	KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	if (rflags & PSL_D)
		rdi -= opsize;
	else
		rdi += opsize;

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
	    vie->addrsize);
	KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));

	if (repeat) {
		error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
		    rcx - 1, vie->addrsize);
		KASSERT(!error, ("%s: error %d updating rcx", __func__, error));

		/*
		 * Repeat the instruction if the count register is not zero.
		 */
		if ((rcx & vie_size2mask(vie->addrsize)) != 0)
			vm_restart_instruction(vm, vcpuid);
	}
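
/*
 * Note that AND only needs SF, ZF and PF from the flag helper: getcc()
 * computes the flags of 'result - 0', which leaves those three exactly
 * as an AND producing 'result' would, while OF and CF are cleared by
 * masking with ~RFLAGS_STATUS_BITS below.
 */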
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)

	enum vm_reg_name reg;
	uint64_t result, rflags, rflags2, val1, val2;

	switch (vie->op.op_byte) {
		/*
		 * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 23/r		and r16, r/m16
		 * 23/r		and r32, r/m32
		 * REX.W + 23/r	and r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);

		/* perform the operation and write the result */
		result = val1 & val2;
		error = vie_update_register(vm, vcpuid, reg, result, size);

		/*
		 * AND mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81 /4		and r/m16, imm16
		 * 81 /4		and r/m32, imm32
		 * REX.W + 81 /4	and r/m64, imm32 sign-extended to 64
		 *
		 * 83 /4		and r/m16, imm8 sign-extended to 16
		 * 83 /4		and r/m32, imm8 sign-extended to 32
		 * REX.W + 83/4		and r/m64, imm8 sign-extended to 64
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		result = val1 & vie->immediate;
		error = memwrite(vm, vcpuid, gpa, result, size, arg);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);

	/*
	 * OF and CF are cleared; the SF, ZF and PF flags are set according
	 * to the result; AF is undefined.
	 *
	 * The updated status flags are obtained by subtracting 0 from 'result'.
	 */
	rflags2 = getcc(size, result, 0);
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)

	enum vm_reg_name reg;
	uint64_t result, rflags, rflags2, val1, val2;

	switch (vie->op.op_byte) {
		/*
		 * OR reg (ModRM:reg) and mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 0b/r		or r16, r/m16
		 * 0b/r		or r32, r/m32
		 * REX.W + 0b/r	or r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);

		/* perform the operation and write the result */
		result = val1 | val2;
		error = vie_update_register(vm, vcpuid, reg, result, size);

		/*
		 * OR mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81 /1		or r/m16, imm16
		 * 81 /1		or r/m32, imm32
		 * REX.W + 81 /1	or r/m64, imm32 sign-extended to 64
		 *
		 * 83 /1		or r/m16, imm8 sign-extended to 16
		 * 83 /1		or r/m32, imm8 sign-extended to 32
		 * REX.W + 83/1		or r/m64, imm8 sign-extended to 64
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		result = val1 | vie->immediate;
		error = memwrite(vm, vcpuid, gpa, result, size, arg);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);

	/*
	 * OF and CF are cleared; the SF, ZF and PF flags are set according
	 * to the result; AF is undefined.
	 *
	 * The updated status flags are obtained by subtracting 0 from 'result'.
	 */
	rflags2 = getcc(size, result, 0);
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
emulate_cmp(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)

	uint64_t regop, memop, op1, op2, rflags, rflags2;
	enum vm_reg_name reg;

	switch (vie->op.op_byte) {
		/*
		 * 39/r		CMP r/m16, r16
		 * 39/r		CMP r/m32, r32
		 * REX.W + 39/r	CMP r/m64, r64
		 *
		 * 3B/r		CMP r16, r/m16
		 * 3B/r		CMP r32, r/m32
		 * REX.W + 3B/r	CMP r64, r/m64
		 *
		 * Compare the first operand with the second operand and
		 * set status flags in EFLAGS register. The comparison is
		 * performed by subtracting the second operand from the first
		 * operand and then setting the status flags.
		 */

		/* Get the register operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &regop);

		/* Get the memory operand */
		error = memread(vm, vcpuid, gpa, &memop, size, arg);

		if (vie->op.op_byte == 0x3B) {
			op1 = regop;
			op2 = memop;
		} else {
			op1 = memop;
			op2 = regop;
		}
		rflags2 = getcc(size, op1, op2);

		/*
		 * 80 /7		cmp r/m8, imm8
		 * REX + 80 /7		cmp r/m8, imm8
		 *
		 * 81 /7		cmp r/m16, imm16
		 * 81 /7		cmp r/m32, imm32
		 * REX.W + 81 /7	cmp r/m64, imm32 sign-extended to 64
		 *
		 * 83 /7		cmp r/m16, imm8 sign-extended to 16
		 * 83 /7		cmp r/m32, imm8 sign-extended to 32
		 * REX.W + 83 /7	cmp r/m64, imm8 sign-extended to 64
		 *
		 * Compare mem (ModRM:r/m) with immediate and set
		 * status flags according to the results. The
		 * comparison is performed by subtracting the
		 * immediate from the first operand and then setting
		 * the status flags.
		 */
		if (vie->op.op_byte == 0x80)
			size = 1;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &op1, size, arg);

		rflags2 = getcc(size, op1, vie->immediate);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);

	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & RFLAGS_STATUS_BITS;

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
emulate_test(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)

	uint64_t op1, rflags, rflags2;

	switch (vie->op.op_byte) {
		/*
		 * F7 /0		test r/m16, imm16
		 * F7 /0		test r/m32, imm32
		 * REX.W + F7 /0	test r/m64, imm32 sign-extended to 64
		 *
		 * Test mem (ModRM:r/m) with immediate and set status
		 * flags according to the results. The comparison is
		 * performed by ANDing the immediate with the first
		 * operand and then setting the status flags.
		 */
		if ((vie->reg & 7) != 0)
			return (EINVAL);

		error = memread(vm, vcpuid, gpa, &op1, size, arg);

		rflags2 = getandflags(size, op1, vie->immediate);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);

	/*
	 * OF and CF are cleared; the SF, ZF and PF flags are set according
	 * to the result; AF is undefined.
	 */
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
emulate_add(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)

	uint64_t nval, rflags, rflags2, val1, val2;
	enum vm_reg_name reg;

	switch (vie->op.op_byte) {
		/*
		 * ADD r/m to r and store the result in r
		 *
		 * 03/r		ADD r16, r/m16
		 * 03/r		ADD r32, r/m32
		 * REX.W + 03/r	ADD r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);

		/* perform the operation and write the result */
		nval = val1 + val2;
		error = vie_update_register(vm, vcpuid, reg, nval, size);

	rflags2 = getaddflags(size, val1, val2);
	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
	    &rflags);

	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & RFLAGS_STATUS_BITS;
	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
	    rflags, 8);
emulate_sub(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)

	uint64_t nval, rflags, rflags2, val1, val2;
	enum vm_reg_name reg;

	switch (vie->op.op_byte) {
		/*
		 * SUB r/m from r and store the result in r
		 *
		 * 2B/r		SUB r16, r/m16
		 * 2B/r		SUB r32, r/m32
		 * REX.W + 2B/r	SUB r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);

		/* perform the operation and write the result */
		nval = val1 - val2;
		error = vie_update_register(vm, vcpuid, reg, nval, size);

	rflags2 = getcc(size, val1, val2);
	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
	    &rflags);

	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & RFLAGS_STATUS_BITS;
	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
	    rflags, 8);
emulate_stack_op(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)

#ifdef _KERNEL
	struct vm_copyinfo copyinfo[2];
#else
	struct iovec copyinfo[2];
#endif
	struct seg_desc ss_desc;
	uint64_t cr0, rflags, rsp, stack_gla, val;
	int error, fault, size, stackaddrsize, pushop;

	pushop = (vie->op.op_type == VIE_OP_TYPE_PUSH) ? 1 : 0;

	/*
	 * From "Address-Size Attributes for Stack Accesses", Intel SDM, Vol 1
	 */
	if (paging->cpu_mode == CPU_MODE_REAL) {
		stackaddrsize = 2;
	} else if (paging->cpu_mode == CPU_MODE_64BIT) {
		/*
		 * "Stack Manipulation Instructions in 64-bit Mode", SDM, Vol 3
		 * - Stack pointer size is always 64-bits.
		 * - PUSH/POP of 32-bit values is not possible in 64-bit mode.
		 * - 16-bit PUSH/POP is supported by using the operand size
		 *   override prefix (66H).
		 */
		stackaddrsize = 8;
		size = vie->opsize_override ? 2 : 8;
	} else {
		/*
		 * In protected or compatibility mode the 'B' flag in the
		 * stack-segment descriptor determines the size of the
		 * stack pointer.
		 */
		error = vm_get_seg_desc(vm, vcpuid, VM_REG_GUEST_SS, &ss_desc);
		KASSERT(error == 0, ("%s: error %d getting SS descriptor",
		    __func__, error));
		if (SEG_DESC_DEF32(ss_desc.access))
			stackaddrsize = 4;
		else
			stackaddrsize = 2;
	}

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSP, &rsp);
	KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error));

	if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
	    rsp, size, stackaddrsize, pushop ? PROT_WRITE : PROT_READ,
	    &stack_gla)) {
		vm_inject_ss(vm, vcpuid, 0);
	}

	if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
		vm_inject_ss(vm, vcpuid, 0);
	}

	if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) {
		vm_inject_ac(vm, vcpuid, 0);
	}

	error = vm_copy_setup(vm, vcpuid, paging, stack_gla, size,
	    pushop ? PROT_WRITE : PROT_READ, copyinfo, nitems(copyinfo),
	    &fault);

	if (pushop) {
		error = memread(vm, vcpuid, mmio_gpa, &val, size, arg);
		if (error == 0)
			vm_copyout(vm, vcpuid, &val, copyinfo, size);
	} else {
		vm_copyin(vm, vcpuid, copyinfo, &val, size);
		error = memwrite(vm, vcpuid, mmio_gpa, val, size, arg);
	}
	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSP, rsp,
	    stackaddrsize);
	KASSERT(error == 0, ("error %d updating rsp", error));
emulate_push(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)

	/*
	 * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
	 *
	 * PUSH is part of the group 5 extended opcodes and is identified
	 * by ModRM:reg = b110.
	 */
	if ((vie->reg & 7) != 6)
		return (EINVAL);

	error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
	    memwrite, arg);

emulate_pop(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)

	/*
	 * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
	 *
	 * POP is part of the group 1A extended opcodes and is identified
	 * by ModRM:reg = b000.
	 */
	if ((vie->reg & 7) != 0)
		return (EINVAL);

	error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
	    memwrite, arg);
emulate_group1(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)

	switch (vie->reg & 7) {
		error = emulate_or(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		error = emulate_and(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		error = emulate_cmp(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
emulate_bittest(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *memarg)

	uint64_t val, rflags;
	int error, bitmask, bitoff;

	/*
	 * 0F BA is a Group 8 extended opcode.
	 *
	 * Currently we only emulate the 'Bit Test' instruction which is
	 * identified by a ModR/M:reg encoding of 100b.
	 */
	if ((vie->reg & 7) != 4)
		return (EINVAL);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	error = memread(vm, vcpuid, gpa, &val, vie->opsize, memarg);

	/*
	 * Intel SDM, Vol 2, Table 3-2:
	 * "Range of Bit Positions Specified by Bit Offset Operands"
	 */
	bitmask = vie->opsize * 8 - 1;
	bitoff = vie->immediate & bitmask;

	/* Copy the bit into the Carry flag in %rflags */
	if (val & (1UL << bitoff))
		rflags |= PSL_C;
	else
		rflags &= ~PSL_C;

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
	KASSERT(error == 0, ("%s: error %d updating rflags", __func__, error));
emulate_twob_group15(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *memarg)

	switch (vie->reg & 7) {
	case 0x7:	/* CLFLUSH, CLFLUSHOPT, and SFENCE */
		if (vie->mod == 0x3) {
			/*
			 * SFENCE.  Ignore it, VM exit provides enough
			 * barriers on its own.
			 */
		} else {
			/*
			 * CLFLUSH, CLFLUSHOPT.  Only check for access
			 * rights.
			 */
			error = memread(vm, vcpuid, gpa, &buf, 1, memarg);
		}
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_GROUP1:
		error = emulate_group1(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_POP:
		error = emulate_pop(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_PUSH:
		error = emulate_push(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_CMP:
		error = emulate_cmp(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOVSX:
	case VIE_OP_TYPE_MOVZX:
		error = emulate_movx(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOVS:
		error = emulate_movs(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_STOS:
		error = emulate_stos(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_OR:
		error = emulate_or(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_SUB:
		error = emulate_sub(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_BITTEST:
		error = emulate_bittest(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_TWOB_GRP15:
		error = emulate_twob_group15(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_ADD:
		error = emulate_add(vm, vcpuid, gpa, vie, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_TEST:
		error = emulate_test(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	}
vie_alignment_check(int cpl, int size, uint64_t cr0, uint64_t rf, uint64_t gla)

	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("%s: invalid size %d", __func__, size));
	KASSERT(cpl >= 0 && cpl <= 3, ("%s: invalid cpl %d", __func__, cpl));

	if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rf & PSL_AC) == 0)
		return (0);

	return ((gla & (size - 1)) ? 1 : 0);
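
/*
 * Canonical form requires bits 63:48 to replicate bit 47 of the linear
 * address: e.g. 0x00007fffffffffff and 0xffff800000000000 are canonical
 * while 0x0000800000000000 is not.
 */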
vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)

	if (cpu_mode != CPU_MODE_64BIT)
		return (0);

	/*
	 * The value of the bit 47 in the 'gla' should be replicated in the
	 * most significant 16 bits.
	 */
	mask = ~((1UL << 48) - 1);
	if (gla & (1UL << 47))
		return ((gla & mask) != mask);
	else
		return ((gla & mask) != 0);
vie_size2mask(int size)

	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("vie_size2mask: invalid size %d", size));
	return (size2mask[size]);
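
/*
 * Translate a seg:offset operand into a linear address, applying the
 * descriptor usability, type and limit checks the CPU would perform.
 * Returns 0 on success and non-zero when the access should fault, in
 * which case the caller injects #SS or #GP.
 */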
vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t offset, int length, int addrsize,
    int prot, uint64_t *gla)

	uint64_t firstoff, low_limit, high_limit, segbase;

	KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
	    ("%s: invalid segment %d", __func__, seg));
	KASSERT(length == 1 || length == 2 || length == 4 || length == 8,
	    ("%s: invalid operand size %d", __func__, length));
	KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0,
	    ("%s: invalid prot %#x", __func__, prot));

	if (cpu_mode == CPU_MODE_64BIT) {
		KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address "
		    "size %d for cpu_mode %d", __func__, addrsize, cpu_mode));
	} else {
		KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address "
		    "size %d for cpu mode %d", __func__, addrsize, cpu_mode));

		/*
		 * If the segment selector is loaded with a NULL selector
		 * then the descriptor is unusable and attempting to use
		 * it results in a #GP(0).
		 */
		if (SEG_DESC_UNUSABLE(desc->access))
			return (-1);

		/*
		 * The processor generates a #NP exception when a segment
		 * register is loaded with a selector that points to a
		 * descriptor that is not present. If this was the case then
		 * it would have been checked before the VM-exit.
		 */
		KASSERT(SEG_DESC_PRESENT(desc->access),
		    ("segment %d not present: %#x", seg, desc->access));

		/*
		 * The descriptor type must indicate a code/data segment.
		 */
		type = SEG_DESC_TYPE(desc->access);
		KASSERT(type >= 16 && type <= 31, ("segment %d has invalid "
		    "descriptor type %#x", seg, type));

		if (prot & PROT_READ) {
			/* #GP on a read access to an exec-only code segment */
			if ((type & 0xA) == 0x8)
				return (-1);
		}

		if (prot & PROT_WRITE) {
			/*
			 * #GP on a write access to a code segment or a
			 * read-only data segment.
			 */
			if (type & 0x8)			/* code segment */
				return (-1);

			if ((type & 0xA) == 0)		/* read-only data seg */
				return (-1);
		}

		/*
		 * 'desc->limit' is fully expanded taking granularity into
		 * account.
		 */
		if ((type & 0xC) == 0x4) {
			/* expand-down data segment */
			low_limit = desc->limit + 1;
			high_limit = SEG_DESC_DEF32(desc->access) ?
			    0xffffffff : 0xffff;
		} else {
			/* code segment or expand-up data segment */
			low_limit = 0;
			high_limit = desc->limit;
		}

		while (length > 0) {
			offset &= vie_size2mask(addrsize);
			if (offset < low_limit || offset > high_limit)
				return (-1);
		}
	}

	/*
	 * In 64-bit mode all segments except %fs and %gs have a segment
	 * base address of 0.
	 */
	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
	    seg != VM_REG_GUEST_GS) {
		segbase = 0;
	} else {
		segbase = desc->base;
	}

	/*
	 * Truncate 'firstoff' to the effective address size before adding
	 * it to the segment base.
	 */
	firstoff &= vie_size2mask(addrsize);
	*gla = (segbase + firstoff) & vie_size2mask(glasize);
vie_init(struct vie *vie, const char *inst_bytes, int inst_length)

	KASSERT(inst_length >= 0 && inst_length <= VIE_INST_SIZE,
	    ("%s: invalid instruction length (%d)", __func__, inst_length));

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
	vie->segment_register = VM_REG_LAST;

	if (inst_length) {
		bcopy(inst_bytes, vie->inst, inst_length);
		vie->num_valid = inst_length;
	}
pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)

	if (pte & PG_V)
		error_code |= PGEX_P;
	if (prot & VM_PROT_WRITE)
		error_code |= PGEX_W;
	if (usermode)
		error_code |= PGEX_U;
	if (rsvd)
		error_code |= PGEX_RSV;
	if (prot & VM_PROT_EXECUTE)
		error_code |= PGEX_I;

	return (error_code);
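
/* Drop the hold on a guest page-table page taken by ptp_hold() below. */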
ptp_release(void **cookie)

	if (*cookie != NULL) {
		vm_gpa_release(*cookie);
		*cookie = NULL;
	}

ptp_hold(struct vm *vm, int vcpu, vm_paddr_t ptpphys, size_t len, void **cookie)

	ptp_release(cookie);
	ptr = vm_gpa_hold(vm, vcpu, ptpphys, len, VM_PROT_RW, cookie);
	return (ptr);
vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *guest_fault)

	int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
	uint64_t *ptpbase, ptpphys, pte, pgsize;
	uint32_t *ptpbase32, pte32;

	usermode = (paging->cpl == 3 ? 1 : 0);
	writable = prot & VM_PROT_WRITE;

restart:
	ptpphys = paging->cr3;		/* root of the page tables */
	ptp_release(&cookie);

	if (vie_canonical_check(paging->cpu_mode, gla)) {
		/*
		 * XXX assuming a non-stack reference otherwise a stack fault
		 * should be generated.
		 */
		vm_inject_gp(vm, vcpuid);
	}

	if (paging->paging_mode == PAGING_MODE_FLAT) {

	if (paging->paging_mode == PAGING_MODE_32) {
		while (--nlevels >= 0) {
			/* Zero out the lower 12 bits. */

			ptpbase32 = ptp_hold(vm, vcpuid, ptpphys, PAGE_SIZE,
			    &cookie);
			if (ptpbase32 == NULL)

			ptpshift = PAGE_SHIFT + nlevels * 10;
			ptpindex = (gla >> ptpshift) & 0x3FF;
			pgsize = 1UL << ptpshift;

			pte32 = ptpbase32[ptpindex];

			if ((pte32 & PG_V) == 0 ||
			    (usermode && (pte32 & PG_U) == 0) ||
			    (writable && (pte32 & PG_RW) == 0)) {
				pfcode = pf_error_code(usermode, prot, 0,
				    pte32);
				vm_inject_pf(vm, vcpuid, pfcode, gla);
			}

			/*
			 * Emulate the x86 MMU's management of the accessed
			 * and dirty flags. While the accessed flag is set
			 * at every level of the page table, the dirty flag
			 * is only set at the last level providing the guest
			 * physical address.
			 */
			if ((pte32 & PG_A) == 0) {
				if (atomic_cmpset_32(&ptpbase32[ptpindex],
				    pte32, pte32 | PG_A) == 0) {
					goto restart;
				}
			}

			/* XXX must be ignored if CR4.PSE=0 */
			if (nlevels > 0 && (pte32 & PG_PS) != 0)
				break;
		}

		/* Set the dirty bit in the page table entry if necessary */
		if (writable && (pte32 & PG_M) == 0) {
			if (atomic_cmpset_32(&ptpbase32[ptpindex],
			    pte32, pte32 | PG_M) == 0) {
				goto restart;
			}
		}

		/* Zero out the lower 'ptpshift' bits */
		pte32 >>= ptpshift; pte32 <<= ptpshift;
		*gpa = pte32 | (gla & (pgsize - 1));
	}

	if (paging->paging_mode == PAGING_MODE_PAE) {
		/* Zero out the lower 5 bits and the upper 32 bits */
		ptpphys &= 0xffffffe0UL;

		ptpbase = ptp_hold(vm, vcpuid, ptpphys, sizeof(*ptpbase) * 4,
		    &cookie);
		if (ptpbase == NULL)

		ptpindex = (gla >> 30) & 0x3;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);
		}
	}

	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		ptpbase = ptp_hold(vm, vcpuid, ptpphys, PAGE_SIZE, &cookie);
		if (ptpbase == NULL)

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0 ||
		    (usermode && (pte & PG_U) == 0) ||
		    (writable && (pte & PG_RW) == 0)) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);
		}

		/* Set the accessed bit in the page table entry */
		if ((pte & PG_A) == 0) {
			if (atomic_cmpset_64(&ptpbase[ptpindex],
			    pte, pte | PG_A) == 0) {
				goto restart;
			}
		}

		if (nlevels > 0 && (pte & PG_PS) != 0) {
			if (pgsize > 1 * GB) {
				pfcode = pf_error_code(usermode, prot, 1, pte);
				vm_inject_pf(vm, vcpuid, pfcode, gla);
			}
		}
	}

	/* Set the dirty bit in the page table entry if necessary */
	if (writable && (pte & PG_M) == 0) {
		if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_M) == 0)
			goto restart;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));

	ptp_release(&cookie);
	KASSERT(retval == 0 || retval == EFAULT, ("%s: unexpected retval %d",
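
/*
 * Fetch 'inst_length' bytes of the guest instruction at 'rip' through
 * the guest page tables into vie->inst for the decoder.
 */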
vmm_fetch_instruction(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t rip, int inst_length, struct vie *vie, int *faultptr)

	struct vm_copyinfo copyinfo[2];

	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	prot = PROT_READ | PROT_EXEC;
	error = vm_copy_setup(vm, vcpuid, paging, rip, inst_length, prot,
	    copyinfo, nitems(copyinfo), faultptr);
	if (error || *faultptr)
		return (error);

	vm_copyin(vm, vcpuid, copyinfo, vie->inst, inst_length);
	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
	vie->num_valid = inst_length;
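
/* Return the next unconsumed instruction byte without advancing past it. */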
vie_peek(struct vie *vie, uint8_t *x)

	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	} else
		return (-1);

vie_advance(struct vie *vie)

	vie->num_processed++;

segment_override(uint8_t x, int *seg)

		*seg = VM_REG_GUEST_CS;
		*seg = VM_REG_GUEST_SS;
		*seg = VM_REG_GUEST_DS;
		*seg = VM_REG_GUEST_ES;
		*seg = VM_REG_GUEST_FS;
		*seg = VM_REG_GUEST_GS;
decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)

	while (1) {
		if (vie_peek(vie, &x))
			return (-1);

		if (x == 0x66)
			vie->opsize_override = 1;
		else if (x == 0x67)
			vie->addrsize_override = 1;
		else if (x == 0xF3)
			vie->repz_present = 1;
		else if (x == 0xF2)
			vie->repnz_present = 1;
		else if (segment_override(x, &vie->segment_register))
			vie->segment_override = 1;
		else
			break;

		vie_advance(vie);
	}

	/*
	 * From section 2.2.1, "REX Prefixes", Intel SDM Vol 2:
	 * - Only one REX prefix is allowed per instruction.
	 * - The REX prefix must immediately precede the opcode byte or the
	 *   escape opcode byte.
	 * - If an instruction has a mandatory prefix (0x66, 0xF2 or 0xF3)
	 *   the mandatory prefix must come before the REX prefix.
	 */
	if (cpu_mode == CPU_MODE_64BIT && x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;
		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;
	}

	/*
	 * Section "Operand-Size And Address-Size Attributes", Intel SDM, Vol 1
	 */
	if (cpu_mode == CPU_MODE_64BIT) {
		/*
		 * Default address size is 64-bits and default operand size
		 * is 32-bits.
		 */
		vie->addrsize = vie->addrsize_override ? 4 : 8;
		if (vie->rex_w)
			vie->opsize = 8;
		else if (vie->opsize_override)
			vie->opsize = 2;
		else
			vie->opsize = 4;
	} else if (cs_d) {
		/* Default address and operand sizes are 32-bits */
		vie->addrsize = vie->addrsize_override ? 2 : 4;
		vie->opsize = vie->opsize_override ? 2 : 4;
	} else {
		/* Default address and operand sizes are 16-bits */
		vie->addrsize = vie->addrsize_override ? 4 : 2;
		vie->opsize = vie->opsize_override ? 4 : 2;
	}
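
/*
 * Example of the sizing rules above for 64-bit mode: a MOV carrying a
 * 0x66 prefix uses addrsize 8 and opsize 2, an unprefixed MOV uses
 * addrsize 8 and opsize 4, and REX.W forces opsize 8 regardless of any
 * 0x66 override.
 */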
decode_two_byte_opcode(struct vie *vie)

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = two_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

decode_opcode(struct vie *vie)

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE)
		return (decode_two_byte_opcode(vie));
decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)

	if (vie->op.op_flags & VIE_OP_F_NO_MODRM)
		return (0);

	if (cpu_mode == CPU_MODE_REAL)
		return (-1);

	if (vie_peek(vie, &x))
		return (-1);

	vie->mod = (x >> 6) & 0x3;
	vie->rm = (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is don't care in
		 * these two cases.
		 */
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			/*
			 * Table 2-7. RIP-Relative Addressing
			 *
			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
			 * whereas in compatibility mode it just implies disp32.
			 */
			if (cpu_mode == CPU_MODE_64BIT)
				vie->base_register = VM_REG_GUEST_RIP;
			else
				vie->base_register = VM_REG_LAST;
		}
		break;
	}
decode_sib(struct vie *vie)

	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case when base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;
decode_displacement(struct vie *vie)

	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */
decode_immediate(struct vie *vie)

	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM) {
		/*
		 * Section 2.2.1.5 "Immediates", Intel SDM:
		 * In 64-bit mode the typical size of immediate operands
		 * remains 32-bits. When the operand size is 64-bits, the
		 * processor sign-extends all immediates to 64-bits prior
		 * to their use.
		 */
		if (vie->opsize == 4 || vie->opsize == 8)
			vie->imm_bytes = 4;
		else
			vie->imm_bytes = 2;
	} else if (vie->op.op_flags & VIE_OP_F_IMM8) {
		vie->imm_bytes = 1;
	}

	if ((n = vie->imm_bytes) == 0)
		return (0);

	KASSERT(n == 1 || n == 2 || n == 4,
	    ("%s: invalid number of immediate bytes: %d", __func__, n));

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	/* sign-extend the immediate value before use */
	if (n == 1)
		vie->immediate = u.signed8;
	else if (n == 2)
		vie->immediate = u.signed16;
	else
		vie->immediate = u.signed32;
decode_moffset(struct vie *vie)

	if ((vie->op.op_flags & VIE_OP_F_MOFFSET) == 0)
		return (0);

	/*
	 * Section 2.2.1.4, "Direct Memory-Offset MOVs", Intel SDM:
	 * The memory offset size follows the address-size of the instruction.
	 */
	n = vie->addrsize;
	KASSERT(n == 2 || n == 4 || n == 8, ("invalid moffset bytes: %d", n));

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}
	vie->displacement = u.u64;
/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches with our instruction decoding.
 */
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie,
    enum vm_cpu_mode cpu_mode)

	uint64_t base, segbase, idx, gla2;
	enum vm_reg_name seg;
	struct seg_desc desc;

	/* Skip 'gla' verification */
	if (gla == VIE_INVALID_GLA)
		return (0);

	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
			    error, vie->base_register);
			return (-1);
		}

		/*
		 * RIP-relative addressing starts from the following
		 * instruction.
		 */
		if (vie->base_register == VM_REG_GUEST_RIP)
			base += vie->num_processed;
	}

	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
			    error, vie->index_register);
			return (-1);
		}
	}

	/*
	 * From "Specifying a Segment Selector", Intel SDM, Vol 1
	 *
	 * In 64-bit mode, segmentation is generally (but not
	 * completely) disabled. The exceptions are the FS and GS
	 * segments.
	 *
	 * In legacy IA-32 mode, when the ESP or EBP register is used
	 * as the base, the SS segment is the default segment. For
	 * other data references, except those relative to the stack or
	 * to a string destination, the DS segment is the default. These
	 * can be overridden to allow other segments to be accessed.
	 */
	if (vie->segment_override)
		seg = vie->segment_register;
	else if (vie->base_register == VM_REG_GUEST_RSP ||
	    vie->base_register == VM_REG_GUEST_RBP)
		seg = VM_REG_GUEST_SS;
	else
		seg = VM_REG_GUEST_DS;
	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
	    seg != VM_REG_GUEST_GS) {
		segbase = 0;
	} else {
		error = vm_get_seg_desc(vm, cpuid, seg, &desc);
		if (error) {
			printf("verify_gla: error %d getting segment"
			    " descriptor %d", error,
			    vie->segment_register);
			return (-1);
		}
		segbase = desc.base;
	}

	gla2 = segbase + base + vie->scale * idx + vie->displacement;
	gla2 &= size2mask[vie->addrsize];
	if (gla != gla2) {
		printf("verify_gla mismatch: segbase(0x%0lx)"
		    "base(0x%0lx), scale(%d), index(0x%0lx), "
		    "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n",
		    segbase, base, vie->scale, idx, vie->displacement,
		    gla, gla2);
		return (-1);
	}
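
/*
 * Top-level decoder: consume the instruction bytes in the order the CPU
 * does (prefixes, opcode, ModR/M, SIB, displacement, then immediate or
 * memory offset) and finally cross-check the decoded addressing mode
 * against the guest linear address reported with the nested page fault.
 */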
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
    enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)

	if (decode_prefixes(vie, cpu_mode, cs_d))
		return (-1);

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie, cpu_mode))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

	if (decode_moffset(vie))
		return (-1);

	if ((vie->op.op_flags & VIE_OP_F_NO_GLA_VERIFICATION) == 0) {
		if (verify_gla(vm, cpuid, gla, vie, cpu_mode))
			return (-1);
	}

	vie->decoded = 1;	/* success */

	return (0);

#endif	/* _KERNEL */