/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/systm.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/_iovec.h>

#include <machine/vmm.h>

#include <assert.h>
#define	KASSERT(exp,msg)	assert((exp))
#endif	/* _KERNEL */

#include <machine/vmm_instruction_emul.h>
#include <x86/specialreg.h>
/* struct vie_op.op_type */
enum {
        VIE_OP_TYPE_NONE = 0,
        VIE_OP_TYPE_MOV,
        VIE_OP_TYPE_MOVSX,
        VIE_OP_TYPE_MOVZX,
        VIE_OP_TYPE_AND,
        VIE_OP_TYPE_OR,
        VIE_OP_TYPE_SUB,
        VIE_OP_TYPE_TWO_BYTE,
        VIE_OP_TYPE_PUSH,
        VIE_OP_TYPE_CMP,
        VIE_OP_TYPE_POP,
        VIE_OP_TYPE_MOVS,
        VIE_OP_TYPE_GROUP1,
        VIE_OP_TYPE_STOS,
        VIE_OP_TYPE_BITTEST,
        VIE_OP_TYPE_TWOB_GRP15,
        VIE_OP_TYPE_ADD,
        VIE_OP_TYPE_LAST
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)  /* 16/32-bit immediate operand */
#define	VIE_OP_F_IMM8		(1 << 1)  /* 8-bit immediate operand */
#define	VIE_OP_F_MOFFSET	(1 << 2)  /* 16/32/64-bit immediate moffset */
#define	VIE_OP_F_NO_MODRM	(1 << 3)
#define	VIE_OP_F_NO_GLA_VERIFICATION (1 << 4)
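/*
 * For example, the moffset forms of MOV (opcodes A1 and A3 below) carry
 * VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM: the 16/32/64-bit memory offset
 * follows the opcode directly and no ModR/M byte is decoded for them.
 */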
static const struct vie_op two_byte_opcodes[256] = {
        [0xAE] = {
                .op_byte = 0xAE,
                .op_type = VIE_OP_TYPE_TWOB_GRP15,
        },
        [0xB6] = {
                .op_byte = 0xB6,
                .op_type = VIE_OP_TYPE_MOVZX,
        },
        [0xB7] = {
                .op_byte = 0xB7,
                .op_type = VIE_OP_TYPE_MOVZX,
        },
        [0xBA] = {
                .op_byte = 0xBA,
                .op_type = VIE_OP_TYPE_BITTEST,
                .op_flags = VIE_OP_F_IMM8,
        },
        [0xBE] = {
                .op_byte = 0xBE,
                .op_type = VIE_OP_TYPE_MOVSX,
        },
};
static const struct vie_op one_byte_opcodes[256] = {
        [0x03] = {
                .op_byte = 0x03,
                .op_type = VIE_OP_TYPE_ADD,
        },
        [0x0F] = {
                .op_byte = 0x0F,
                .op_type = VIE_OP_TYPE_TWO_BYTE
        },
        [0x0B] = {
                .op_byte = 0x0B,
                .op_type = VIE_OP_TYPE_OR,
        },
        [0x2B] = {
                .op_byte = 0x2B,
                .op_type = VIE_OP_TYPE_SUB,
        },
        [0x39] = {
                .op_byte = 0x39,
                .op_type = VIE_OP_TYPE_CMP,
        },
        [0x3B] = {
                .op_byte = 0x3B,
                .op_type = VIE_OP_TYPE_CMP,
        },
        [0x88] = {
                .op_byte = 0x88,
                .op_type = VIE_OP_TYPE_MOV,
        },
        [0x89] = {
                .op_byte = 0x89,
                .op_type = VIE_OP_TYPE_MOV,
        },
        [0x8A] = {
                .op_byte = 0x8A,
                .op_type = VIE_OP_TYPE_MOV,
        },
        [0x8B] = {
                .op_byte = 0x8B,
                .op_type = VIE_OP_TYPE_MOV,
        },
        [0xA1] = {
                .op_byte = 0xA1,
                .op_type = VIE_OP_TYPE_MOV,
                .op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
        },
        [0xA3] = {
                .op_byte = 0xA3,
                .op_type = VIE_OP_TYPE_MOV,
                .op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
        },
        [0xA4] = {
                .op_byte = 0xA4,
                .op_type = VIE_OP_TYPE_MOVS,
                .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
        },
        [0xA5] = {
                .op_byte = 0xA5,
                .op_type = VIE_OP_TYPE_MOVS,
                .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
        },
        [0xAA] = {
                .op_byte = 0xAA,
                .op_type = VIE_OP_TYPE_STOS,
                .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
        },
        [0xAB] = {
                .op_byte = 0xAB,
                .op_type = VIE_OP_TYPE_STOS,
                .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
        },
        [0xC6] = {
                /* XXX Group 11 extended opcode - not just MOV */
                .op_byte = 0xC6,
                .op_type = VIE_OP_TYPE_MOV,
                .op_flags = VIE_OP_F_IMM8,
        },
        [0xC7] = {
                /* XXX Group 11 extended opcode - not just MOV */
                .op_byte = 0xC7,
                .op_type = VIE_OP_TYPE_MOV,
                .op_flags = VIE_OP_F_IMM,
        },
        [0x23] = {
                .op_byte = 0x23,
                .op_type = VIE_OP_TYPE_AND,
        },
        [0x80] = {
                /* Group 1 extended opcode */
                .op_byte = 0x80,
                .op_type = VIE_OP_TYPE_GROUP1,
                .op_flags = VIE_OP_F_IMM8,
        },
        [0x81] = {
                /* Group 1 extended opcode */
                .op_byte = 0x81,
                .op_type = VIE_OP_TYPE_GROUP1,
                .op_flags = VIE_OP_F_IMM,
        },
        [0x83] = {
                /* Group 1 extended opcode */
                .op_byte = 0x83,
                .op_type = VIE_OP_TYPE_GROUP1,
                .op_flags = VIE_OP_F_IMM8,
        },
        [0x8F] = {
                /* XXX Group 1A extended opcode - not just POP */
                .op_byte = 0x8F,
                .op_type = VIE_OP_TYPE_POP,
        },
        [0xFF] = {
                /* XXX Group 5 extended opcode - not just PUSH */
                .op_byte = 0xFF,
                .op_type = VIE_OP_TYPE_PUSH,
        }
};
/* struct vie.mod */
#define	VIE_MOD_INDIRECT		0
#define	VIE_MOD_INDIRECT_DISP8		1
#define	VIE_MOD_INDIRECT_DISP32		2
#define	VIE_MOD_DIRECT			3

/* struct vie.rm */
#define	VIE_RM_SIB			4
#define	VIE_RM_DISP32			5

#define	GB	(1024 * 1024 * 1024)

static enum vm_reg_name gpr_map[16] = {
        VM_REG_GUEST_RAX,
        VM_REG_GUEST_RCX,
        VM_REG_GUEST_RDX,
        VM_REG_GUEST_RBX,
        VM_REG_GUEST_RSP,
        VM_REG_GUEST_RBP,
        VM_REG_GUEST_RSI,
        VM_REG_GUEST_RDI,
        VM_REG_GUEST_R8,
        VM_REG_GUEST_R9,
        VM_REG_GUEST_R10,
        VM_REG_GUEST_R11,
        VM_REG_GUEST_R12,
        VM_REG_GUEST_R13,
        VM_REG_GUEST_R14,
        VM_REG_GUEST_R15
};

static uint64_t size2mask[] = {
        [1] = 0xff,
        [2] = 0xffff,
        [4] = 0xffffffff,
        [8] = 0xffffffffffffffff,
};
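/*
 * For example, size2mask[2] == 0xffff: a 16-bit operation keeps only the
 * low word of a value, and vie_update_register() below merges that word
 * into the unmodified upper bytes of the destination register.
 */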
static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
        int error;

        error = vm_get_register(vm, vcpuid, reg, rval);

        return (error);
}

static void
vie_calc_bytereg(struct vie *vie, enum vm_reg_name *reg, int *lhbr)
{
        *lhbr = 0;
        *reg = gpr_map[vie->reg];

        /*
         * 64-bit mode imposes limitations on accessing legacy high byte
         * registers (lhbr).
         *
         * The legacy high-byte registers cannot be addressed if the REX
         * prefix is present. In this case the values 4, 5, 6 and 7 of the
         * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
         *
         * If the REX prefix is not present then the values 4, 5, 6 and 7
         * of the 'ModRM:reg' field address the legacy high-byte registers,
         * %ah, %ch, %dh and %bh respectively.
         */
        if (!vie->rex_present) {
                if (vie->reg & 0x4) {
                        *lhbr = 1;
                        *reg = gpr_map[vie->reg & 0x3];
                }
        }
}
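/*
 * For example, with ModRM:reg = 4 the byte destination is %spl when a
 * REX prefix is present, but %ah (i.e. bits 15:8 of %rax, gpr_map[0])
 * when it is not.
 */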
static int
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
{
        uint64_t val;
        int error, lhbr;
        enum vm_reg_name reg;

        vie_calc_bytereg(vie, &reg, &lhbr);
        error = vm_get_register(vm, vcpuid, reg, &val);

        /*
         * To obtain the value of a legacy high byte register shift the
         * base register right by 8 bits (%ah = %rax >> 8).
         */
        *rval = lhbr ? val >> 8 : val;
        return (error);
}

static int
vie_write_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t byte)
{
        uint64_t origval, val, mask;
        int error, lhbr;
        enum vm_reg_name reg;

        vie_calc_bytereg(vie, &reg, &lhbr);
        error = vm_get_register(vm, vcpuid, reg, &origval);
        if (error == 0) {
                val = byte;
                mask = 0xff;
                if (lhbr) {
                        /*
                         * Shift left by 8 to store 'byte' in a legacy high
                         * byte register.
                         */
                        val <<= 8;
                        mask <<= 8;
                }
                val |= origval & ~mask;
                error = vm_set_register(vm, vcpuid, reg, val);
        }
        return (error);
}
static int
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
    uint64_t val, int size)
{
        int error;
        uint64_t origval;

        switch (size) {
        case 1:
        case 2:
                error = vie_read_register(vm, vcpuid, reg, &origval);
                if (error)
                        return (error);
                val &= size2mask[size];
                val |= origval & ~size2mask[size];
                break;
        case 4:
                val &= 0xffffffffUL;
                break;
        case 8:
                break;
        default:
                return (EINVAL);
        }

        error = vm_set_register(vm, vcpuid, reg, val);
        return (error);
}
#define	RFLAGS_STATUS_BITS    (PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_V)

/*
 * Return the status flags that would result from doing (x - y).
 */
#define	GETCC(sz)							\
static u_long								\
getcc##sz(uint##sz##_t x, uint##sz##_t y)				\
{									\
        u_long rflags;							\
									\
        __asm __volatile("sub %2,%1; pushfq; popq %0" :			\
            "=r" (rflags), "+r" (x) : "m" (y));				\
        return (rflags);						\
} struct __hack

GETCC(8);
GETCC(16);
GETCC(32);
GETCC(64);

static u_long
getcc(int opsize, uint64_t x, uint64_t y)
{
        KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
            ("getcc: invalid operand size %d", opsize));

        if (opsize == 1)
                return (getcc8(x, y));
        else if (opsize == 2)
                return (getcc16(x, y));
        else if (opsize == 4)
                return (getcc32(x, y));
        else
                return (getcc64(x, y));
}
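/*
 * For example, getcc(4, 1, 2) executes a 32-bit 'sub' of 2 from 1 on the
 * host and returns the resulting %rflags, so PSL_C (borrow) and PSL_N
 * (negative) are set while PSL_Z is clear.
 */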
/*
 * Macro creation of functions getaddflags{8,16,32,64}
 */
#define	GETADDFLAGS(sz)							\
static u_long								\
getaddflags##sz(uint##sz##_t x, uint##sz##_t y)				\
{									\
        u_long rflags;							\
									\
        __asm __volatile("add %2,%1; pushfq; popq %0" :			\
            "=r" (rflags), "+r" (x) : "m" (y));				\
        return (rflags);						\
} struct __hack

GETADDFLAGS(8);
GETADDFLAGS(16);
GETADDFLAGS(32);
GETADDFLAGS(64);

static u_long
getaddflags(int opsize, uint64_t x, uint64_t y)
{
        KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
            ("getaddflags: invalid operand size %d", opsize));

        if (opsize == 1)
                return (getaddflags8(x, y));
        else if (opsize == 2)
                return (getaddflags16(x, y));
        else if (opsize == 4)
                return (getaddflags32(x, y));
        else
                return (getaddflags64(x, y));
}
static int
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
        int error, size;
        enum vm_reg_name reg;
        uint8_t byte;
        uint64_t val;

        size = vie->opsize;
        error = EINVAL;

        switch (vie->op.op_byte) {
        case 0x88:
                /*
                 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
                 * 88/r:	mov r/m8, r8
                 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
                 */
                size = 1;	/* override for byte operation */
                error = vie_read_bytereg(vm, vcpuid, vie, &byte);
                if (error == 0)
                        error = memwrite(vm, vcpuid, gpa, byte, size, arg);
                break;
        case 0x89:
                /*
                 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
                 * 89/r:	mov r/m16, r16
                 * 89/r:	mov r/m32, r32
                 * REX.W + 89/r	mov r/m64, r64
                 */
                reg = gpr_map[vie->reg];
                error = vie_read_register(vm, vcpuid, reg, &val);
                if (error == 0) {
                        val &= size2mask[size];
                        error = memwrite(vm, vcpuid, gpa, val, size, arg);
                }
                break;
        case 0x8A:
                /*
                 * MOV byte from mem (ModRM:r/m) to reg (ModRM:reg)
                 * 8A/r:	mov r8, r/m8
                 * REX + 8A/r:	mov r8, r/m8
                 */
                size = 1;	/* override for byte operation */
                error = memread(vm, vcpuid, gpa, &val, size, arg);
                if (error == 0)
                        error = vie_write_bytereg(vm, vcpuid, vie, val);
                break;
        case 0x8B:
                /*
                 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
                 * 8B/r:	mov r16, r/m16
                 * 8B/r:	mov r32, r/m32
                 * REX.W 8B/r:	mov r64, r/m64
                 */
                error = memread(vm, vcpuid, gpa, &val, size, arg);
                if (error == 0) {
                        reg = gpr_map[vie->reg];
                        error = vie_update_register(vm, vcpuid, reg, val, size);
                }
                break;
        case 0xA1:
                /*
                 * MOV from seg:moffset to AX/EAX/RAX
                 * A1:		mov AX, moffs16
                 * A1:		mov EAX, moffs32
                 * REX.W + A1:	mov RAX, moffs64
                 */
                error = memread(vm, vcpuid, gpa, &val, size, arg);
                if (error == 0) {
                        reg = VM_REG_GUEST_RAX;
                        error = vie_update_register(vm, vcpuid, reg, val, size);
                }
                break;
        case 0xA3:
                /*
                 * MOV from AX/EAX/RAX to seg:moffset
                 * A3:		mov moffs16, AX
                 * A3:		mov moffs32, EAX
                 * REX.W + A3:	mov moffs64, RAX
                 */
                error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
                if (error == 0) {
                        val &= size2mask[size];
                        error = memwrite(vm, vcpuid, gpa, val, size, arg);
                }
                break;
        case 0xC6:
                /*
                 * MOV from imm8 to mem (ModRM:r/m)
                 * C6/0		mov r/m8, imm8
                 * REX + C6/0	mov r/m8, imm8
                 */
                size = 1;	/* override for byte operation */
                error = memwrite(vm, vcpuid, gpa, vie->immediate, size, arg);
                break;
        case 0xC7:
                /*
                 * MOV from imm16/imm32 to mem (ModRM:r/m)
                 * C7/0		mov r/m16, imm16
                 * C7/0		mov r/m32, imm32
                 * REX.W + C7/0	mov r/m64, imm32 (sign-extended to 64-bits)
                 */
                val = vie->immediate & size2mask[size];
                error = memwrite(vm, vcpuid, gpa, val, size, arg);
                break;
        default:
                break;
        }

        return (error);
}
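/*
 * For instance, a guest "mov %ebx,(%rdi)" (89/r) that faults on an MMIO
 * address arrives here with vie->opsize == 4; %ebx is read, masked with
 * size2mask[4] and handed to the region's memwrite() callback.
 */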
static int
emulate_movx(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite,
    void *arg)
{
        int error, size;
        enum vm_reg_name reg;
        uint64_t val;

        size = vie->opsize;
        error = EINVAL;

        switch (vie->op.op_byte) {
        case 0xB6:
                /*
                 * MOV and zero extend byte from mem (ModRM:r/m) to
                 * reg (ModRM:reg).
                 *
                 * 0F B6/r		movzx r16, r/m8
                 * 0F B6/r		movzx r32, r/m8
                 * REX.W + 0F B6/r	movzx r64, r/m8
                 */

                /* get the first operand */
                error = memread(vm, vcpuid, gpa, &val, 1, arg);
                if (error)
                        break;

                /* get the second operand */
                reg = gpr_map[vie->reg];

                /* zero-extend byte */
                val = (uint8_t)val;

                /* write the result */
                error = vie_update_register(vm, vcpuid, reg, val, size);
                break;
        case 0xB7:
                /*
                 * MOV and zero extend word from mem (ModRM:r/m) to
                 * reg (ModRM:reg).
                 *
                 * 0F B7/r		movzx r32, r/m16
                 * REX.W + 0F B7/r	movzx r64, r/m16
                 */
                error = memread(vm, vcpuid, gpa, &val, 2, arg);
                if (error)
                        break;

                reg = gpr_map[vie->reg];

                /* zero-extend word */
                val = (uint16_t)val;

                error = vie_update_register(vm, vcpuid, reg, val, size);
                break;
        case 0xBE:
                /*
                 * MOV and sign extend byte from mem (ModRM:r/m) to
                 * reg (ModRM:reg).
                 *
                 * 0F BE/r		movsx r16, r/m8
                 * 0F BE/r		movsx r32, r/m8
                 * REX.W + 0F BE/r	movsx r64, r/m8
                 */

                /* get the first operand */
                error = memread(vm, vcpuid, gpa, &val, 1, arg);
                if (error)
                        break;

                /* get the second operand */
                reg = gpr_map[vie->reg];

                /* sign extend byte */
                val = (int8_t)val;

                /* write the result */
                error = vie_update_register(vm, vcpuid, reg, val, size);
                break;
        default:
                break;
        }
        return (error);
}
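/*
 * For example, a one-byte read of 0x80 yields 0x80 in the destination
 * for movzx (0F B6) but 0xffffffffffffff80 truncated to the operand size
 * for movsx (0F BE), since (int8_t)0x80 sign-extends.
 */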
/*
 * Helper function to calculate and validate a linear address.
 */
static int
get_gla(void *vm, int vcpuid, struct vie *vie, struct vm_guest_paging *paging,
    int opsize, int addrsize, int prot, enum vm_reg_name seg,
    enum vm_reg_name gpr, uint64_t *gla, int *fault)
{
        struct seg_desc desc;
        uint64_t cr0, val, rflags;
        int error;

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
        KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
        KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

        error = vm_get_seg_desc(vm, vcpuid, seg, &desc);
        KASSERT(error == 0, ("%s: error %d getting segment descriptor %d",
            __func__, error, seg));

        error = vie_read_register(vm, vcpuid, gpr, &val);
        KASSERT(error == 0, ("%s: error %d getting register %d", __func__,
            error, gpr));

        if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
            addrsize, prot, gla)) {
                if (seg == VM_REG_GUEST_SS)
                        vm_inject_ss(vm, vcpuid, 0);
                else
                        vm_inject_gp(vm, vcpuid);
                goto guest_fault;
        }

        if (vie_canonical_check(paging->cpu_mode, *gla)) {
                if (seg == VM_REG_GUEST_SS)
                        vm_inject_ss(vm, vcpuid, 0);
                else
                        vm_inject_gp(vm, vcpuid);
                goto guest_fault;
        }

        if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla)) {
                vm_inject_ac(vm, vcpuid, 0);
                goto guest_fault;
        }

        *fault = 0;
        return (0);

guest_fault:
        *fault = 1;
        return (0);
}
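/*
 * Each invocation of the string-move emulation below handles a single
 * iteration of "rep movs": one opsize-sized element is copied, %rsi and
 * %rdi advance (or retreat, when PSL_D is set), %rcx is decremented, and
 * the instruction is restarted until the count reaches zero.
 */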
static int
emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
#ifdef _KERNEL
        struct vm_copyinfo copyinfo[2];
#else
        struct iovec copyinfo[2];
#endif
        uint64_t dstaddr, srcaddr, dstgpa, srcgpa, val;
        uint64_t rcx, rdi, rsi, rflags;
        int error, fault, opsize, seg, repeat;

        opsize = (vie->op.op_byte == 0xA4) ? 1 : vie->opsize;

        /*
         * XXX although the MOVS instruction is only supposed to be used with
         * the "rep" prefix some guests like FreeBSD will use "repnz" instead.
         *
         * Empirically the "repnz" prefix has identical behavior to "rep"
         * and the zero flag does not make a difference.
         */
        repeat = vie->repz_present | vie->repnz_present;

        if (repeat) {
                error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
                KASSERT(!error, ("%s: error %d getting rcx", __func__, error));

                /*
                 * The count register is %rcx, %ecx or %cx depending on the
                 * address size of the instruction.
                 */
                if ((rcx & vie_size2mask(vie->addrsize)) == 0) {
                        error = 0;
                        goto done;
                }
        }

        /*
         *	Source		Destination	Comments
         *	--------------------------------------------
         * (1)  memory		memory		n/a
         * (2)  memory		mmio		emulated
         * (3)  mmio		memory		emulated
         * (4)  mmio		mmio		emulated
         *
         * At this point we don't have sufficient information to distinguish
         * between (2), (3) and (4). We use 'vm_copy_setup()' to tease this
         * out because it will succeed only when operating on regular memory.
         *
         * XXX the emulation doesn't properly handle the case where 'gpa'
         * is straddling the boundary between the normal memory and MMIO.
         */

        seg = vie->segment_override ? vie->segment_register : VM_REG_GUEST_DS;
        error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
            PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr, &fault);
        if (error || fault)
                goto done;

        error = vm_copy_setup(vm, vcpuid, paging, srcaddr, opsize, PROT_READ,
            copyinfo, nitems(copyinfo), &fault);
        if (error == 0) {
                if (fault)
                        goto done;	/* Resume guest to handle fault */

                /*
                 * case (2): read from system memory and write to mmio.
                 */
                vm_copyin(vm, vcpuid, copyinfo, &val, opsize);
                vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
                error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
                if (error)
                        goto done;
        } else {
                /*
                 * 'vm_copy_setup()' is expected to fail for cases (3) and (4)
                 * if 'srcaddr' is in the mmio space.
                 */

                error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
                    PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr,
                    &fault);
                if (error || fault)
                        goto done;

                error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
                    PROT_WRITE, copyinfo, nitems(copyinfo), &fault);
                if (error == 0) {
                        if (fault)
                                goto done;  /* Resume guest to handle fault */

                        /*
                         * case (3): read from MMIO and write to system memory.
                         *
                         * A MMIO read can have side-effects so we
                         * commit to it only after vm_copy_setup() is
                         * successful. If a page-fault needs to be
                         * injected into the guest then it will happen
                         * before the MMIO read is attempted.
                         */
                        error = memread(vm, vcpuid, gpa, &val, opsize, arg);
                        if (error)
                                goto done;

                        vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
                        vm_copy_teardown(vm, vcpuid, copyinfo,
                            nitems(copyinfo));
                } else {
                        /*
                         * Case (4): read from and write to mmio.
                         *
                         * Commit to the MMIO read/write (with potential
                         * side-effects) only after we are sure that the
                         * instruction is not going to be restarted due
                         * to address translation faults.
                         */
                        error = vm_gla2gpa(vm, vcpuid, paging, srcaddr,
                            PROT_READ, &srcgpa, &fault);
                        if (error || fault)
                                goto done;

                        error = vm_gla2gpa(vm, vcpuid, paging, dstaddr,
                            PROT_WRITE, &dstgpa, &fault);
                        if (error || fault)
                                goto done;

                        error = memread(vm, vcpuid, srcgpa, &val, opsize, arg);
                        if (error)
                                goto done;

                        error = memwrite(vm, vcpuid, dstgpa, val, opsize, arg);
                        if (error)
                                goto done;
                }
        }

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSI, &rsi);
        KASSERT(error == 0, ("%s: error %d getting rsi", __func__, error));

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
        KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
        KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

        if (rflags & PSL_D) {
                rsi -= opsize;
                rdi -= opsize;
        } else {
                rsi += opsize;
                rdi += opsize;
        }

        error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSI, rsi,
            vie->addrsize);
        KASSERT(error == 0, ("%s: error %d updating rsi", __func__, error));

        error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
            vie->addrsize);
        KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));

        if (repeat) {
                rcx = rcx - 1;
                error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
                    rcx, vie->addrsize);
                KASSERT(!error, ("%s: error %d updating rcx", __func__, error));

                /*
                 * Repeat the instruction if the count register is not zero.
                 */
                if ((rcx & vie_size2mask(vie->addrsize)) != 0)
                        vm_restart_instruction(vm, vcpuid);
        }
done:
        KASSERT(error == 0 || error == EFAULT, ("%s: unexpected error %d",
            __func__, error));
        return (error);
}
static int
emulate_stos(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
        int error, opsize, repeat;
        uint64_t val;
        uint64_t rcx, rdi, rflags;

        opsize = (vie->op.op_byte == 0xAA) ? 1 : vie->opsize;
        repeat = vie->repz_present | vie->repnz_present;

        if (repeat) {
                error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
                KASSERT(!error, ("%s: error %d getting rcx", __func__, error));

                /*
                 * The count register is %rcx, %ecx or %cx depending on the
                 * address size of the instruction.
                 */
                if ((rcx & vie_size2mask(vie->addrsize)) == 0)
                        return (0);
        }

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
        KASSERT(!error, ("%s: error %d getting rax", __func__, error));

        error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
        if (error)
                return (error);

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
        KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
        KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

        if (rflags & PSL_D)
                rdi -= opsize;
        else
                rdi += opsize;

        error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
            vie->addrsize);
        KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));

        if (repeat) {
                rcx = rcx - 1;
                error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
                    rcx, vie->addrsize);
                KASSERT(!error, ("%s: error %d updating rcx", __func__, error));

                /*
                 * Repeat the instruction if the count register is not zero.
                 */
                if ((rcx & vie_size2mask(vie->addrsize)) != 0)
                        vm_restart_instruction(vm, vcpuid);
        }

        return (0);
}
static int
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
        int error, size;
        enum vm_reg_name reg;
        uint64_t result, rflags, rflags2, val1, val2;

        size = vie->opsize;
        error = EINVAL;

        switch (vie->op.op_byte) {
        case 0x23:
                /*
                 * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
                 * result in reg.
                 *
                 * 23/r		and r16, r/m16
                 * 23/r		and r32, r/m32
                 * REX.W + 23/r	and r64, r/m64
                 */

                /* get the first operand */
                reg = gpr_map[vie->reg];
                error = vie_read_register(vm, vcpuid, reg, &val1);
                if (error)
                        break;

                /* get the second operand */
                error = memread(vm, vcpuid, gpa, &val2, size, arg);
                if (error)
                        break;

                /* perform the operation and write the result */
                result = val1 & val2;
                error = vie_update_register(vm, vcpuid, reg, result, size);
                break;
        case 0x81:
        case 0x83:
                /*
                 * AND mem (ModRM:r/m) with immediate and store the
                 * result in mem.
                 *
                 * 81 /4		and r/m16, imm16
                 * 81 /4		and r/m32, imm32
                 * REX.W + 81 /4	and r/m64, imm32 sign-extended to 64
                 *
                 * 83 /4		and r/m16, imm8 sign-extended to 16
                 * 83 /4		and r/m32, imm8 sign-extended to 32
                 * REX.W + 83/4		and r/m64, imm8 sign-extended to 64
                 */

                /* get the first operand */
                error = memread(vm, vcpuid, gpa, &val1, size, arg);
                if (error)
                        break;

                /*
                 * perform the operation with the pre-fetched immediate
                 * operand and write the result
                 */
                result = val1 & vie->immediate;
                error = memwrite(vm, vcpuid, gpa, result, size, arg);
                break;
        default:
                break;
        }
        if (error)
                return (error);

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
        if (error)
                return (error);

        /*
         * OF and CF are cleared; the SF, ZF and PF flags are set according
         * to the result; AF is undefined.
         *
         * The updated status flags are obtained by subtracting 0 from
         * 'result'.
         */
        rflags2 = getcc(size, result, 0);
        rflags &= ~RFLAGS_STATUS_BITS;
        rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);

        error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
        return (error);
}
static int
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
        int error, size;
        enum vm_reg_name reg;
        uint64_t result, rflags, rflags2, val1, val2;

        size = vie->opsize;
        error = EINVAL;

        switch (vie->op.op_byte) {
        case 0x0B:
                /*
                 * OR reg (ModRM:reg) and mem (ModRM:r/m) and store the
                 * result in reg.
                 *
                 * 0b/r		or r16, r/m16
                 * 0b/r		or r32, r/m32
                 * REX.W + 0b/r	or r64, r/m64
                 */

                /* get the first operand */
                reg = gpr_map[vie->reg];
                error = vie_read_register(vm, vcpuid, reg, &val1);
                if (error)
                        break;

                /* get the second operand */
                error = memread(vm, vcpuid, gpa, &val2, size, arg);
                if (error)
                        break;

                /* perform the operation and write the result */
                result = val1 | val2;
                error = vie_update_register(vm, vcpuid, reg, result, size);
                break;
        case 0x81:
        case 0x83:
                /*
                 * OR mem (ModRM:r/m) with immediate and store the
                 * result in mem.
                 *
                 * 81 /1		or r/m16, imm16
                 * 81 /1		or r/m32, imm32
                 * REX.W + 81 /1	or r/m64, imm32 sign-extended to 64
                 *
                 * 83 /1		or r/m16, imm8 sign-extended to 16
                 * 83 /1		or r/m32, imm8 sign-extended to 32
                 * REX.W + 83/1		or r/m64, imm8 sign-extended to 64
                 */

                /* get the first operand */
                error = memread(vm, vcpuid, gpa, &val1, size, arg);
                if (error)
                        break;

                /*
                 * perform the operation with the pre-fetched immediate
                 * operand and write the result
                 */
                result = val1 | vie->immediate;
                error = memwrite(vm, vcpuid, gpa, result, size, arg);
                break;
        default:
                break;
        }
        if (error)
                return (error);

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
        if (error)
                return (error);

        /*
         * OF and CF are cleared; the SF, ZF and PF flags are set according
         * to the result; AF is undefined.
         *
         * The updated status flags are obtained by subtracting 0 from
         * 'result'.
         */
        rflags2 = getcc(size, result, 0);
        rflags &= ~RFLAGS_STATUS_BITS;
        rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);

        error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
        return (error);
}
static int
emulate_cmp(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
        int error, size;
        uint64_t regop, memop, op1, op2, rflags, rflags2;
        enum vm_reg_name reg;

        size = vie->opsize;
        switch (vie->op.op_byte) {
        case 0x39:
        case 0x3B:
                /*
                 * 39/r		CMP r/m16, r16
                 * 39/r		CMP r/m32, r32
                 * REX.W 39/r	CMP r/m64, r64
                 *
                 * 3B/r		CMP r16, r/m16
                 * 3B/r		CMP r32, r/m32
                 * REX.W + 3B/r	CMP r64, r/m64
                 *
                 * Compare the first operand with the second operand and
                 * set status flags in EFLAGS register. The comparison is
                 * performed by subtracting the second operand from the first
                 * operand and then setting the status flags.
                 */

                /* Get the register operand */
                reg = gpr_map[vie->reg];
                error = vie_read_register(vm, vcpuid, reg, &regop);
                if (error)
                        return (error);

                /* Get the memory operand */
                error = memread(vm, vcpuid, gpa, &memop, size, arg);
                if (error)
                        return (error);

                if (vie->op.op_byte == 0x3B) {
                        op1 = regop;
                        op2 = memop;
                } else {
                        op1 = memop;
                        op2 = regop;
                }
                rflags2 = getcc(size, op1, op2);
                break;
        case 0x80:
        case 0x81:
        case 0x83:
                /*
                 * 80 /7		cmp r/m8, imm8
                 * REX + 80 /7		cmp r/m8, imm8
                 *
                 * 81 /7		cmp r/m16, imm16
                 * 81 /7		cmp r/m32, imm32
                 * REX.W + 81 /7	cmp r/m64, imm32 sign-extended to 64
                 *
                 * 83 /7		cmp r/m16, imm8 sign-extended to 16
                 * 83 /7		cmp r/m32, imm8 sign-extended to 32
                 * REX.W + 83 /7	cmp r/m64, imm8 sign-extended to 64
                 *
                 * Compare mem (ModRM:r/m) with immediate and set
                 * status flags according to the results. The
                 * comparison is performed by subtracting the
                 * immediate from the first operand and then setting
                 * the status flags.
                 */
                if (vie->op.op_byte == 0x80)
                        size = 1;

                /* get the first operand */
                error = memread(vm, vcpuid, gpa, &op1, size, arg);
                if (error)
                        return (error);

                rflags2 = getcc(size, op1, vie->immediate);
                break;
        default:
                return (EINVAL);
        }

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
        if (error)
                return (error);
        rflags &= ~RFLAGS_STATUS_BITS;
        rflags |= rflags2 & RFLAGS_STATUS_BITS;

        error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
        return (error);
}
static int
emulate_add(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
        int error, size;
        uint64_t nval, rflags, rflags2, val1, val2;
        enum vm_reg_name reg;

        size = vie->opsize;
        error = EINVAL;

        switch (vie->op.op_byte) {
        case 0x03:
                /*
                 * ADD r/m to r and store the result in r
                 *
                 * 03/r			ADD r16, r/m16
                 * 03/r			ADD r32, r/m32
                 * REX.W + 03/r		ADD r64, r/m64
                 */

                /* get the first operand */
                reg = gpr_map[vie->reg];
                error = vie_read_register(vm, vcpuid, reg, &val1);
                if (error)
                        break;

                /* get the second operand */
                error = memread(vm, vcpuid, gpa, &val2, size, arg);
                if (error)
                        break;

                /* perform the operation and write the result */
                nval = val1 + val2;
                error = vie_update_register(vm, vcpuid, reg, nval, size);
                break;
        default:
                break;
        }

        if (!error) {
                rflags2 = getaddflags(size, val1, val2);
                error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
                    &rflags);
                if (error)
                        return (error);

                rflags &= ~RFLAGS_STATUS_BITS;
                rflags |= rflags2 & RFLAGS_STATUS_BITS;
                error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
                    rflags, 8);
        }

        return (error);
}
static int
emulate_sub(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
        int error, size;
        uint64_t nval, rflags, rflags2, val1, val2;
        enum vm_reg_name reg;

        size = vie->opsize;
        error = EINVAL;

        switch (vie->op.op_byte) {
        case 0x2B:
                /*
                 * SUB r/m from r and store the result in r
                 *
                 * 2B/r		SUB r16, r/m16
                 * 2B/r		SUB r32, r/m32
                 * REX.W + 2B/r	SUB r64, r/m64
                 */

                /* get the first operand */
                reg = gpr_map[vie->reg];
                error = vie_read_register(vm, vcpuid, reg, &val1);
                if (error)
                        break;

                /* get the second operand */
                error = memread(vm, vcpuid, gpa, &val2, size, arg);
                if (error)
                        break;

                /* perform the operation and write the result */
                nval = val1 - val2;
                error = vie_update_register(vm, vcpuid, reg, nval, size);
                break;
        default:
                break;
        }

        if (!error) {
                rflags2 = getcc(size, val1, val2);
                error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
                    &rflags);
                if (error)
                        return (error);

                rflags &= ~RFLAGS_STATUS_BITS;
                rflags |= rflags2 & RFLAGS_STATUS_BITS;
                error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
                    rflags, 8);
        }

        return (error);
}
static int
emulate_stack_op(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
#ifdef _KERNEL
        struct vm_copyinfo copyinfo[2];
#else
        struct iovec copyinfo[2];
#endif
        struct seg_desc ss_desc;
        uint64_t cr0, rflags, rsp, stack_gla, val;
        int error, fault, size, stackaddrsize, pushop;

        val = 0;
        size = vie->opsize;
        pushop = (vie->op.op_type == VIE_OP_TYPE_PUSH) ? 1 : 0;

        /*
         * From "Address-Size Attributes for Stack Accesses", Intel SDM, Vol 1
         */
        if (paging->cpu_mode == CPU_MODE_REAL) {
                stackaddrsize = 2;
        } else if (paging->cpu_mode == CPU_MODE_64BIT) {
                /*
                 * "Stack Manipulation Instructions in 64-bit Mode", SDM, Vol 3
                 * - Stack pointer size is always 64-bits.
                 * - PUSH/POP of 32-bit values is not possible in 64-bit mode.
                 * - 16-bit PUSH/POP is supported by using the operand size
                 *   override prefix (66H).
                 */
                stackaddrsize = 8;
                size = vie->opsize_override ? 2 : 8;
        } else {
                /*
                 * In protected or compatibility mode the 'B' flag in the
                 * stack-segment descriptor determines the size of the
                 * stack pointer.
                 */
                error = vm_get_seg_desc(vm, vcpuid, VM_REG_GUEST_SS, &ss_desc);
                KASSERT(error == 0, ("%s: error %d getting SS descriptor",
                    __func__, error));
                if (SEG_DESC_DEF32(ss_desc.access))
                        stackaddrsize = 4;
                else
                        stackaddrsize = 2;
        }

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
        KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
        KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSP, &rsp);
        KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error));
        if (pushop)
                rsp -= size;

        if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
            rsp, size, stackaddrsize, pushop ? PROT_WRITE : PROT_READ,
            &stack_gla)) {
                vm_inject_ss(vm, vcpuid, 0);
                return (0);
        }

        if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
                vm_inject_ss(vm, vcpuid, 0);
                return (0);
        }

        if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) {
                vm_inject_ac(vm, vcpuid, 0);
                return (0);
        }

        error = vm_copy_setup(vm, vcpuid, paging, stack_gla, size,
            pushop ? PROT_WRITE : PROT_READ, copyinfo, nitems(copyinfo),
            &fault);
        if (error || fault)
                return (error);

        if (pushop) {
                error = memread(vm, vcpuid, mmio_gpa, &val, size, arg);
                if (error == 0)
                        vm_copyout(vm, vcpuid, &val, copyinfo, size);
        } else {
                vm_copyin(vm, vcpuid, copyinfo, &val, size);
                error = memwrite(vm, vcpuid, mmio_gpa, val, size, arg);
                rsp += size;
        }
        vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));

        if (error == 0) {
                error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSP, rsp,
                    stackaddrsize);
                KASSERT(error == 0, ("error %d updating rsp", error));
        }
        return (error);
}
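/*
 * For example, a PUSH in 64-bit mode uses stackaddrsize 8 and size 8
 * (or 2 with the 66H prefix): %rsp is decremented by 'size' first and
 * the stack write is performed at the new top of stack.
 */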
static int
emulate_push(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
        int error;

        /*
         * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
         *
         * PUSH is part of the group 5 extended opcodes and is identified
         * by ModRM:reg = b110.
         */
        if ((vie->reg & 7) != 6)
                return (EINVAL);

        error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
            memwrite, arg);
        return (error);
}

static int
emulate_pop(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
{
        int error;

        /*
         * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
         *
         * POP is part of the group 1A extended opcodes and is identified
         * by ModRM:reg = b000.
         */
        if ((vie->reg & 7) != 0)
                return (EINVAL);

        error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
            memwrite, arg);
        return (error);
}
static int
emulate_group1(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)
{
        int error;

        switch (vie->reg & 7) {
        case 0x1:	/* OR */
                error = emulate_or(vm, vcpuid, gpa, vie,
                    memread, memwrite, memarg);
                break;
        case 0x4:	/* AND */
                error = emulate_and(vm, vcpuid, gpa, vie,
                    memread, memwrite, memarg);
                break;
        case 0x7:	/* CMP */
                error = emulate_cmp(vm, vcpuid, gpa, vie,
                    memread, memwrite, memarg);
                break;
        default:
                error = EINVAL;
                break;
        }

        return (error);
}
static int
emulate_bittest(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *memarg)
{
        uint64_t val, rflags;
        int error, bitmask, bitoff;

        /*
         * 0F BA is a Group 8 extended opcode.
         *
         * Currently we only emulate the 'Bit Test' instruction which is
         * identified by a ModR/M:reg encoding of 100b.
         */
        if ((vie->reg & 7) != 4)
                return (EINVAL);

        error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
        KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

        error = memread(vm, vcpuid, gpa, &val, vie->opsize, memarg);
        if (error)
                return (error);

        /*
         * Intel SDM, Vol 2, Table 3-2:
         * "Range of Bit Positions Specified by Bit Offset Operands"
         */
        bitmask = vie->opsize * 8 - 1;
        bitoff = vie->immediate & bitmask;

        /* Copy the bit into the Carry flag in %rflags */
        if (val & (1UL << bitoff))
                rflags |= PSL_C;
        else
                rflags &= ~PSL_C;

        error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
        KASSERT(error == 0, ("%s: error %d updating rflags", __func__, error));

        return (0);
}
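/*
 * For example, "bt $35, (mem)" with a 4-byte operand size masks the
 * immediate with bitmask 31, so bitoff is 3 and bit 3 of the loaded
 * value is copied into PSL_C.
 */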
static int
emulate_twob_group15(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *memarg)
{
        int error;
        uint64_t buf;

        switch (vie->reg & 7) {
        case 0x7:	/* CLFLUSH, CLFLUSHOPT, and SFENCE */
                if (vie->mod == 0x3) {
                        /*
                         * SFENCE.  Ignore it, VM exit provides enough
                         * barriers on its own.
                         */
                        error = 0;
                } else {
                        /*
                         * CLFLUSH, CLFLUSHOPT.  Only check for access
                         * rights.
                         */
                        error = memread(vm, vcpuid, gpa, &buf, 1, memarg);
                }
                break;
        default:
                error = EINVAL;
                break;
        }

        return (error);
}
int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)
{
        int error;

        if (!vie->decoded)
                return (EINVAL);

        switch (vie->op.op_type) {
        case VIE_OP_TYPE_GROUP1:
                error = emulate_group1(vm, vcpuid, gpa, vie, paging, memread,
                    memwrite, memarg);
                break;
        case VIE_OP_TYPE_POP:
                error = emulate_pop(vm, vcpuid, gpa, vie, paging, memread,
                    memwrite, memarg);
                break;
        case VIE_OP_TYPE_PUSH:
                error = emulate_push(vm, vcpuid, gpa, vie, paging, memread,
                    memwrite, memarg);
                break;
        case VIE_OP_TYPE_CMP:
                error = emulate_cmp(vm, vcpuid, gpa, vie,
                    memread, memwrite, memarg);
                break;
        case VIE_OP_TYPE_MOV:
                error = emulate_mov(vm, vcpuid, gpa, vie,
                    memread, memwrite, memarg);
                break;
        case VIE_OP_TYPE_MOVSX:
        case VIE_OP_TYPE_MOVZX:
                error = emulate_movx(vm, vcpuid, gpa, vie,
                    memread, memwrite, memarg);
                break;
        case VIE_OP_TYPE_MOVS:
                error = emulate_movs(vm, vcpuid, gpa, vie, paging, memread,
                    memwrite, memarg);
                break;
        case VIE_OP_TYPE_STOS:
                error = emulate_stos(vm, vcpuid, gpa, vie, paging, memread,
                    memwrite, memarg);
                break;
        case VIE_OP_TYPE_AND:
                error = emulate_and(vm, vcpuid, gpa, vie,
                    memread, memwrite, memarg);
                break;
        case VIE_OP_TYPE_OR:
                error = emulate_or(vm, vcpuid, gpa, vie,
                    memread, memwrite, memarg);
                break;
        case VIE_OP_TYPE_SUB:
                error = emulate_sub(vm, vcpuid, gpa, vie,
                    memread, memwrite, memarg);
                break;
        case VIE_OP_TYPE_BITTEST:
                error = emulate_bittest(vm, vcpuid, gpa, vie,
                    memread, memwrite, memarg);
                break;
        case VIE_OP_TYPE_TWOB_GRP15:
                error = emulate_twob_group15(vm, vcpuid, gpa, vie,
                    memread, memwrite, memarg);
                break;
        case VIE_OP_TYPE_ADD:
                error = emulate_add(vm, vcpuid, gpa, vie, memread,
                    memwrite, memarg);
                break;
        default:
                error = EINVAL;
                break;
        }

        return (error);
}
int
vie_alignment_check(int cpl, int size, uint64_t cr0, uint64_t rf, uint64_t gla)
{
        KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
            ("%s: invalid size %d", __func__, size));
        KASSERT(cpl >= 0 && cpl <= 3, ("%s: invalid cpl %d", __func__, cpl));

        if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rf & PSL_AC) == 0)
                return (0);

        return ((gla & (size - 1)) ? 1 : 0);
}
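/*
 * For example, with CR0.AM and RFLAGS.AC set at cpl 3, a 4-byte access
 * to gla 0x1002 returns 1 (0x1002 & 3 != 0) and the caller injects #AC;
 * the same access at cpl 0 returns 0.
 */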
int
vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
{
        uint64_t mask;

        if (cpu_mode != CPU_MODE_64BIT)
                return (0);

        /*
         * The value of the bit 47 in the 'gla' should be replicated in the
         * most significant 16 bits.
         */
        mask = ~((1UL << 48) - 1);
        if (gla & (1UL << 47))
                return ((gla & mask) != mask);
        else
                return ((gla & mask) != 0);
}
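/*
 * For example, 0x00007fffffffffff and 0xffff800000000000 are canonical
 * (bits 63:48 replicate bit 47), while 0x0000800000000000 is not, so the
 * check returns 1 for it.
 */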
uint64_t
vie_size2mask(int size)
{
        KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
            ("vie_size2mask: invalid size %d", size));
        return (size2mask[size]);
}
int
vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t offset, int length, int addrsize,
    int prot, uint64_t *gla)
{
        uint64_t firstoff, low_limit, high_limit, segbase;
        int glasize, type;

        KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
            ("%s: invalid segment %d", __func__, seg));
        KASSERT(length == 1 || length == 2 || length == 4 || length == 8,
            ("%s: invalid operand size %d", __func__, length));
        KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0,
            ("%s: invalid prot %#x", __func__, prot));

        firstoff = offset;
        if (cpu_mode == CPU_MODE_64BIT) {
                KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address "
                    "size %d for cpu_mode %d", __func__, addrsize, cpu_mode));
                glasize = 8;
        } else {
                KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address "
                    "size %d for cpu mode %d", __func__, addrsize, cpu_mode));
                glasize = 4;
                /*
                 * If the segment selector is loaded with a NULL selector
                 * then the descriptor is unusable and attempting to use
                 * it results in a #GP(0).
                 */
                if (SEG_DESC_UNUSABLE(desc->access))
                        return (-1);

                /*
                 * The processor generates a #NP exception when a segment
                 * register is loaded with a selector that points to a
                 * descriptor that is not present. If this was the case then
                 * it would have been checked before the VM-exit.
                 */
                KASSERT(SEG_DESC_PRESENT(desc->access),
                    ("segment %d not present: %#x", seg, desc->access));

                /*
                 * The descriptor type must indicate a code/data segment.
                 */
                type = SEG_DESC_TYPE(desc->access);
                KASSERT(type >= 16 && type <= 31, ("segment %d has invalid "
                    "descriptor type %#x", seg, type));

                if (prot & PROT_READ) {
                        /* #GP on a read access to an exec-only code segment */
                        if ((type & 0xA) == 0x8)
                                return (-1);
                }

                if (prot & PROT_WRITE) {
                        /*
                         * #GP on a write access to a code segment or a
                         * read-only data segment.
                         */
                        if (type & 0x8)			/* code segment */
                                return (-1);

                        if ((type & 0xA) == 0)		/* read-only data seg */
                                return (-1);
                }

                /*
                 * 'desc->limit' is fully expanded taking granularity into
                 * account.
                 */
                if ((type & 0xC) == 0x4) {
                        /* expand-down data segment */
                        low_limit = desc->limit + 1;
                        high_limit = SEG_DESC_DEF32(desc->access) ?
                            0xffffffff : 0xffff;
                } else {
                        /* code segment or expand-up data segment */
                        low_limit = 0;
                        high_limit = desc->limit;
                }

                while (length > 0) {
                        offset &= vie_size2mask(addrsize);
                        if (offset < low_limit || offset > high_limit)
                                return (-1);
                        offset++;
                        length--;
                }
        }

        /*
         * In 64-bit mode all segments except %fs and %gs have a segment
         * base address of 0.
         */
        if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
            seg != VM_REG_GUEST_GS) {
                segbase = 0;
        } else {
                segbase = desc->base;
        }

        /*
         * Truncate 'firstoff' to the effective address size before adding
         * it to the segment base.
         */
        firstoff &= vie_size2mask(addrsize);
        *gla = (segbase + firstoff) & vie_size2mask(glasize);
        return (0);
}
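/*
 * For example, with a 16-bit address size and an expand-up segment whose
 * limit is 0xffff, a 2-byte access at offset 0xffff wraps: the loop above
 * masks the incremented offset back to 0, so each byte is limit-checked
 * individually.
 */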
void
vie_init(struct vie *vie, const char *inst_bytes, int inst_length)
{
        KASSERT(inst_length >= 0 && inst_length <= VIE_INST_SIZE,
            ("%s: invalid instruction length (%d)", __func__, inst_length));

        bzero(vie, sizeof(struct vie));

        vie->base_register = VM_REG_LAST;
        vie->index_register = VM_REG_LAST;
        vie->segment_register = VM_REG_LAST;

        if (inst_length) {
                bcopy(inst_bytes, vie->inst, inst_length);
                vie->num_valid = inst_length;
        }
}
#ifdef _KERNEL
static int
pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
{
        int error_code = 0;

        if (pte & PG_V)
                error_code |= PGEX_P;
        if (prot & VM_PROT_WRITE)
                error_code |= PGEX_W;
        if (usermode)
                error_code |= PGEX_U;
        if (rsvd)
                error_code |= PGEX_RSV;
        if (prot & VM_PROT_EXECUTE)
                error_code |= PGEX_I;

        return (error_code);
}
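/*
 * For example, a usermode write that faults on a present page yields
 * PGEX_P | PGEX_W | PGEX_U, i.e. the familiar page-fault error code 7.
 */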
static void
ptp_release(void **cookie)
{
        if (*cookie != NULL) {
                vm_gpa_release(*cookie);
                *cookie = NULL;
        }
}

static void *
ptp_hold(struct vm *vm, int vcpu, vm_paddr_t ptpphys, size_t len, void **cookie)
{
        void *ptr;

        ptp_release(cookie);
        ptr = vm_gpa_hold(vm, vcpu, ptpphys, len, VM_PROT_RW, cookie);
        return (ptr);
}
static int
_vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *guest_fault, bool check_only)
{
        int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
        u_int retries;
        uint64_t *ptpbase, ptpphys, pte, pgsize;
        uint32_t *ptpbase32, pte32;
        void *cookie;

        *guest_fault = 0;

        usermode = (paging->cpl == 3 ? 1 : 0);
        writable = prot & VM_PROT_WRITE;
        cookie = NULL;
        retval = 0;
        retries = 0;
restart:
        ptpphys = paging->cr3;		/* root of the page tables */
        ptp_release(&cookie);
        if (retries++ > 0)
                maybe_yield();

        if (vie_canonical_check(paging->cpu_mode, gla)) {
                /*
                 * XXX assuming a non-stack reference otherwise a stack fault
                 * should be generated.
                 */
                if (!check_only)
                        vm_inject_gp(vm, vcpuid);
                goto fault;
        }

        if (paging->paging_mode == PAGING_MODE_FLAT) {
                *gpa = gla;
                goto done;
        }

        if (paging->paging_mode == PAGING_MODE_32) {
                nlevels = 2;
                while (--nlevels >= 0) {
                        /* Zero out the lower 12 bits. */
                        ptpphys &= ~0xfff;

                        ptpbase32 = ptp_hold(vm, vcpuid, ptpphys, PAGE_SIZE,
                            &cookie);

                        if (ptpbase32 == NULL)
                                goto error;

                        ptpshift = PAGE_SHIFT + nlevels * 10;
                        ptpindex = (gla >> ptpshift) & 0x3FF;
                        pgsize = 1UL << ptpshift;

                        pte32 = ptpbase32[ptpindex];

                        if ((pte32 & PG_V) == 0 ||
                            (usermode && (pte32 & PG_U) == 0) ||
                            (writable && (pte32 & PG_RW) == 0)) {
                                if (!check_only) {
                                        pfcode = pf_error_code(usermode, prot,
                                            0, pte32);
                                        vm_inject_pf(vm, vcpuid, pfcode, gla);
                                }
                                goto fault;
                        }

                        /*
                         * Emulate the x86 MMU's management of the accessed
                         * and dirty flags. While the accessed flag is set
                         * at every level of the page table, the dirty flag
                         * is only set at the last level providing the guest
                         * physical address.
                         */
                        if (!check_only && (pte32 & PG_A) == 0) {
                                if (atomic_cmpset_32(&ptpbase32[ptpindex],
                                    pte32, pte32 | PG_A) == 0) {
                                        goto restart;
                                }
                        }

                        /* XXX must be ignored if CR4.PSE=0 */
                        if (nlevels > 0 && (pte32 & PG_PS) != 0)
                                break;

                        ptpphys = pte32;
                }

                /* Set the dirty bit in the page table entry if necessary */
                if (!check_only && writable && (pte32 & PG_M) == 0) {
                        if (atomic_cmpset_32(&ptpbase32[ptpindex],
                            pte32, pte32 | PG_M) == 0) {
                                goto restart;
                        }
                }

                /* Zero out the lower 'ptpshift' bits */
                pte32 >>= ptpshift; pte32 <<= ptpshift;
                *gpa = pte32 | (gla & (pgsize - 1));
                goto done;
        }

        if (paging->paging_mode == PAGING_MODE_PAE) {
                /* Zero out the lower 5 bits and the upper 32 bits */
                ptpphys &= 0xffffffe0UL;

                ptpbase = ptp_hold(vm, vcpuid, ptpphys, sizeof(*ptpbase) * 4,
                    &cookie);
                if (ptpbase == NULL)
                        goto error;

                ptpindex = (gla >> 30) & 0x3;

                pte = ptpbase[ptpindex];

                if ((pte & PG_V) == 0) {
                        if (!check_only) {
                                pfcode = pf_error_code(usermode, prot, 0, pte);
                                vm_inject_pf(vm, vcpuid, pfcode, gla);
                        }
                        goto fault;
                }

                ptpphys = pte;

                nlevels = 2;
        } else {
                nlevels = 4;
        }

        while (--nlevels >= 0) {
                /* Zero out the lower 12 bits and the upper 12 bits */
                ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

                ptpbase = ptp_hold(vm, vcpuid, ptpphys, PAGE_SIZE, &cookie);
                if (ptpbase == NULL)
                        goto error;

                ptpshift = PAGE_SHIFT + nlevels * 9;
                ptpindex = (gla >> ptpshift) & 0x1FF;
                pgsize = 1UL << ptpshift;

                pte = ptpbase[ptpindex];

                if ((pte & PG_V) == 0 ||
                    (usermode && (pte & PG_U) == 0) ||
                    (writable && (pte & PG_RW) == 0)) {
                        if (!check_only) {
                                pfcode = pf_error_code(usermode, prot, 0, pte);
                                vm_inject_pf(vm, vcpuid, pfcode, gla);
                        }
                        goto fault;
                }

                /* Set the accessed bit in the page table entry */
                if (!check_only && (pte & PG_A) == 0) {
                        if (atomic_cmpset_64(&ptpbase[ptpindex],
                            pte, pte | PG_A) == 0) {
                                goto restart;
                        }
                }

                if (nlevels > 0 && (pte & PG_PS) != 0) {
                        if (pgsize > 1 * GB) {
                                if (!check_only) {
                                        pfcode = pf_error_code(usermode, prot,
                                            1, pte);
                                        vm_inject_pf(vm, vcpuid, pfcode, gla);
                                }
                                goto fault;
                        }
                        break;
                }

                ptpphys = pte;
        }

        /* Set the dirty bit in the page table entry if necessary */
        if (!check_only && writable && (pte & PG_M) == 0) {
                if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_M) == 0)
                        goto restart;
        }

        /* Zero out the lower 'ptpshift' bits and the upper 12 bits */
        pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
        *gpa = pte | (gla & (pgsize - 1));
done:
        ptp_release(&cookie);
        KASSERT(retval == 0 || retval == EFAULT, ("%s: unexpected retval %d",
            __func__, retval));
        return (retval);
error:
        retval = EFAULT;
        goto done;
fault:
        *guest_fault = 1;
        goto done;
}
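/*
 * For example, 4-level paging walks with nlevels = 4, giving ptpshift
 * values 39, 30, 21 and 12; a PG_PS entry found where pgsize > 1GB is
 * reserved, so #PF is injected with the RSV bit in the error code.
 */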
int
vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *guest_fault)
{

        return (_vm_gla2gpa(vm, vcpuid, paging, gla, prot, gpa, guest_fault,
            false));
}

int
vm_gla2gpa_nofault(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *guest_fault)
{

        return (_vm_gla2gpa(vm, vcpuid, paging, gla, prot, gpa, guest_fault,
            true));
}
int
vmm_fetch_instruction(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t rip, int inst_length, struct vie *vie, int *faultptr)
{
        struct vm_copyinfo copyinfo[2];
        int error, prot;

        if (inst_length > VIE_INST_SIZE)
                panic("vmm_fetch_instruction: invalid length %d", inst_length);

        prot = PROT_READ | PROT_EXEC;
        error = vm_copy_setup(vm, vcpuid, paging, rip, inst_length, prot,
            copyinfo, nitems(copyinfo), faultptr);
        if (error || *faultptr)
                return (error);

        vm_copyin(vm, vcpuid, copyinfo, vie->inst, inst_length);
        vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
        vie->num_valid = inst_length;
        return (0);
}
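/*
 * Two vm_copyinfo entries suffice above because an instruction is at
 * most VIE_INST_SIZE (15) bytes long and can therefore straddle at most
 * one page boundary.
 */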
static int
vie_peek(struct vie *vie, uint8_t *x)
{

        if (vie->num_processed < vie->num_valid) {
                *x = vie->inst[vie->num_processed];
                return (0);
        } else
                return (-1);
}

static void
vie_advance(struct vie *vie)
{

        vie->num_processed++;
}

static bool
segment_override(uint8_t x, int *seg)
{

        switch (x) {
        case 0x2E:
                *seg = VM_REG_GUEST_CS;
                break;
        case 0x36:
                *seg = VM_REG_GUEST_SS;
                break;
        case 0x3E:
                *seg = VM_REG_GUEST_DS;
                break;
        case 0x26:
                *seg = VM_REG_GUEST_ES;
                break;
        case 0x64:
                *seg = VM_REG_GUEST_FS;
                break;
        case 0x65:
                *seg = VM_REG_GUEST_GS;
                break;
        default:
                return (false);
        }
        return (true);
}
static int
decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)
{
        uint8_t x;

        while (1) {
                if (vie_peek(vie, &x))
                        return (-1);

                if (x == 0x66)
                        vie->opsize_override = 1;
                else if (x == 0x67)
                        vie->addrsize_override = 1;
                else if (x == 0xF3)
                        vie->repz_present = 1;
                else if (x == 0xF2)
                        vie->repnz_present = 1;
                else if (segment_override(x, &vie->segment_register))
                        vie->segment_override = 1;
                else
                        break;

                vie_advance(vie);
        }

        /*
         * From section 2.2.1, "REX Prefixes", Intel SDM Vol 2:
         * - Only one REX prefix is allowed per instruction.
         * - The REX prefix must immediately precede the opcode byte or the
         *   escape opcode byte.
         * - If an instruction has a mandatory prefix (0x66, 0xF2 or 0xF3)
         *   the mandatory prefix must come before the REX prefix.
         */
        if (cpu_mode == CPU_MODE_64BIT && x >= 0x40 && x <= 0x4F) {
                vie->rex_present = 1;
                vie->rex_w = x & 0x8 ? 1 : 0;
                vie->rex_r = x & 0x4 ? 1 : 0;
                vie->rex_x = x & 0x2 ? 1 : 0;
                vie->rex_b = x & 0x1 ? 1 : 0;
                vie_advance(vie);
        }

        /*
         * Section "Operand-Size And Address-Size Attributes", Intel SDM, Vol 1
         */
        if (cpu_mode == CPU_MODE_64BIT) {
                /*
                 * Default address size is 64-bits and default operand size
                 * is 32-bits.
                 */
                vie->addrsize = vie->addrsize_override ? 4 : 8;
                if (vie->rex_w)
                        vie->opsize = 8;
                else if (vie->opsize_override)
                        vie->opsize = 2;
                else
                        vie->opsize = 4;
        } else if (cs_d) {
                /* Default address and operand sizes are 32-bits */
                vie->addrsize = vie->addrsize_override ? 2 : 4;
                vie->opsize = vie->opsize_override ? 2 : 4;
        } else {
                /* Default address and operand sizes are 16-bits */
                vie->addrsize = vie->addrsize_override ? 4 : 2;
                vie->opsize = vie->opsize_override ? 4 : 2;
        }
        return (0);
}
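/*
 * For example, in 64-bit mode "66 89 ..." stores 16 bits (opsize 2)
 * while "48 89 ..." (REX.W) stores 64 bits; REX.W takes precedence over
 * the 66H operand-size override.
 */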
static int
decode_two_byte_opcode(struct vie *vie)
{
        uint8_t x;

        if (vie_peek(vie, &x))
                return (-1);

        vie->op = two_byte_opcodes[x];

        if (vie->op.op_type == VIE_OP_TYPE_NONE)
                return (-1);

        vie_advance(vie);
        return (0);
}

static int
decode_opcode(struct vie *vie)
{
        uint8_t x;

        if (vie_peek(vie, &x))
                return (-1);

        vie->op = one_byte_opcodes[x];

        if (vie->op.op_type == VIE_OP_TYPE_NONE)
                return (-1);

        vie_advance(vie);

        if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE)
                return (decode_two_byte_opcode(vie));

        return (0);
}
static int
decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
{
        uint8_t x;

        if (vie->op.op_flags & VIE_OP_F_NO_MODRM)
                return (0);

        if (cpu_mode == CPU_MODE_REAL)
                return (-1);

        if (vie_peek(vie, &x))
                return (-1);

        vie->mod = (x >> 6) & 0x3;
        vie->rm = (x >> 0) & 0x7;
        vie->reg = (x >> 3) & 0x7;

        /*
         * A direct addressing mode makes no sense in the context of an EPT
         * fault. There has to be a memory access involved to cause the
         * EPT fault.
         */
        if (vie->mod == VIE_MOD_DIRECT)
                return (-1);

        if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
            (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
                /*
                 * Table 2-5: Special Cases of REX Encodings
                 *
                 * mod=0, r/m=5 is used in the compatibility mode to
                 * indicate a disp32 without a base register.
                 *
                 * mod!=3, r/m=4 is used in the compatibility mode to
                 * indicate that the SIB byte is present.
                 *
                 * The 'b' bit in the REX prefix is don't care in
                 * these two cases.
                 */
        } else {
                vie->rm |= (vie->rex_b << 3);
        }

        vie->reg |= (vie->rex_r << 3);

        /* SIB */
        if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
                goto done;

        vie->base_register = gpr_map[vie->rm];

        switch (vie->mod) {
        case VIE_MOD_INDIRECT_DISP8:
                vie->disp_bytes = 1;
                break;
        case VIE_MOD_INDIRECT_DISP32:
                vie->disp_bytes = 4;
                break;
        case VIE_MOD_INDIRECT:
                if (vie->rm == VIE_RM_DISP32) {
                        vie->disp_bytes = 4;
                        /*
                         * Table 2-7. RIP-Relative Addressing
                         *
                         * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
                         * whereas in compatibility mode it just implies disp32.
                         */
                        if (cpu_mode == CPU_MODE_64BIT)
                                vie->base_register = VM_REG_GUEST_RIP;
                        else
                                vie->base_register = VM_REG_LAST;
                }
                break;
        }

done:
        vie_advance(vie);

        return (0);
}
static int
decode_sib(struct vie *vie)
{
        uint8_t x;

        /* Proceed only if SIB byte is present */
        if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
                return (0);

        if (vie_peek(vie, &x))
                return (-1);

        /* De-construct the SIB byte */
        vie->ss = (x >> 6) & 0x3;
        vie->index = (x >> 3) & 0x7;
        vie->base = (x >> 0) & 0x7;

        /* Apply the REX prefix modifiers */
        vie->index |= vie->rex_x << 3;
        vie->base |= vie->rex_b << 3;

        switch (vie->mod) {
        case VIE_MOD_INDIRECT_DISP8:
                vie->disp_bytes = 1;
                break;
        case VIE_MOD_INDIRECT_DISP32:
                vie->disp_bytes = 4;
                break;
        }

        if (vie->mod == VIE_MOD_INDIRECT &&
            (vie->base == 5 || vie->base == 13)) {
                /*
                 * Special case when base register is unused if mod = 0
                 * and base = %rbp or %r13.
                 *
                 * Documented in:
                 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
                 * Table 2-5: Special Cases of REX Encodings
                 */
                vie->disp_bytes = 4;
        } else {
                vie->base_register = gpr_map[vie->base];
        }

        /*
         * All encodings of 'index' are valid except for %rsp (4).
         *
         * Documented in:
         * Table 2-3: 32-bit Addressing Forms with the SIB Byte
         * Table 2-5: Special Cases of REX Encodings
         */
        if (vie->index != 4)
                vie->index_register = gpr_map[vie->index];

        /* 'scale' makes sense only in the context of an index register */
        if (vie->index_register < VM_REG_LAST)
                vie->scale = 1 << vie->ss;

        vie_advance(vie);

        return (0);
}
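/*
 * For example, SIB byte 0x4C decodes as ss=1, index=1, base=4: scale 2
 * with %rcx as the index and %rsp as the base (absent REX.X/REX.B).
 */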
static int
decode_displacement(struct vie *vie)
{
        int n, i;
        uint8_t x;

        union {
                char	buf[4];
                int8_t	signed8;
                int32_t	signed32;
        } u;

        if ((n = vie->disp_bytes) == 0)
                return (0);

        if (n != 1 && n != 4)
                panic("decode_displacement: invalid disp_bytes %d", n);

        for (i = 0; i < n; i++) {
                if (vie_peek(vie, &x))
                        return (-1);

                u.buf[i] = x;
                vie_advance(vie);
        }

        if (n == 1)
                vie->displacement = u.signed8;		/* sign-extended */
        else
                vie->displacement = u.signed32;		/* sign-extended */

        return (0);
}
static int
decode_immediate(struct vie *vie)
{
        int i, n;
        uint8_t x;
        union {
                char	buf[4];
                int8_t	signed8;
                int16_t	signed16;
                int32_t	signed32;
        } u;

        /* Figure out immediate operand size (if any) */
        if (vie->op.op_flags & VIE_OP_F_IMM) {
                /*
                 * Section 2.2.1.5 "Immediates", Intel SDM:
                 * In 64-bit mode the typical size of immediate operands
                 * remains 32-bits. When the operand size is 64-bits, the
                 * processor sign-extends all immediates to 64-bits prior
                 * to their use.
                 */
                if (vie->opsize == 4 || vie->opsize == 8)
                        vie->imm_bytes = 4;
                else
                        vie->imm_bytes = 2;
        } else if (vie->op.op_flags & VIE_OP_F_IMM8) {
                vie->imm_bytes = 1;
        }

        if ((n = vie->imm_bytes) == 0)
                return (0);

        KASSERT(n == 1 || n == 2 || n == 4,
            ("%s: invalid number of immediate bytes: %d", __func__, n));

        for (i = 0; i < n; i++) {
                if (vie_peek(vie, &x))
                        return (-1);

                u.buf[i] = x;
                vie_advance(vie);
        }

        /* sign-extend the immediate value before use */
        if (n == 1)
                vie->immediate = u.signed8;
        else if (n == 2)
                vie->immediate = u.signed16;
        else
                vie->immediate = u.signed32;

        return (0);
}
static int
decode_moffset(struct vie *vie)
{
        int i, n;
        uint8_t x;
        union {
                char	buf[8];
                uint64_t u64;
        } u;

        if ((vie->op.op_flags & VIE_OP_F_MOFFSET) == 0)
                return (0);

        /*
         * Section 2.2.1.4, "Direct Memory-Offset MOVs", Intel SDM:
         * The memory offset size follows the address-size of the instruction.
         */
        n = vie->addrsize;
        KASSERT(n == 2 || n == 4 || n == 8, ("invalid moffset bytes: %d", n));

        u.u64 = 0;
        for (i = 0; i < n; i++) {
                if (vie_peek(vie, &x))
                        return (-1);

                u.buf[i] = x;
                vie_advance(vie);
        }
        vie->displacement = u.u64;
        return (0);
}
/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches with our instruction decoding.
 */
static int
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie,
    enum vm_cpu_mode cpu_mode)
{
        int error;
        uint64_t base, segbase, idx, gla2;
        enum vm_reg_name seg;
        struct seg_desc desc;

        /* Skip 'gla' verification */
        if (gla == VIE_INVALID_GLA)
                return (0);

        base = 0;
        if (vie->base_register != VM_REG_LAST) {
                error = vm_get_register(vm, cpuid, vie->base_register, &base);
                if (error) {
                        printf("verify_gla: error %d getting base reg %d\n",
                            error, vie->base_register);
                        return (-1);
                }

                /*
                 * RIP-relative addressing starts from the following
                 * instruction
                 */
                if (vie->base_register == VM_REG_GUEST_RIP)
                        base += vie->num_processed;
        }

        idx = 0;
        if (vie->index_register != VM_REG_LAST) {
                error = vm_get_register(vm, cpuid, vie->index_register, &idx);
                if (error) {
                        printf("verify_gla: error %d getting index reg %d\n",
                            error, vie->index_register);
                        return (-1);
                }
        }

        /*
         * From "Specifying a Segment Selector", Intel SDM, Vol 1
         *
         * In 64-bit mode, segmentation is generally (but not
         * completely) disabled. The exceptions are the FS and GS
         * segments.
         *
         * In legacy IA-32 mode, when the ESP or EBP register is used
         * as the base, the SS segment is the default segment. For
         * other data references, except when relative to stack or
         * string destination the DS segment is the default. These
         * can be overridden to allow other segments to be accessed.
         */
        if (vie->segment_override)
                seg = vie->segment_register;
        else if (vie->base_register == VM_REG_GUEST_RSP ||
            vie->base_register == VM_REG_GUEST_RBP)
                seg = VM_REG_GUEST_SS;
        else
                seg = VM_REG_GUEST_DS;
        if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
            seg != VM_REG_GUEST_GS) {
                segbase = 0;
        } else {
                error = vm_get_seg_desc(vm, cpuid, seg, &desc);
                if (error) {
                        printf("verify_gla: error %d getting segment"
                            " descriptor %d", error,
                            vie->segment_register);
                        return (-1);
                }
                segbase = desc.base;
        }

        gla2 = segbase + base + vie->scale * idx + vie->displacement;
        gla2 &= size2mask[vie->addrsize];
        if (gla != gla2) {
                printf("verify_gla mismatch: segbase(0x%0lx), "
                    "base(0x%0lx), scale(%d), index(0x%0lx), "
                    "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n",
                    segbase, base, vie->scale, idx, vie->displacement,
                    gla, gla2);
                return (-1);
        }

        return (0);
}
int
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
    enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)
{

        if (decode_prefixes(vie, cpu_mode, cs_d))
                return (-1);

        if (decode_opcode(vie))
                return (-1);

        if (decode_modrm(vie, cpu_mode))
                return (-1);

        if (decode_sib(vie))
                return (-1);

        if (decode_displacement(vie))
                return (-1);

        if (decode_immediate(vie))
                return (-1);

        if (decode_moffset(vie))
                return (-1);

        if ((vie->op.op_flags & VIE_OP_F_NO_GLA_VERIFICATION) == 0) {
                if (verify_gla(vm, cpuid, gla, vie, cpu_mode))
                        return (-1);
        }

        vie->decoded = 1;	/* success */

        return (0);
}
#endif	/* _KERNEL */