/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/systm.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/_iovec.h>

#include <machine/vmm.h>

#include <assert.h>
#define	KASSERT(exp,msg)	assert((exp))
#endif	/* _KERNEL */

#include <machine/vmm_instruction_emul.h>
#include <x86/psl.h>
#include <x86/specialreg.h>
/* struct vie_op.op_type (reconstructed from the op_type values used below) */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_MOVSX,
	VIE_OP_TYPE_MOVZX,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_OR,
	VIE_OP_TYPE_SUB,
	VIE_OP_TYPE_TWO_BYTE,
	VIE_OP_TYPE_PUSH,
	VIE_OP_TYPE_CMP,
	VIE_OP_TYPE_POP,
	VIE_OP_TYPE_MOVS,
	VIE_OP_TYPE_GROUP1,
	VIE_OP_TYPE_STOS,
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)  /* 16/32-bit immediate operand */
#define	VIE_OP_F_IMM8		(1 << 1)  /* 8-bit immediate operand */
#define	VIE_OP_F_MOFFSET	(1 << 2)  /* 16/32/64-bit immediate moffset */
#define	VIE_OP_F_NO_MODRM	(1 << 3)
#define	VIE_OP_F_NO_GLA_VERIFICATION (1 << 4)
static const struct vie_op two_byte_opcodes[256] = {
	[0xB6] = { .op_byte = 0xB6, .op_type = VIE_OP_TYPE_MOVZX },
	[0xB7] = { .op_byte = 0xB7, .op_type = VIE_OP_TYPE_MOVZX },
	[0xBE] = { .op_byte = 0xBE, .op_type = VIE_OP_TYPE_MOVSX },
};
static const struct vie_op one_byte_opcodes[256] = {
	[0x0F] = { .op_byte = 0x0F, .op_type = VIE_OP_TYPE_TWO_BYTE },
	[0x2B] = { .op_byte = 0x2B, .op_type = VIE_OP_TYPE_SUB },
	[0x3B] = { .op_byte = 0x3B, .op_type = VIE_OP_TYPE_CMP },
	[0x88] = { .op_byte = 0x88, .op_type = VIE_OP_TYPE_MOV },
	[0x89] = { .op_byte = 0x89, .op_type = VIE_OP_TYPE_MOV },
	[0x8A] = { .op_byte = 0x8A, .op_type = VIE_OP_TYPE_MOV },
	[0x8B] = { .op_byte = 0x8B, .op_type = VIE_OP_TYPE_MOV },
	[0xA1] = {
		.op_byte = 0xA1,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
	},
	[0xA3] = {
		.op_byte = 0xA3,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
	},
	[0xA4] = {
		.op_byte = 0xA4,
		.op_type = VIE_OP_TYPE_MOVS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xA5] = {
		.op_byte = 0xA5,
		.op_type = VIE_OP_TYPE_MOVS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xAA] = {
		.op_byte = 0xAA,
		.op_type = VIE_OP_TYPE_STOS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xAB] = {
		.op_byte = 0xAB,
		.op_type = VIE_OP_TYPE_STOS,
		.op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
	},
	[0xC6] = {
		/* XXX Group 11 extended opcode - not just MOV */
		.op_byte = 0xC6,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0xC7] = {
		.op_byte = 0xC7,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x23] = { .op_byte = 0x23, .op_type = VIE_OP_TYPE_AND },
	[0x81] = {
		/* XXX Group 1 extended opcode */
		.op_byte = 0x81,
		.op_type = VIE_OP_TYPE_GROUP1,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x83] = {
		/* XXX Group 1 extended opcode */
		.op_byte = 0x83,
		.op_type = VIE_OP_TYPE_GROUP1,
		.op_flags = VIE_OP_F_IMM8,
	},
	[0x8F] = {
		/* XXX Group 1A extended opcode - not just POP */
		.op_byte = 0x8F,
		.op_type = VIE_OP_TYPE_POP,
	},
	[0xFF] = {
		/* XXX Group 5 extended opcode - not just PUSH */
		.op_byte = 0xFF,
		.op_type = VIE_OP_TYPE_PUSH,
	},
};
/* struct vie.mod */
#define	VIE_MOD_INDIRECT	0
#define	VIE_MOD_INDIRECT_DISP8	1
#define	VIE_MOD_INDIRECT_DISP32	2
#define	VIE_MOD_DIRECT		3

/* struct vie.rm */
#define	VIE_RM_SIB	4
#define	VIE_RM_DISP32	5

#define	GB	(1024 * 1024 * 1024)

static enum vm_reg_name gpr_map[16] = {
	VM_REG_GUEST_RAX, VM_REG_GUEST_RCX, VM_REG_GUEST_RDX, VM_REG_GUEST_RBX,
	VM_REG_GUEST_RSP, VM_REG_GUEST_RBP, VM_REG_GUEST_RSI, VM_REG_GUEST_RDI,
	VM_REG_GUEST_R8,  VM_REG_GUEST_R9,  VM_REG_GUEST_R10, VM_REG_GUEST_R11,
	VM_REG_GUEST_R12, VM_REG_GUEST_R13, VM_REG_GUEST_R14, VM_REG_GUEST_R15
};

static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};
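/*
 * For illustration: size2mask[2] is 0xffff, so a 2-byte register update in
 * vie_update_register() below rewrites only the low word (e.g. %cx) and
 * preserves the upper 48 bits of the original register value.
 */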
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
	error = vm_get_register(vm, vcpuid, reg, rval);
vie_calc_bytereg(struct vie *vie, enum vm_reg_name *reg, int *lhbr)
	*reg = gpr_map[vie->reg];

	/*
	 * 64-bit mode imposes limitations on accessing legacy high byte
	 * registers (lhbr).
	 *
	 * The legacy high-byte registers cannot be addressed if the REX
	 * prefix is present.  In this case the values 4, 5, 6 and 7 of the
	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
	 *
	 * If the REX prefix is not present then the values 4, 5, 6 and 7
	 * of the 'ModRM:reg' field address the legacy high-byte registers,
	 * %ah, %ch, %dh and %bh respectively.
	 */
	if (!vie->rex_present) {
		if (vie->reg & 0x4) {
			*reg = gpr_map[vie->reg & 0x3];
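/*
 * For illustration: with ModRM:reg = 4, a MOV encoded without a REX prefix
 * selects the legacy high-byte register %ah (lhbr, base %rax), while the
 * same field with any REX prefix present selects %spl instead.
 */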
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
	enum vm_reg_name reg;

	vie_calc_bytereg(vie, &reg, &lhbr);
	error = vm_get_register(vm, vcpuid, reg, &val);

	/*
	 * To obtain the value of a legacy high byte register shift the
	 * base register right by 8 bits (%ah = %rax >> 8).
	 */
vie_write_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t byte)
	uint64_t origval, val, mask;
	enum vm_reg_name reg;

	vie_calc_bytereg(vie, &reg, &lhbr);
	error = vm_get_register(vm, vcpuid, reg, &origval);

	/*
	 * Shift left by 8 to store 'byte' in a legacy high
	 * byte register.
	 */
	val |= origval & ~mask;
	error = vm_set_register(vm, vcpuid, reg, val);
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
    uint64_t val, int size)
	error = vie_read_register(vm, vcpuid, reg, &origval);

	val &= size2mask[size];
	val |= origval & ~size2mask[size];

	error = vm_set_register(vm, vcpuid, reg, val);

#define	RFLAGS_STATUS_BITS	(PSL_C | PSL_PF | PSL_AF | PSL_Z | PSL_N | PSL_V)
/*
 * Return the status flags that would result from doing (x - y).
 */
getcc##sz(uint##sz##_t x, uint##sz##_t y)				\
	__asm __volatile("sub %2,%1; pushfq; popq %0" :			\
	    "=r" (rflags), "+r" (x) : "m" (y));				\
getcc(int opsize, uint64_t x, uint64_t y)
	KASSERT(opsize == 1 || opsize == 2 || opsize == 4 || opsize == 8,
	    ("getcc: invalid operand size %d", opsize));

	if (opsize == 1)
		return (getcc8(x, y));
	else if (opsize == 2)
		return (getcc16(x, y));
	else if (opsize == 4)
		return (getcc32(x, y));
	else
		return (getcc64(x, y));
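/*
 * For illustration: getcc(4, 1, 2) runs a 32-bit "sub" of 2 from 1 on the
 * host, so the returned flags have PSL_C and PSL_N set and PSL_Z clear,
 * exactly what a guest-executed cmp/sub of the same operands would produce.
 */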
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
	enum vm_reg_name reg;

	switch (vie->op.op_byte) {
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 88/r:	mov r/m8, r8
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;	/* override for byte operation */
		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
		error = memwrite(vm, vcpuid, gpa, byte, size, arg);
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:	mov r/m16, r16
		 * 89/r:	mov r/m32, r32
		 * REX.W + 89/r: mov r/m64, r64
		 */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		val &= size2mask[size];
		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		/*
		 * MOV byte from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8A/r:	mov r8, r/m8
		 * REX + 8A/r:	mov r8, r/m8
		 */
		size = 1;	/* override for byte operation */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		error = vie_write_bytereg(vm, vcpuid, vie, val);
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8B/r:	mov r16, r/m16
		 * 8B/r:	mov r32, r/m32
		 * REX.W + 8B/r: mov r64, r/m64
		 */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		reg = gpr_map[vie->reg];
		error = vie_update_register(vm, vcpuid, reg, val, size);
		/*
		 * MOV from seg:moffset to AX/EAX/RAX
		 * A1:		mov AX, moffs16
		 * A1:		mov EAX, moffs32
		 * REX.W + A1:	mov RAX, moffs64
		 */
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		reg = VM_REG_GUEST_RAX;
		error = vie_update_register(vm, vcpuid, reg, val, size);
		/*
		 * MOV from AX/EAX/RAX to seg:moffset
		 * A3:		mov moffs16, AX
		 * A3:		mov moffs32, EAX
		 * REX.W + A3:	mov moffs64, RAX
		 */
		error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
		val &= size2mask[size];
		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		/*
		 * MOV from imm8 to mem (ModRM:r/m)
		 * C6/0		mov r/m8, imm8
		 * REX + C6/0	mov r/m8, imm8
		 */
		size = 1;	/* override for byte operation */
		error = memwrite(vm, vcpuid, gpa, vie->immediate, size, arg);
		/*
		 * MOV from imm16/imm32 to mem (ModRM:r/m)
		 * C7/0		mov r/m16, imm16
		 * C7/0		mov r/m32, imm32
		 * REX.W + C7/0	mov r/m64, imm32 (sign-extended to 64-bits)
		 */
		val = vie->immediate & size2mask[size];
		error = memwrite(vm, vcpuid, gpa, val, size, arg);
emulate_movx(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite,
    void *arg)
	enum vm_reg_name reg;

	switch (vie->op.op_byte) {
		/*
		 * MOV and zero extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B6/r		movzx r16, r/m8
		 * 0F B6/r		movzx r32, r/m8
		 * REX.W + 0F B6/r	movzx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* zero-extend byte */
		val = (uint8_t)val;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);
		/*
		 * MOV and zero extend word from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F B7/r		movzx r32, r/m16
		 * REX.W + 0F B7/r	movzx r64, r/m16
		 */
		error = memread(vm, vcpuid, gpa, &val, 2, arg);

		reg = gpr_map[vie->reg];

		/* zero-extend word */
		val = (uint16_t)val;

		error = vie_update_register(vm, vcpuid, reg, val, size);
		/*
		 * MOV and sign extend byte from mem (ModRM:r/m) to
		 * reg (ModRM:reg).
		 *
		 * 0F BE/r		movsx r16, r/m8
		 * 0F BE/r		movsx r32, r/m8
		 * REX.W + 0F BE/r	movsx r64, r/m8
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val, 1, arg);

		/* get the second operand */
		reg = gpr_map[vie->reg];

		/* sign extend byte */
		val = (int8_t)val;

		/* write the result */
		error = vie_update_register(vm, vcpuid, reg, val, size);
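	/*
	 * For illustration: after reading the byte 0x80 from memory, the
	 * MOVZX path above yields val = 0x80 while the MOVSX path yields
	 * val = 0xffffffffffffff80; vie_update_register() then truncates
	 * the value to the operand size.
	 */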
/*
 * Helper function to calculate and validate a linear address.
 *
 * Returns 0 on success and 1 if an exception was injected into the guest.
 */
get_gla(void *vm, int vcpuid, struct vie *vie, struct vm_guest_paging *paging,
    int opsize, int addrsize, int prot, enum vm_reg_name seg,
    enum vm_reg_name gpr, uint64_t *gla)
	struct seg_desc desc;
	uint64_t cr0, val, rflags;

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	error = vm_get_seg_desc(vm, vcpuid, seg, &desc);
	KASSERT(error == 0, ("%s: error %d getting segment descriptor %d",
	    __func__, error, seg));

	error = vie_read_register(vm, vcpuid, gpr, &val);
	KASSERT(error == 0, ("%s: error %d getting register %d", __func__,
	    error, gpr));

	if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
	    addrsize, prot, gla)) {
		if (seg == VM_REG_GUEST_SS)
			vm_inject_ss(vm, vcpuid, 0);
		else
			vm_inject_gp(vm, vcpuid);

	if (vie_canonical_check(paging->cpu_mode, *gla)) {
		if (seg == VM_REG_GUEST_SS)
			vm_inject_ss(vm, vcpuid, 0);
		else
			vm_inject_gp(vm, vcpuid);

	if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla)) {
		vm_inject_ac(vm, vcpuid, 0);
emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
#ifdef _KERNEL
	struct vm_copyinfo copyinfo[2];
#else
	struct iovec copyinfo[2];
#endif
	uint64_t dstaddr, srcaddr, dstgpa, srcgpa, val;
	uint64_t rcx, rdi, rsi, rflags;
	int error, opsize, seg, repeat;

	opsize = (vie->op.op_byte == 0xA4) ? 1 : vie->opsize;

	/*
	 * XXX although the MOVS instruction is only supposed to be used with
	 * the "rep" prefix, some guests like FreeBSD will use "repnz" instead.
	 *
	 * Empirically, the "repnz" prefix has identical behavior to "rep"
	 * and the zero flag does not make a difference.
	 */
	repeat = vie->repz_present | vie->repnz_present;

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
	KASSERT(!error, ("%s: error %d getting rcx", __func__, error));

	/*
	 * The count register is %rcx, %ecx or %cx depending on the
	 * address size of the instruction.
	 */
	if ((rcx & vie_size2mask(vie->addrsize)) == 0)

	/*
	 *	Source		Destination	Comments
	 *	--------------------------------------------
	 * (1)	memory		memory		n/a
	 * (2)	memory		mmio		emulated
	 * (3)	mmio		memory		emulated
	 * (4)	mmio		mmio		emulated
	 *
	 * At this point we don't have sufficient information to distinguish
	 * between (2), (3) and (4). We use 'vm_copy_setup()' to tease this
	 * out because it will succeed only when operating on regular memory.
	 *
	 * XXX the emulation doesn't properly handle the case where 'gpa'
	 * is straddling the boundary between the normal memory and MMIO.
	 */

	seg = vie->segment_override ? vie->segment_register : VM_REG_GUEST_DS;
	error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
	    PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr);

	error = vm_copy_setup(vm, vcpuid, paging, srcaddr, opsize, PROT_READ,
	    copyinfo, nitems(copyinfo));
	if (error == 0) {
		/*
		 * case (2): read from system memory and write to mmio.
		 */
		vm_copyin(vm, vcpuid, copyinfo, &val, opsize);
		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
		error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
	} else if (error > 0) {
		/*
		 * Resume guest execution to handle fault.
		 */

		/*
		 * 'vm_copy_setup()' is expected to fail for cases (3) and (4)
		 * if 'srcaddr' is in the mmio space.
		 */

		error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
		    PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr);

		error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
		    PROT_WRITE, copyinfo, nitems(copyinfo));
		if (error == 0) {
			/*
			 * case (3): read from MMIO and write to system memory.
			 *
			 * An MMIO read can have side-effects, so we
			 * commit to it only after vm_copy_setup() is
			 * successful. If a page-fault needs to be
			 * injected into the guest then it will happen
			 * before the MMIO read is attempted.
			 */
			error = memread(vm, vcpuid, gpa, &val, opsize, arg);

			vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
			vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
		} else if (error > 0) {
			/*
			 * Resume guest execution to handle fault.
			 */

			/*
			 * case (4): read from and write to mmio.
			 */
			error = vm_gla2gpa(vm, vcpuid, paging, srcaddr,
			    PROT_READ, &srcgpa);
			error = memread(vm, vcpuid, srcgpa, &val, opsize, arg);

			error = vm_gla2gpa(vm, vcpuid, paging, dstaddr,
			    PROT_WRITE, &dstgpa);
			error = memwrite(vm, vcpuid, dstgpa, val, opsize, arg);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSI, &rsi);
	KASSERT(error == 0, ("%s: error %d getting rsi", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
	KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	if (rflags & PSL_D) {
		rsi -= opsize;
		rdi -= opsize;
	} else {
		rsi += opsize;
		rdi += opsize;
	}

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSI, rsi,
	    vie->addrsize);
	KASSERT(error == 0, ("%s: error %d updating rsi", __func__, error));

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
	    vie->addrsize);
	KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));

	rcx = rcx - 1;
	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
	    rcx, vie->addrsize);
	KASSERT(!error, ("%s: error %d updating rcx", __func__, error));

	/*
	 * Repeat the instruction if the count register is not zero.
	 */
	if ((rcx & vie_size2mask(vie->addrsize)) != 0)
		vm_restart_instruction(vm, vcpuid);
emulate_stos(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
	int error, opsize, repeat;
	uint64_t rcx, rdi, rflags;

	opsize = (vie->op.op_byte == 0xAA) ? 1 : vie->opsize;
	repeat = vie->repz_present | vie->repnz_present;

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
	KASSERT(!error, ("%s: error %d getting rcx", __func__, error));

	/*
	 * The count register is %rcx, %ecx or %cx depending on the
	 * address size of the instruction.
	 */
	if ((rcx & vie_size2mask(vie->addrsize)) == 0)
		return (0);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
	KASSERT(!error, ("%s: error %d getting rax", __func__, error));

	error = memwrite(vm, vcpuid, gpa, val, opsize, arg);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
	KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	if (rflags & PSL_D)
		rdi -= opsize;
	else
		rdi += opsize;

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
	    vie->addrsize);
	KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));

	rcx = rcx - 1;
	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
	    rcx, vie->addrsize);
	KASSERT(!error, ("%s: error %d updating rcx", __func__, error));

	/*
	 * Repeat the instruction if the count register is not zero.
	 */
	if ((rcx & vie_size2mask(vie->addrsize)) != 0)
		vm_restart_instruction(vm, vcpuid);
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
	enum vm_reg_name reg;
	uint64_t result, rflags, rflags2, val1, val2;

	switch (vie->op.op_byte) {
		/*
		 * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 23/r		and r16, r/m16
		 * 23/r		and r32, r/m32
		 * REX.W + 23/r	and r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);

		/* perform the operation and write the result */
		result = val1 & val2;
		error = vie_update_register(vm, vcpuid, reg, result, size);
		/*
		 * AND mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81 /4		and r/m16, imm16
		 * 81 /4		and r/m32, imm32
		 * REX.W + 81 /4	and r/m64, imm32 sign-extended to 64
		 *
		 * 83 /4		and r/m16, imm8 sign-extended to 16
		 * 83 /4		and r/m32, imm8 sign-extended to 32
		 * REX.W + 83/4		and r/m64, imm8 sign-extended to 64
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		result = val1 & vie->immediate;
		error = memwrite(vm, vcpuid, gpa, result, size, arg);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);

	/*
	 * OF and CF are cleared; the SF, ZF and PF flags are set according
	 * to the result; AF is undefined.
	 *
	 * The updated status flags are obtained by subtracting 0 from 'result'.
	 */
	rflags2 = getcc(size, result, 0);
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
	uint64_t val1, result, rflags, rflags2;

	switch (vie->op.op_byte) {
		/*
		 * OR mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81 /1		or r/m16, imm16
		 * 81 /1		or r/m32, imm32
		 * REX.W + 81 /1	or r/m64, imm32 sign-extended to 64
		 *
		 * 83 /1		or r/m16, imm8 sign-extended to 16
		 * 83 /1		or r/m32, imm8 sign-extended to 32
		 * REX.W + 83/1		or r/m64, imm8 sign-extended to 64
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		result = val1 | vie->immediate;
		error = memwrite(vm, vcpuid, gpa, result, size, arg);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);

	/*
	 * OF and CF are cleared; the SF, ZF and PF flags are set according
	 * to the result; AF is undefined.
	 *
	 * The updated status flags are obtained by subtracting 0 from 'result'.
	 */
	rflags2 = getcc(size, result, 0);
	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
emulate_cmp(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
	uint64_t op1, op2, rflags, rflags2;
	enum vm_reg_name reg;

	switch (vie->op.op_byte) {
		/*
		 * 3B/r		CMP r16, r/m16
		 * 3B/r		CMP r32, r/m32
		 * REX.W + 3B/r	CMP r64, r/m64
		 *
		 * Compare first operand (reg) with second operand (r/m) and
		 * set status flags in EFLAGS register. The comparison is
		 * performed by subtracting the second operand from the first
		 * operand and then setting the status flags.
		 */

		/* Get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &op1);

		/* Get the second operand */
		error = memread(vm, vcpuid, gpa, &op2, size, arg);

		rflags2 = getcc(size, op1, op2);
		/*
		 * 81 /7		cmp r/m16, imm16
		 * 81 /7		cmp r/m32, imm32
		 * REX.W + 81 /7	cmp r/m64, imm32 sign-extended to 64
		 *
		 * 83 /7		cmp r/m16, imm8 sign-extended to 16
		 * 83 /7		cmp r/m32, imm8 sign-extended to 32
		 * REX.W + 83 /7	cmp r/m64, imm8 sign-extended to 64
		 *
		 * Compare mem (ModRM:r/m) with immediate and set
		 * status flags according to the results. The
		 * comparison is performed by subtracting the
		 * immediate from the first operand and then setting
		 * the status flags.
		 */

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &op1, size, arg);

		rflags2 = getcc(size, op1, vie->immediate);

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);

	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & RFLAGS_STATUS_BITS;

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
emulate_sub(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
	uint64_t nval, rflags, rflags2, val1, val2;
	enum vm_reg_name reg;

	switch (vie->op.op_byte) {
		/*
		 * SUB r/m from r and store the result in r
		 *
		 * 2B/r		SUB r16, r/m16
		 * 2B/r		SUB r32, r/m32
		 * REX.W + 2B/r	SUB r64, r/m64
		 */

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);

		/* perform the operation and write the result */
		nval = val1 - val2;
		error = vie_update_register(vm, vcpuid, reg, nval, size);

	rflags2 = getcc(size, val1, val2);
	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
	    &rflags);

	rflags &= ~RFLAGS_STATUS_BITS;
	rflags |= rflags2 & RFLAGS_STATUS_BITS;
	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
	    rflags, 8);
emulate_stack_op(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
#ifdef _KERNEL
	struct vm_copyinfo copyinfo[2];
#else
	struct iovec copyinfo[2];
#endif
	struct seg_desc ss_desc;
	uint64_t cr0, rflags, rsp, stack_gla, val;
	int error, size, stackaddrsize, pushop;

	pushop = (vie->op.op_type == VIE_OP_TYPE_PUSH) ? 1 : 0;

	/*
	 * From "Address-Size Attributes for Stack Accesses", Intel SDM, Vol 1
	 */
	if (paging->cpu_mode == CPU_MODE_REAL) {
	} else if (paging->cpu_mode == CPU_MODE_64BIT) {
		/*
		 * "Stack Manipulation Instructions in 64-bit Mode", SDM, Vol 3
		 * - Stack pointer size is always 64-bits.
		 * - PUSH/POP of 32-bit values is not possible in 64-bit mode.
		 * - 16-bit PUSH/POP is supported by using the operand size
		 *   override prefix (66H).
		 */
		size = vie->opsize_override ? 2 : 8;
	} else {
		/*
		 * In protected or compatibility mode the 'B' flag in the
		 * stack-segment descriptor determines the size of the
		 * stack pointer.
		 */
		error = vm_get_seg_desc(vm, vcpuid, VM_REG_GUEST_SS, &ss_desc);
		KASSERT(error == 0, ("%s: error %d getting SS descriptor",
		    __func__, error));
		if (SEG_DESC_DEF32(ss_desc.access))
			stackaddrsize = 4;
		else
			stackaddrsize = 2;
	}

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
	KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
	KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));

	error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSP, &rsp);
	KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error));

	if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
	    rsp, size, stackaddrsize, pushop ? PROT_WRITE : PROT_READ,
	    &stack_gla)) {
		vm_inject_ss(vm, vcpuid, 0);

	if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
		vm_inject_ss(vm, vcpuid, 0);

	if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) {
		vm_inject_ac(vm, vcpuid, 0);

	error = vm_copy_setup(vm, vcpuid, paging, stack_gla, size,
	    pushop ? PROT_WRITE : PROT_READ, copyinfo, nitems(copyinfo));
	if (error == -1) {
		/*
		 * XXX cannot return a negative error value here because it
		 * ends up being the return value of the VM_RUN() ioctl and
		 * is interpreted as a pseudo-error (e.g., ERESTART).
		 */
	} else if (error == 1) {
		/* Resume guest execution to handle page fault */
	}

	if (pushop) {
		error = memread(vm, vcpuid, mmio_gpa, &val, size, arg);
		if (error == 0)
			vm_copyout(vm, vcpuid, &val, copyinfo, size);
	} else {
		vm_copyin(vm, vcpuid, copyinfo, &val, size);
		error = memwrite(vm, vcpuid, mmio_gpa, val, size, arg);
	}
	vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));

	error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSP, rsp,
	    stackaddrsize);
	KASSERT(error == 0, ("error %d updating rsp", error));
emulate_push(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
	/*
	 * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
	 *
	 * PUSH is part of the group 5 extended opcodes and is identified
	 * by ModRM:reg = b110.
	 */
	if ((vie->reg & 7) != 6)
		return (EINVAL);

	error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
	    memwrite, arg);
emulate_pop(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *arg)
	/*
	 * Table A-6, "Opcode Extensions", Intel SDM, Vol 2.
	 *
	 * POP is part of the group 1A extended opcodes and is identified
	 * by ModRM:reg = b000.
	 */
	if ((vie->reg & 7) != 0)
		return (EINVAL);

	error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
	    memwrite, arg);
emulate_group1(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)
	switch (vie->reg & 7) {
	case 0x1:	/* OR */
		error = emulate_or(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case 0x4:	/* AND */
		error = emulate_and(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case 0x7:	/* CMP */
		error = emulate_cmp(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
    struct vm_guest_paging *paging, mem_region_read_t memread,
    mem_region_write_t memwrite, void *memarg)
	switch (vie->op.op_type) {
	case VIE_OP_TYPE_GROUP1:
		error = emulate_group1(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_POP:
		error = emulate_pop(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_PUSH:
		error = emulate_push(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_CMP:
		error = emulate_cmp(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOVSX:
	case VIE_OP_TYPE_MOVZX:
		error = emulate_movx(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_MOVS:
		error = emulate_movs(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_STOS:
		error = emulate_stos(vm, vcpuid, gpa, vie, paging, memread,
		    memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_OR:
		error = emulate_or(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_SUB:
		error = emulate_sub(vm, vcpuid, gpa, vie,
		    memread, memwrite, memarg);
		break;
vie_alignment_check(int cpl, int size, uint64_t cr0, uint64_t rf, uint64_t gla)
	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("%s: invalid size %d", __func__, size));
	KASSERT(cpl >= 0 && cpl <= 3, ("%s: invalid cpl %d", __func__, cpl));

	if (cpl != 3 || (cr0 & CR0_AM) == 0 || (rf & PSL_AC) == 0)
		return (0);

	return ((gla & (size - 1)) ? 1 : 0);
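/*
 * For illustration: a 4-byte access at a hypothetical gla ending in 0x2 has
 * (gla & 3) != 0 and so reports a misalignment, but only when CPL is 3 and
 * both CR0.AM and RFLAGS.AC are set; ring-0 accesses are never checked here.
 */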
vie_canonical_check(enum vm_cpu_mode cpu_mode, uint64_t gla)
	if (cpu_mode != CPU_MODE_64BIT)
		return (0);

	/*
	 * The value of bit 47 in the 'gla' should be replicated in the
	 * most significant 16 bits.
	 */
	mask = ~((1UL << 48) - 1);
	if (gla & (1UL << 47))
		return ((gla & mask) != mask);
	else
		return ((gla & mask) != 0);
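/*
 * For illustration: 0x00007fffffffffff and 0xffff800000000000 are canonical
 * (bits 63:48 replicate bit 47), while 0x0000800000000000 is not and makes
 * this function return 1.
 */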
vie_size2mask(int size)
	KASSERT(size == 1 || size == 2 || size == 4 || size == 8,
	    ("vie_size2mask: invalid size %d", size));
	return (size2mask[size]);
vie_calculate_gla(enum vm_cpu_mode cpu_mode, enum vm_reg_name seg,
    struct seg_desc *desc, uint64_t offset, int length, int addrsize,
    int prot, uint64_t *gla)
	uint64_t firstoff, low_limit, high_limit, segbase;

	KASSERT(seg >= VM_REG_GUEST_ES && seg <= VM_REG_GUEST_GS,
	    ("%s: invalid segment %d", __func__, seg));
	KASSERT(length == 1 || length == 2 || length == 4 || length == 8,
	    ("%s: invalid operand size %d", __func__, length));
	KASSERT((prot & ~(PROT_READ | PROT_WRITE)) == 0,
	    ("%s: invalid prot %#x", __func__, prot));

	if (cpu_mode == CPU_MODE_64BIT) {
		KASSERT(addrsize == 4 || addrsize == 8, ("%s: invalid address "
		    "size %d for cpu_mode %d", __func__, addrsize, cpu_mode));
	} else {
		KASSERT(addrsize == 2 || addrsize == 4, ("%s: invalid address "
		    "size %d for cpu mode %d", __func__, addrsize, cpu_mode));

		/*
		 * If the segment selector is loaded with a NULL selector
		 * then the descriptor is unusable and attempting to use
		 * it results in a #GP(0).
		 */
		if (SEG_DESC_UNUSABLE(desc->access))
			return (-1);

		/*
		 * The processor generates a #NP exception when a segment
		 * register is loaded with a selector that points to a
		 * descriptor that is not present. If this were the case then
		 * it would have been checked before the VM-exit.
		 */
		KASSERT(SEG_DESC_PRESENT(desc->access),
		    ("segment %d not present: %#x", seg, desc->access));

		/*
		 * The descriptor type must indicate a code/data segment.
		 */
		type = SEG_DESC_TYPE(desc->access);
		KASSERT(type >= 16 && type <= 31, ("segment %d has invalid "
		    "descriptor type %#x", seg, type));

		if (prot & PROT_READ) {
			/* #GP on a read access to an exec-only code segment */
			if ((type & 0xA) == 0x8)
				return (-1);
		}

		if (prot & PROT_WRITE) {
			/*
			 * #GP on a write access to a code segment or a
			 * read-only data segment.
			 */
			if (type & 0x8)			/* code segment */
				return (-1);

			if ((type & 0xA) == 0)		/* read-only data seg */
				return (-1);
		}

		/*
		 * 'desc->limit' is fully expanded taking granularity into
		 * account.
		 */
		if ((type & 0xC) == 0x4) {
			/* expand-down data segment */
			low_limit = desc->limit + 1;
			high_limit = SEG_DESC_DEF32(desc->access) ?
			    0xffffffff : 0xffff;
		} else {
			/* code segment or expand-up data segment */
			low_limit = 0;
			high_limit = desc->limit;
		}

		while (length > 0) {
			offset &= vie_size2mask(addrsize);
			if (offset < low_limit || offset > high_limit)
				return (-1);

	/*
	 * In 64-bit mode all segments except %fs and %gs have a segment
	 * base address of 0.
	 */
	if (cpu_mode == CPU_MODE_64BIT && seg != VM_REG_GUEST_FS &&
	    seg != VM_REG_GUEST_GS) {
		segbase = 0;
	} else {
		segbase = desc->base;
	}

	/*
	 * Truncate 'firstoff' to the effective address size before adding
	 * it to the segment base.
	 */
	firstoff &= vie_size2mask(addrsize);
	*gla = (segbase + firstoff) & vie_size2mask(glasize);
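/*
 * For illustration (protected mode, hypothetical values): with a segment
 * base of 0x10000 and a 2-byte address size, an offset of 0x1fff0 is
 * truncated to 0xfff0 by vie_size2mask(2), so *gla becomes 0x1fff0; in
 * 64-bit mode the same offset is added to a base of 0 for any segment
 * other than %fs or %gs.
 */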
vie_init(struct vie *vie, const char *inst_bytes, int inst_length)
	KASSERT(inst_length >= 0 && inst_length <= VIE_INST_SIZE,
	    ("%s: invalid instruction length (%d)", __func__, inst_length));

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
	vie->segment_register = VM_REG_LAST;

	bcopy(inst_bytes, vie->inst, inst_length);
	vie->num_valid = inst_length;
pf_error_code(int usermode, int prot, int rsvd, uint64_t pte)
	if (pte & PG_V)
		error_code |= PGEX_P;
	if (prot & VM_PROT_WRITE)
		error_code |= PGEX_W;
	if (usermode)
		error_code |= PGEX_U;
	if (rsvd)
		error_code |= PGEX_RSV;
	if (prot & VM_PROT_EXECUTE)
		error_code |= PGEX_I;

	return (error_code);
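/*
 * For illustration: a user-mode write to a present, read-only page yields
 * pf_error_code(1, VM_PROT_WRITE, 0, pte) == PGEX_P | PGEX_W | PGEX_U,
 * i.e. the familiar 0x7 error code pushed for such a page fault.
 */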
ptp_release(void **cookie)
	if (*cookie != NULL) {
		vm_gpa_release(*cookie);
		*cookie = NULL;
	}

ptp_hold(struct vm *vm, vm_paddr_t ptpphys, size_t len, void **cookie)
	ptp_release(cookie);
	ptr = vm_gpa_hold(vm, ptpphys, len, VM_PROT_RW, cookie);
vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa)
	int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
	uint64_t *ptpbase, ptpphys, pte, pgsize;
	uint32_t *ptpbase32, pte32;

	usermode = (paging->cpl == 3 ? 1 : 0);
	writable = prot & VM_PROT_WRITE;

	ptpphys = paging->cr3;		/* root of the page tables */
	ptp_release(&cookie);

	if (vie_canonical_check(paging->cpu_mode, gla)) {
		/*
		 * XXX assuming a non-stack reference otherwise a stack fault
		 * should be generated.
		 */
		vm_inject_gp(vm, vcpuid);

	if (paging->paging_mode == PAGING_MODE_FLAT) {

	if (paging->paging_mode == PAGING_MODE_32) {
		while (--nlevels >= 0) {
			/* Zero out the lower 12 bits. */
			ptpphys &= ~0xfff;

			ptpbase32 = ptp_hold(vm, ptpphys, PAGE_SIZE, &cookie);

			if (ptpbase32 == NULL)

			ptpshift = PAGE_SHIFT + nlevels * 10;
			ptpindex = (gla >> ptpshift) & 0x3FF;
			pgsize = 1UL << ptpshift;

			pte32 = ptpbase32[ptpindex];

			if ((pte32 & PG_V) == 0 ||
			    (usermode && (pte32 & PG_U) == 0) ||
			    (writable && (pte32 & PG_RW) == 0)) {
				pfcode = pf_error_code(usermode, prot, 0,
				    pte32);
				vm_inject_pf(vm, vcpuid, pfcode, gla);

			/*
			 * Emulate the x86 MMU's management of the accessed
			 * and dirty flags. While the accessed flag is set
			 * at every level of the page table, the dirty flag
			 * is only set at the last level providing the guest
			 * physical address.
			 */
			if ((pte32 & PG_A) == 0) {
				if (atomic_cmpset_32(&ptpbase32[ptpindex],
				    pte32, pte32 | PG_A) == 0) {

			/* XXX must be ignored if CR4.PSE=0 */
			if (nlevels > 0 && (pte32 & PG_PS) != 0)

		/* Set the dirty bit in the page table entry if necessary */
		if (writable && (pte32 & PG_M) == 0) {
			if (atomic_cmpset_32(&ptpbase32[ptpindex],
			    pte32, pte32 | PG_M) == 0) {

		/* Zero out the lower 'ptpshift' bits */
		pte32 >>= ptpshift; pte32 <<= ptpshift;
		*gpa = pte32 | (gla & (pgsize - 1));

	if (paging->paging_mode == PAGING_MODE_PAE) {
		/* Zero out the lower 5 bits and the upper 32 bits */
		ptpphys &= 0xffffffe0UL;

		ptpbase = ptp_hold(vm, ptpphys, sizeof(*ptpbase) * 4, &cookie);
		if (ptpbase == NULL)

		ptpindex = (gla >> 30) & 0x3;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);

	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		ptpbase = ptp_hold(vm, ptpphys, PAGE_SIZE, &cookie);
		if (ptpbase == NULL)

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0 ||
		    (usermode && (pte & PG_U) == 0) ||
		    (writable && (pte & PG_RW) == 0)) {
			pfcode = pf_error_code(usermode, prot, 0, pte);
			vm_inject_pf(vm, vcpuid, pfcode, gla);

		/* Set the accessed bit in the page table entry */
		if ((pte & PG_A) == 0) {
			if (atomic_cmpset_64(&ptpbase[ptpindex],
			    pte, pte | PG_A) == 0) {

		if (nlevels > 0 && (pte & PG_PS) != 0) {
			if (pgsize > 1 * GB) {
				pfcode = pf_error_code(usermode, prot, 1, pte);
				vm_inject_pf(vm, vcpuid, pfcode, gla);

	/* Set the dirty bit in the page table entry if necessary */
	if (writable && (pte & PG_M) == 0) {
		if (atomic_cmpset_64(&ptpbase[ptpindex], pte, pte | PG_M) == 0)

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));

	ptp_release(&cookie);
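/*
 * For illustration (4-level mode, hypothetical gla 0x00007f0012345678): the
 * walk visits ptpshift values 39, 30, 21 and 12, indexing each table with
 * (gla >> ptpshift) & 0x1FF; if the entry at shift 21 has PG_PS set, the
 * walk stops at a 2MB page and *gpa = (pte & ~0x1fffff) | (gla & 0x1fffff).
 */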
vmm_fetch_instruction(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t rip, int inst_length, struct vie *vie)
	struct vm_copyinfo copyinfo[2];

	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	prot = PROT_READ | PROT_EXEC;
	error = vm_copy_setup(vm, vcpuid, paging, rip, inst_length, prot,
	    copyinfo, nitems(copyinfo));
	if (error == 0) {
		vm_copyin(vm, vcpuid, copyinfo, vie->inst, inst_length);
		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
		vie->num_valid = inst_length;
	}
vie_peek(struct vie *vie, uint8_t *x)
	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	}

vie_advance(struct vie *vie)
	vie->num_processed++;
segment_override(uint8_t x, int *seg)
	switch (x) {
	case 0x2E: *seg = VM_REG_GUEST_CS; break;
	case 0x36: *seg = VM_REG_GUEST_SS; break;
	case 0x3E: *seg = VM_REG_GUEST_DS; break;
	case 0x26: *seg = VM_REG_GUEST_ES; break;
	case 0x64: *seg = VM_REG_GUEST_FS; break;
	case 0x65: *seg = VM_REG_GUEST_GS; break;
	default: return (0);
	}
	return (1);
decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)
	if (vie_peek(vie, &x))
		return (-1);

	if (x == 0x66)
		vie->opsize_override = 1;
	else if (x == 0x67)
		vie->addrsize_override = 1;
	else if (x == 0xF3)
		vie->repz_present = 1;
	else if (x == 0xF2)
		vie->repnz_present = 1;
	else if (segment_override(x, &vie->segment_register))
		vie->segment_override = 1;

	/*
	 * From section 2.2.1, "REX Prefixes", Intel SDM Vol 2:
	 * - Only one REX prefix is allowed per instruction.
	 * - The REX prefix must immediately precede the opcode byte or the
	 *   escape opcode byte.
	 * - If an instruction has a mandatory prefix (0x66, 0xF2 or 0xF3)
	 *   the mandatory prefix must come before the REX prefix.
	 */
	if (cpu_mode == CPU_MODE_64BIT && x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;
		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;
	}

	/*
	 * Section "Operand-Size And Address-Size Attributes", Intel SDM, Vol 1
	 */
	if (cpu_mode == CPU_MODE_64BIT) {
		/*
		 * Default address size is 64-bits and default operand size
		 * is 32-bits.
		 */
		vie->addrsize = vie->addrsize_override ? 4 : 8;
		if (vie->rex_w)
			vie->opsize = 8;
		else if (vie->opsize_override)
			vie->opsize = 2;
		else
			vie->opsize = 4;
	} else if (cs_d) {
		/* Default address and operand sizes are 32-bits */
		vie->addrsize = vie->addrsize_override ? 2 : 4;
		vie->opsize = vie->opsize_override ? 2 : 4;
	} else {
		/* Default address and operand sizes are 16-bits */
		vie->addrsize = vie->addrsize_override ? 4 : 2;
		vie->opsize = vie->opsize_override ? 4 : 2;
	}
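	/*
	 * For illustration: in 64-bit mode the hypothetical byte sequence
	 * 66 48 89 08 carries both an operand-size override and REX.W, and
	 * REX.W wins: opsize ends up 8, not 2.  Without REX.W the 66 prefix
	 * alone would select opsize 2.
	 */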
decode_two_byte_opcode(struct vie *vie)
	if (vie_peek(vie, &x))
		return (-1);

	vie->op = two_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

decode_opcode(struct vie *vie)
	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	if (vie->op.op_type == VIE_OP_TYPE_TWO_BYTE)
		return (decode_two_byte_opcode(vie));
decode_modrm(struct vie *vie, enum vm_cpu_mode cpu_mode)
	if (vie->op.op_flags & VIE_OP_F_NO_MODRM)
		return (0);

	if (cpu_mode == CPU_MODE_REAL)
		return (-1);

	if (vie_peek(vie, &x))
		return (-1);

	vie->mod = (x >> 6) & 0x3;
	vie->rm = (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is don't care in
		 * these two cases.
		 */
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	/* SIB */
	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			/*
			 * Table 2-7. RIP-Relative Addressing
			 *
			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
			 * whereas in compatibility mode it just implies disp32.
			 */
			if (cpu_mode == CPU_MODE_64BIT)
				vie->base_register = VM_REG_GUEST_RIP;
			else
				vie->base_register = VM_REG_LAST;
		}
decode_sib(struct vie *vie)
	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case when base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;
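	/*
	 * For illustration (hypothetical SIB byte 0x88, no REX): ss = 2,
	 * index = 1 and base = 0, so the effective address is
	 * %rax + %rcx * 4 (scale = 1 << 2), plus any displacement.
	 */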
decode_displacement(struct vie *vie)
	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */
decode_immediate(struct vie *vie)
	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM) {
		/*
		 * Section 2.2.1.5 "Immediates", Intel SDM:
		 * In 64-bit mode the typical size of immediate operands
		 * remains 32-bits. When the operand size is 64-bits, the
		 * processor sign-extends all immediates to 64-bits prior
		 * to their use.
		 */
		if (vie->opsize == 4 || vie->opsize == 8)
			vie->imm_bytes = 4;
		else
			vie->imm_bytes = 2;
	} else if (vie->op.op_flags & VIE_OP_F_IMM8) {
		vie->imm_bytes = 1;
	}

	if ((n = vie->imm_bytes) == 0)
		return (0);

	KASSERT(n == 1 || n == 2 || n == 4,
	    ("%s: invalid number of immediate bytes: %d", __func__, n));

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

	/* sign-extend the immediate value before use */
	if (n == 1)
		vie->immediate = u.signed8;
	else if (n == 2)
		vie->immediate = u.signed16;
	else
		vie->immediate = u.signed32;
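	/*
	 * For illustration: an 83-encoded "and" carrying the single
	 * immediate byte 0xf0 sign-extends through u.signed8, so
	 * vie->immediate becomes 0xfffffffffffffff0 before emulate_and()
	 * applies it at the instruction's operand size.
	 */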
decode_moffset(struct vie *vie)
	if ((vie->op.op_flags & VIE_OP_F_MOFFSET) == 0)
		return (0);

	/*
	 * Section 2.2.1.4, "Direct Memory-Offset MOVs", Intel SDM:
	 * The memory offset size follows the address-size of the instruction.
	 */
	n = vie->addrsize;
	KASSERT(n == 2 || n == 4 || n == 8, ("invalid moffset bytes: %d", n));

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

	vie->displacement = u.u64;
/*
 * Verify that all the bytes in the instruction buffer were consumed.
 */
verify_inst_length(struct vie *vie)
	if (vie->num_processed == vie->num_valid)
		return (0);
	else
		return (-1);
/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches with our instruction decoding.
 */
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
	uint64_t base, idx, gla2;

	/* Skip 'gla' verification */
	if (gla == VIE_INVALID_GLA)
		return (0);

	base = 0;
	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
			    error, vie->base_register);
			return (-1);
		}

		/*
		 * RIP-relative addressing starts from the following
		 * instruction.
		 */
		if (vie->base_register == VM_REG_GUEST_RIP)
			base += vie->num_valid;
	}

	idx = 0;
	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
			    error, vie->index_register);
			return (-1);
		}
	}

	/* XXX assuming that the base address of the segment is 0 */
	gla2 = base + vie->scale * idx + vie->displacement;
	gla2 &= size2mask[vie->addrsize];
	if (gla != gla2) {
		printf("verify_gla mismatch: "
		    "base(0x%0lx), scale(%d), index(0x%0lx), "
		    "disp(0x%0lx), gla(0x%0lx), gla2(0x%0lx)\n",
		    base, vie->scale, idx, vie->displacement, gla, gla2);
		return (-1);
	}
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
    enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)
	if (decode_prefixes(vie, cpu_mode, cs_d))
		return (-1);

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie, cpu_mode))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

	if (decode_moffset(vie))
		return (-1);

	if (verify_inst_length(vie))
		return (-1);

	if ((vie->op.op_flags & VIE_OP_F_NO_GLA_VERIFICATION) == 0) {
		if (verify_gla(vm, cpuid, gla, vie))
			return (-1);
	}

	vie->decoded = 1;	/* success */
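/*
 * Typical flow (a simplified sketch of how this file is used): on a nested
 * page table fault, vmm_fetch_instruction() copies the bytes at the faulting
 * %rip into the vie, vmm_decode_instruction() fills in the prefix, opcode,
 * ModRM, SIB, displacement and immediate state decoded above, and
 * vmm_emulate_instruction() then dispatches on op_type to the matching
 * emulate_*() handler with the guest physical address of the MMIO access.
 */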
#endif	/* _KERNEL */