/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>

#include <machine/specialreg.h>

#include <machine/vmm.h>
#include "vmm_lapic.h"
#include "vmm_msr.h"

#define	VMM_MSR_F_EMULATE	0x01
#define	VMM_MSR_F_READONLY	0x02
#define	VMM_MSR_F_INVALID	0x04	/* guest_msr_valid() can override this */

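/*
 * How the flags combine (a summary of the checks in emulate_wrmsr() and
 * emulate_rdmsr() below, not additional semantics):
 *
 *	EMULATE			value lives only in the guest_msrs[] cache
 *	EMULATE|READONLY	guest writes are silently dropped
 *	...|INVALID		accesses fail until guest_msr_valid() clears
 *				the flag
 *	(no flags)		writes are cached and also pushed to hardware
 */
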
struct vmm_msr {
	int		num;
	int		flags;
	uint64_t	hostval;
};

static struct vmm_msr vmm_msr[] = {
	{ MSR_LSTAR,	0 },
	{ MSR_CSTAR,	0 },
	{ MSR_STAR,	0 },
	{ MSR_SF_MASK,	0 },
	{ MSR_PAT,	VMM_MSR_F_EMULATE | VMM_MSR_F_INVALID },
	{ MSR_BIOS_SIGN, VMM_MSR_F_EMULATE },
	{ MSR_MCG_CAP,	VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
	{ MSR_IA32_PLATFORM_ID,	VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
	{ MSR_IA32_MISC_ENABLE,	VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
};

#define	vmm_msr_num	(sizeof(vmm_msr) / sizeof(vmm_msr[0]))
CTASSERT(VMM_MSR_NUM >= vmm_msr_num);

#define	readonly_msr(idx)	\
	((vmm_msr[(idx)].flags & VMM_MSR_F_READONLY) != 0)

#define	emulated_msr(idx)	\
	((vmm_msr[(idx)].flags & VMM_MSR_F_EMULATE) != 0)

#define	invalid_msr(idx)	\
	((vmm_msr[(idx)].flags & VMM_MSR_F_INVALID) != 0)

void
vmm_msr_init(void)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++) {
		if (emulated_msr(i))
			continue;
		/*
		 * XXX this assumes that the value of the host msr does not
		 * change after we have cached it.
		 */
		vmm_msr[i].hostval = rdmsr(vmm_msr[i].num);
	}
}

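/*
 * Sketch of the intended call order. The vm/vcpu variable names below are
 * assumptions about the CPU-specific caller; only the four functions shown
 * are defined by this file.
 */
#if 0
	vmm_msr_init();			/* module init: cache host MSR values */
	guest_msrs_init(vm, vcpu);	/* vcpu creation: seed guest values */
	restore_guest_msrs(vm, vcpu);	/* before VM entry */
	/* guest runs; rdmsr/wrmsr exits go to emulate_{rd,wr}msr() */
	restore_host_msrs(vm, vcpu);	/* after VM exit */
#endif
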
void
guest_msrs_init(struct vm *vm, int cpu)
{
	int i;
	uint64_t *guest_msrs, misc;

	guest_msrs = vm_guest_msrs(vm, cpu);

	for (i = 0; i < vmm_msr_num; i++) {
		switch (vmm_msr[i].num) {
		case MSR_LSTAR:
		case MSR_CSTAR:
		case MSR_STAR:
		case MSR_SF_MASK:
		case MSR_BIOS_SIGN:
		case MSR_MCG_CAP:
			guest_msrs[i] = 0;
			break;
		case MSR_PAT:
			guest_msrs[i] = PAT_VALUE(0, PAT_WRITE_BACK) |
			    PAT_VALUE(1, PAT_WRITE_THROUGH) |
			    PAT_VALUE(2, PAT_UNCACHED) |
			    PAT_VALUE(3, PAT_UNCACHEABLE) |
			    PAT_VALUE(4, PAT_WRITE_BACK) |
			    PAT_VALUE(5, PAT_WRITE_THROUGH) |
			    PAT_VALUE(6, PAT_UNCACHED) |
			    PAT_VALUE(7, PAT_UNCACHEABLE);
			break;
		case MSR_IA32_MISC_ENABLE:
			misc = rdmsr(MSR_IA32_MISC_ENABLE);
			/*
			 * Set mandatory bits
			 *  11:	branch trace disabled
			 *  12:	PEBS unavailable
			 * Clear unsupported features
			 *  16:	SpeedStep enable
			 *  18:	enable MONITOR FSM
			 */
			misc |= (1 << 12) | (1 << 11);
			misc &= ~((1 << 18) | (1 << 16));
			guest_msrs[i] = misc;
			break;
		case MSR_IA32_PLATFORM_ID:
			guest_msrs[i] = 0;
			break;
		default:
			panic("guest_msrs_init: missing initialization for msr "
			    "0x%x", vmm_msr[i].num);
		}
	}
}

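/*
 * Worked example for the MSR_PAT case above: the initializer expands to
 * 0x0007040600070406 -- bytes WB(0x06), WT(0x04), UC-(0x07), UC(0x00),
 * repeated for entries 4-7 -- which matches the architectural power-on
 * value of IA32_PAT.
 */
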
static int
msr_num_to_idx(u_int num)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++)
		if (vmm_msr[i].num == num)
			return (i);

	return (-1);
}

int
emulate_wrmsr(struct vm *vm, int cpu, u_int num, uint64_t val, bool *retu)
{
	int idx;
	uint64_t *guest_msrs;

	if (lapic_msr(num))
		return (lapic_wrmsr(vm, cpu, num, val, retu));

	idx = msr_num_to_idx(num);
	if (idx < 0 || invalid_msr(idx))
		return (EINVAL);

	if (!readonly_msr(idx)) {
		guest_msrs = vm_guest_msrs(vm, cpu);

		/* Stash the value */
		guest_msrs[idx] = val;

		/* Update processor state for non-emulated MSRs */
		if (!emulated_msr(idx))
			wrmsr(vmm_msr[idx].num, val);
	}

	return (0);
}

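/*
 * Caller sketch for a WRMSR exit, with hypothetical variable names (only
 * emulate_wrmsr() is defined here). Architecturally, WRMSR takes the MSR
 * number in %ecx and the value split across %edx:%eax; on EINVAL a caller
 * would typically inject #GP or reflect the exit to userspace.
 */
#if 0
	val = (uint64_t)edx << 32 | eax;	/* reassemble %edx:%eax */
	error = emulate_wrmsr(vm, vcpuid, ecx, val, &retu);
#endif
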
int
emulate_rdmsr(struct vm *vm, int cpu, u_int num, bool *retu)
{
	int error, idx;
	uint32_t eax, edx;
	uint64_t result, *guest_msrs;

	if (lapic_msr(num)) {
		error = lapic_rdmsr(vm, cpu, num, &result, retu);
		goto done;
	}

	idx = msr_num_to_idx(num);
	if (idx < 0 || invalid_msr(idx)) {
		error = EINVAL;
		goto done;
	}

	guest_msrs = vm_guest_msrs(vm, cpu);
	result = guest_msrs[idx];

	/*
	 * If this is not an emulated MSR, make sure that the processor
	 * state matches our cached state.
	 */
	if (!emulated_msr(idx) && (rdmsr(num) != result)) {
		panic("emulate_rdmsr: msr 0x%x has inconsistent cached "
		    "(0x%016lx) and actual (0x%016lx) values", num,
		    guest_msrs[idx], rdmsr(num));
	}

	error = 0;

done:
	if (error == 0) {
		eax = result;
		edx = result >> 32;
		error = vm_set_register(vm, cpu, VM_REG_GUEST_RAX, eax);
		if (error)
			panic("vm_set_register(rax) error %d", error);
		error = vm_set_register(vm, cpu, VM_REG_GUEST_RDX, edx);
		if (error)
			panic("vm_set_register(rdx) error %d", error);
	}
	return (error);
}

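/*
 * The read path is simpler for the caller: emulate_rdmsr() deposits the
 * result into the guest's %rax/%rdx itself via vm_set_register(), so a
 * RDMSR exit handler (hypothetical names again) only checks the error.
 */
#if 0
	error = emulate_rdmsr(vm, vcpuid, ecx, &retu);
#endif
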
void
restore_guest_msrs(struct vm *vm, int cpu)
{
	int i;
	uint64_t *guest_msrs;

	guest_msrs = vm_guest_msrs(vm, cpu);

	for (i = 0; i < vmm_msr_num; i++) {
		if (emulated_msr(i))
			continue;
		wrmsr(vmm_msr[i].num, guest_msrs[i]);
	}
}

void
restore_host_msrs(struct vm *vm, int cpu)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++) {
		if (emulated_msr(i))
			continue;
		wrmsr(vmm_msr[i].num, vmm_msr[i].hostval);
	}
}

/*
 * Must be called by the CPU-specific code before any guests are
 * created.
 */
void
guest_msr_valid(int msr)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++) {
		if (vmm_msr[i].num == msr && invalid_msr(i)) {
			vmm_msr[i].flags &= ~VMM_MSR_F_INVALID;
		}
	}
}
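
/*
 * Usage sketch: MSR_PAT is the only entry born VMM_MSR_F_INVALID in the
 * table above, so CPU-specific code that can context-switch the guest PAT
 * would whitelist it during its own init, e.g.:
 *
 *	guest_msr_valid(MSR_PAT);
 */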