2 * Copyright (c) 2011 NetApp, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/systm.h>
36 #include <machine/specialreg.h>
38 #include <machine/vmm.h>
39 #include "vmm_lapic.h"
/*
 * Per-MSR handling flags for entries in the vmm_msr[] table below.
 *
 * EMULATE:  the MSR is fully emulated in software; reads/writes never
 *           touch the physical MSR (see emulate_rdmsr()/emulate_wrmsr()).
 * READONLY: guest writes are silently dropped (see emulate_wrmsr()).
 * INVALID:  the MSR is disabled until the CPU-specific backend validates
 *           it at init time via guest_msr_valid(), which clears this bit.
 */
42 #define VMM_MSR_F_EMULATE 0x01
43 #define VMM_MSR_F_READONLY 0x02
44 #define VMM_MSR_F_INVALID 0x04 /* guest_msr_valid() can override this */
/*
 * Table of MSRs that vmm knows how to save/restore or emulate for a guest.
 * The guest's per-vcpu copy of each value lives at the same index in the
 * array returned by vm_guest_msrs().
 *
 * NOTE(review): the extracted view skips several source lines inside this
 * initializer (original lines 53-56 and 60-61) -- additional table entries
 * and/or comments are present in the full file; confirm against upstream
 * before relying on this list being complete.
 */
52 static struct vmm_msr vmm_msr[] = {
57 { MSR_PAT, VMM_MSR_F_EMULATE | VMM_MSR_F_INVALID },
58 { MSR_BIOS_SIGN,VMM_MSR_F_EMULATE },
59 { MSR_MCG_CAP, VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
/* Number of entries in the table; used as the loop bound everywhere below. */
62 #define vmm_msr_num (sizeof(vmm_msr) / sizeof(vmm_msr[0]))
/* The fixed-size per-vcpu guest MSR array must be able to hold the table. */
63 CTASSERT(VMM_MSR_NUM >= vmm_msr_num);
/*
 * Convenience predicates over vmm_msr[idx].flags.  'idx' is an index into
 * vmm_msr[] (as returned by msr_num_to_idx()), not an MSR number.
 */
65 #define readonly_msr(idx) \
66 ((vmm_msr[(idx)].flags & VMM_MSR_F_READONLY) != 0)
68 #define emulated_msr(idx) \
69 ((vmm_msr[(idx)].flags & VMM_MSR_F_EMULATE) != 0)
71 #define invalid_msr(idx) \
72 ((vmm_msr[(idx)].flags & VMM_MSR_F_INVALID) != 0)
/*
 * NOTE(review): fragment of the module-init routine (the function header,
 * original lines ~73-78, is not visible in this extraction).  It caches the
 * host's value of every table MSR so restore_host_msrs() can put it back
 * after a guest has run.
 */
79 for (i = 0; i < vmm_msr_num; i++) {
/*
 * The cached host value is captured once here; if the host later rewrites
 * the MSR (e.g. a microcode update touching MSR_BIOS_SIGN) the cache goes
 * stale -- acknowledged as an open issue by the XXX below.
 */
83 * XXX this assumes that the value of the host msr does not
84 * change after we have cached it.
86 vmm_msr[i].hostval = rdmsr(vmm_msr[i].num);
/*
 * Initialize the per-vcpu guest MSR array for 'cpu' of 'vm' with sane
 * power-on defaults.  Panics if an MSR is added to vmm_msr[] without a
 * corresponding case here, so new table entries cannot be silently left
 * uninitialized.
 *
 * NOTE(review): the switch cases for MSRs other than MSR_PAT (original
 * lines 100-108) are not visible in this extraction.
 */
91 guest_msrs_init(struct vm *vm, int cpu)
96 guest_msrs = vm_guest_msrs(vm, cpu);
98 for (i = 0; i < vmm_msr_num; i++) {
99 switch (vmm_msr[i].num) {
/*
 * Power-on default PAT value: the architectural reset layout of the
 * eight page-attribute entries (WB/WT/UC-/UC repeated twice).
 */
109 guest_msrs[i] = PAT_VALUE(0, PAT_WRITE_BACK) |
110 PAT_VALUE(1, PAT_WRITE_THROUGH) |
111 PAT_VALUE(2, PAT_UNCACHED) |
112 PAT_VALUE(3, PAT_UNCACHEABLE) |
113 PAT_VALUE(4, PAT_WRITE_BACK) |
114 PAT_VALUE(5, PAT_WRITE_THROUGH) |
115 PAT_VALUE(6, PAT_UNCACHED) |
116 PAT_VALUE(7, PAT_UNCACHEABLE);
/* default: a table entry without an initializer above is a programming bug. */
119 panic("guest_msrs_init: missing initialization for msr "
120 "0x%0x", vmm_msr[i].num);
/*
 * Linear search mapping an MSR number to its index in vmm_msr[].
 * Presumably returns -1 when 'num' is not in the table (callers below test
 * for idx < 0) -- the return statements are outside this extracted view.
 */
126 msr_num_to_idx(u_int num)
130 for (i = 0; i < vmm_msr_num; i++)
131 if (vmm_msr[i].num == num)
/*
 * Handle a guest WRMSR exit for vcpu 'cpu' of 'vm'.
 *
 * Local APIC MSRs are delegated to lapic_wrmsr().  For table MSRs the
 * value is stashed in the per-vcpu array and, for non-emulated MSRs, also
 * written to the physical register.  Read-only MSRs drop the write.
 * Unknown or still-invalid MSRs take the (not visible here) error path
 * after the idx check.
 */
138 emulate_wrmsr(struct vm *vm, int cpu, u_int num, uint64_t val)
141 uint64_t *guest_msrs;
/* APIC MSR range: fully handled by the local APIC emulation. */
144 return (lapic_wrmsr(vm, cpu, num, val));
146 idx = msr_num_to_idx(num);
147 if (idx < 0 || invalid_msr(idx))
150 if (!readonly_msr(idx)) {
151 guest_msrs = vm_guest_msrs(vm, cpu);
153 /* Stash the value */
154 guest_msrs[idx] = val;
156 /* Update processor state for non-emulated MSRs */
157 if (!emulated_msr(idx))
158 wrmsr(vmm_msr[idx].num, val);
/*
 * Handle a guest RDMSR exit for vcpu 'cpu' of 'vm'.
 *
 * Local APIC MSRs are delegated to lapic_rdmsr(); table MSRs are answered
 * from the per-vcpu cached copy.  For non-emulated MSRs the physical value
 * must match the cache (a mismatch means the cache-consistency invariant
 * maintained by emulate_wrmsr()/restore_guest_msrs() was violated, hence
 * the panic).  The 64-bit result is finally split across guest RAX/RDX
 * per the RDMSR convention.
 */
165 emulate_rdmsr(struct vm *vm, int cpu, u_int num)
169 uint64_t result, *guest_msrs;
171 if (lapic_msr(num)) {
172 error = lapic_rdmsr(vm, cpu, num, &result);
176 idx = msr_num_to_idx(num);
177 if (idx < 0 || invalid_msr(idx)) {
182 guest_msrs = vm_guest_msrs(vm, cpu);
183 result = guest_msrs[idx];
186 * If this is not an emulated msr register make sure that the processor
187 * state matches our cached state.
189 if (!emulated_msr(idx) && (rdmsr(num) != result)) {
190 panic("emulate_rdmsr: msr 0x%0x has inconsistent cached "
191 "(0x%016lx) and actual (0x%016lx) values", num,
/*
 * Deliver the result: low 32 bits in RAX, high 32 bits in RDX
 * (the eax/edx split itself is on lines not visible here).
 */
201 error = vm_set_register(vm, cpu, VM_REG_GUEST_RAX, eax);
203 panic("vm_set_register(rax) error %d", error);
204 error = vm_set_register(vm, cpu, VM_REG_GUEST_RDX, edx);
206 panic("vm_set_register(rdx) error %d", error);
/*
 * Load the guest's cached MSR values into the physical registers before
 * resuming vcpu 'cpu'.  Presumably emulated MSRs are skipped inside the
 * loop (the filtering lines, original 220-222, are not visible here) --
 * only non-emulated table MSRs need the hardware write.
 */
212 restore_guest_msrs(struct vm *vm, int cpu)
215 uint64_t *guest_msrs;
217 guest_msrs = vm_guest_msrs(vm, cpu);
219 for (i = 0; i < vmm_msr_num; i++) {
223 wrmsr(vmm_msr[i].num, guest_msrs[i]);
/*
 * Counterpart to restore_guest_msrs(): after a guest has run, write the
 * host values cached at module init back into the physical MSRs.  As above,
 * emulated-MSR filtering inside the loop is on lines not visible here.
 */
228 restore_host_msrs(struct vm *vm, int cpu)
232 for (i = 0; i < vmm_msr_num; i++) {
236 wrmsr(vmm_msr[i].num, vmm_msr[i].hostval);
/*
 * Allow the CPU-specific backend (VT-x/SVM code) to mark a table MSR that
 * was registered with VMM_MSR_F_INVALID as actually usable on this host.
 * Scans the table for 'msr' and clears the INVALID flag on a match.
 */
241 * Must be called by the CPU-specific code before any guests are
245 guest_msr_valid(int msr)
249 for (i = 0; i < vmm_msr_num; i++) {
250 if (vmm_msr[i].num == msr && invalid_msr(i)) {
251 vmm_msr[i].flags &= ~VMM_MSR_F_INVALID;