/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/specialreg.h>

#include <machine/vmm.h>
#include "vmm_lapic.h"
#include "vmm_msr.h"
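/*
 * Flags describing how each MSR in the table below is handled.  The
 * semantics follow from the code in this file:
 *
 * VMM_MSR_F_EMULATE:	reads and writes are serviced entirely from the
 *			per-vcpu cache and never touch the hardware MSR.
 * VMM_MSR_F_READONLY:	guest writes succeed but are silently dropped.
 * VMM_MSR_F_INVALID:	the MSR is unavailable to the guest until the
 *			CPU-specific code enables it via guest_msr_valid().
 */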
#define	VMM_MSR_F_EMULATE	0x01
#define	VMM_MSR_F_READONLY	0x02
#define	VMM_MSR_F_INVALID	0x04
struct vmm_msr {
	int		num;		/* MSR number */
	int		flags;		/* VMM_MSR_F_* */
	uint64_t	hostval;	/* host value cached by vmm_msr_init() */
};

static struct vmm_msr vmm_msr[] = {
	{ MSR_PAT,	 VMM_MSR_F_EMULATE | VMM_MSR_F_INVALID },
	{ MSR_BIOS_SIGN, VMM_MSR_F_EMULATE },
	{ MSR_MCG_CAP,	 VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
};

#define	vmm_msr_num	(sizeof(vmm_msr) / sizeof(vmm_msr[0]))

/* The per-vcpu save area is sized by VMM_MSR_NUM and must fit the table. */
CTASSERT(VMM_MSR_NUM >= vmm_msr_num);
#define	readonly_msr(idx)	\
	((vmm_msr[(idx)].flags & VMM_MSR_F_READONLY) != 0)

#define	emulated_msr(idx)	\
	((vmm_msr[(idx)].flags & VMM_MSR_F_EMULATE) != 0)

#define	invalid_msr(idx)	\
	((vmm_msr[(idx)].flags & VMM_MSR_F_INVALID) != 0)
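/*
 * Cache the host's value of every pass-through MSR so that it can be
 * put back by restore_host_msrs() after a guest has run.  Emulated MSRs
 * have no hardware state worth caching and are skipped.
 */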
void
vmm_msr_init(void)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++) {
		if (emulated_msr(i))
			continue;
		/*
		 * XXX this assumes that the value of the host msr does not
		 * change after we have cached it.
		 */
		vmm_msr[i].hostval = rdmsr(vmm_msr[i].num);
	}
}
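/*
 * Set the initial (power-on) value of each guest MSR for vcpu 'cpu'.
 * The PAT initializer below matches the architected power-on default:
 * entries 0-3 and 4-7 are WB, WT, UC- and UC respectively.
 */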
void
guest_msrs_init(struct vm *vm, int cpu)
{
	int i;
	uint64_t *guest_msrs;

	guest_msrs = vm_guest_msrs(vm, cpu);

	for (i = 0; i < vmm_msr_num; i++) {
		switch (vmm_msr[i].num) {
		case MSR_BIOS_SIGN:
		case MSR_MCG_CAP:
			/* BIOS update signature and MCG capabilities read as zero */
			guest_msrs[i] = 0;
			break;
		case MSR_PAT:
			guest_msrs[i] = PAT_VALUE(0, PAT_WRITE_BACK) |
				PAT_VALUE(1, PAT_WRITE_THROUGH)	|
				PAT_VALUE(2, PAT_UNCACHED)	|
				PAT_VALUE(3, PAT_UNCACHEABLE)	|
				PAT_VALUE(4, PAT_WRITE_BACK)	|
				PAT_VALUE(5, PAT_WRITE_THROUGH)	|
				PAT_VALUE(6, PAT_UNCACHED)	|
				PAT_VALUE(7, PAT_UNCACHEABLE);
			break;
		default:
			panic("guest_msrs_init: missing initialization for msr "
			      "0x%0x", vmm_msr[i].num);
		}
	}
}
static int
msr_num_to_idx(u_int num)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++)
		if (vmm_msr[i].num == num)
			return (i);

	return (-1);
}
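/*
 * Handle a guest WRMSR exit.  Local APIC MSRs are forwarded to the APIC
 * emulation.  MSRs that are unknown or still marked invalid are rejected
 * with EINVAL so the caller can decide how to respond (e.g. by injecting
 * #GP into the guest).  Writes to read-only MSRs succeed but are dropped.
 */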
int
emulate_wrmsr(struct vm *vm, int cpu, u_int num, uint64_t val)
{
	int idx;
	uint64_t *guest_msrs;

	if (lapic_msr(num))
		return (lapic_wrmsr(vm, cpu, num, val));

	idx = msr_num_to_idx(num);
	if (idx < 0 || invalid_msr(idx))
		return (EINVAL);

	if (!readonly_msr(idx)) {
		guest_msrs = vm_guest_msrs(vm, cpu);

		/* Stash the value */
		guest_msrs[idx] = val;

		/* Update processor state for non-emulated MSRs */
		if (!emulated_msr(idx))
			wrmsr(vmm_msr[idx].num, val);
	}

	return (0);
}
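/*
 * Handle a guest RDMSR exit.  Returns 1 if the MSR was handled, in which
 * case the 64-bit result has been split across the guest's RAX (low 32
 * bits) and RDX (high 32 bits), mirroring the hardware RDMSR convention.
 */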
int
emulate_rdmsr(struct vm *vm, int cpu, u_int num)
{
	int error, handled, idx;
	uint32_t eax, edx;
	uint64_t result, *guest_msrs;

	handled = 0;
	if (lapic_msr(num)) {
		handled = lapic_rdmsr(vm, cpu, num, &result);
		goto done;
	}
	idx = msr_num_to_idx(num);
	if (idx < 0 || invalid_msr(idx))
		goto done;
	guest_msrs = vm_guest_msrs(vm, cpu);
	result = guest_msrs[idx];
	/*
	 * If this is not an emulated msr register make sure that the processor
	 * state matches our cached state.
	 */
	if (!emulated_msr(idx) && (rdmsr(num) != result)) {
		panic("emulate_rdmsr: msr 0x%0x has inconsistent cached "
		      "(0x%016lx) and actual (0x%016lx) values", num,
		      result, rdmsr(num));
	}
	handled = 1;
done:
	if (handled) {
		eax = result;
		edx = result >> 32;
		error = vm_set_register(vm, cpu, VM_REG_GUEST_RAX, eax);
		if (error)
			panic("vm_set_register(rax) error %d", error);
		error = vm_set_register(vm, cpu, VM_REG_GUEST_RDX, edx);
		if (error)
			panic("vm_set_register(rdx) error %d", error);
	}
	return (handled);
}
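/*
 * restore_guest_msrs() and restore_host_msrs() are the two halves of the
 * MSR context switch: the CPU-specific code loads the guest's cached
 * values before entering the guest and puts the host values (cached by
 * vmm_msr_init()) back afterwards.  Emulated MSRs are skipped in both
 * directions because their state lives only in the per-vcpu cache.
 */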
void
restore_guest_msrs(struct vm *vm, int cpu)
{
	int i;
	uint64_t *guest_msrs;

	guest_msrs = vm_guest_msrs(vm, cpu);

	for (i = 0; i < vmm_msr_num; i++) {
		if (emulated_msr(i))
			continue;
		wrmsr(vmm_msr[i].num, guest_msrs[i]);
	}
}
void
restore_host_msrs(struct vm *vm, int cpu)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++) {
		if (emulated_msr(i))
			continue;
		wrmsr(vmm_msr[i].num, vmm_msr[i].hostval);
	}
}
/*
 * Must be called by the CPU-specific code before any guests are
 * created, to mark an MSR that it can support as valid.
 */
void
guest_msr_valid(int msr)
{
	int i;

	for (i = 0; i < vmm_msr_num; i++) {
		if (vmm_msr[i].num == msr && invalid_msr(i)) {
			vmm_msr[i].flags &= ~VMM_MSR_F_INVALID;
		}
	}
}
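/*
 * Usage sketch (illustrative only, not part of this file): a CPU-specific
 * backend would typically wire these routines up roughly as follows,
 * using only the functions defined above:
 *
 *	vmm_msr_init();			(once, at module load)
 *	guest_msr_valid(MSR_PAT);	(if the backend can support it)
 *	guest_msrs_init(vm, vcpu);	(at vcpu creation)
 *	restore_guest_msrs(vm, vcpu);	(before entering the guest)
 *	restore_host_msrs(vm, vcpu);	(after the guest exits)
 */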