[FreeBSD/releng/10.0.git] / sys / amd64 / vmm / vmm_msr.c
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>

#include <machine/specialreg.h>

#include <machine/vmm.h>
#include "vmm_lapic.h"
#include "vmm_msr.h"

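/*
 * This file maintains a small per-vcpu set of MSRs that the hardware does
 * not context-switch for us.  Pass-through MSRs are written to the
 * processor around guest execution (restore_guest_msrs() and
 * restore_host_msrs()), while emulated MSRs live only in the per-vcpu
 * guest_msrs[] array.  Read-only MSRs silently discard guest writes, and
 * MSRs tagged invalid are rejected unless a CPU backend validates them
 * via guest_msr_valid().
 */
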
#define VMM_MSR_F_EMULATE       0x01
#define VMM_MSR_F_READONLY      0x02
#define VMM_MSR_F_INVALID       0x04  /* guest_msr_valid() can override this */

struct vmm_msr {
        int             num;
        int             flags;
        uint64_t        hostval;
};

static struct vmm_msr vmm_msr[] = {
        { MSR_LSTAR,    0 },
        { MSR_CSTAR,    0 },
        { MSR_STAR,     0 },
        { MSR_SF_MASK,  0 },
        { MSR_PAT,      VMM_MSR_F_EMULATE | VMM_MSR_F_INVALID },
        { MSR_BIOS_SIGN, VMM_MSR_F_EMULATE },
        { MSR_MCG_CAP,  VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
        { MSR_IA32_PLATFORM_ID, VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
        { MSR_IA32_MISC_ENABLE, VMM_MSR_F_EMULATE | VMM_MSR_F_READONLY },
};
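
/*
 * An MSR is added to the set by appending an entry above, subject to the
 * VMM_MSR_NUM bound checked by the CTASSERT below.  For example, a
 * hypothetical pass-through entry (illustrative only, not part of this
 * table) would look like:
 *
 *      { MSR_KGSBASE,  0 },
 *
 * and would also need a corresponding "case MSR_KGSBASE:" arm in the
 * switch statement in guest_msrs_init() below, since unknown MSRs hit
 * the default panic there.
 */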

#define vmm_msr_num     (sizeof(vmm_msr) / sizeof(vmm_msr[0]))
CTASSERT(VMM_MSR_NUM >= vmm_msr_num);

#define readonly_msr(idx)       \
        ((vmm_msr[(idx)].flags & VMM_MSR_F_READONLY) != 0)

#define emulated_msr(idx)       \
        ((vmm_msr[(idx)].flags & VMM_MSR_F_EMULATE) != 0)

#define invalid_msr(idx)        \
        ((vmm_msr[(idx)].flags & VMM_MSR_F_INVALID) != 0)

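/*
 * Cache the host's value of each pass-through MSR.  restore_host_msrs()
 * reloads these cached values after a guest has run.
 */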
void
vmm_msr_init(void)
{
        int i;

        for (i = 0; i < vmm_msr_num; i++) {
                if (emulated_msr(i))
                        continue;
                /*
                 * XXX this assumes that the value of the host msr does not
                 * change after we have cached it.
                 */
                vmm_msr[i].hostval = rdmsr(vmm_msr[i].num);
        }
}

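/*
 * Establish the initial (power-on) values of the guest MSRs for a single
 * vcpu.  The PAT is set to its architectural reset value and
 * IA32_MISC_ENABLE starts from the host's value with unsupported
 * features masked off.
 */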
void
guest_msrs_init(struct vm *vm, int cpu)
{
        int i;
        uint64_t *guest_msrs, misc;

        guest_msrs = vm_guest_msrs(vm, cpu);

        for (i = 0; i < vmm_msr_num; i++) {
                switch (vmm_msr[i].num) {
                case MSR_LSTAR:
                case MSR_CSTAR:
                case MSR_STAR:
                case MSR_SF_MASK:
                case MSR_BIOS_SIGN:
                case MSR_MCG_CAP:
                        guest_msrs[i] = 0;
                        break;
                case MSR_PAT:
                        guest_msrs[i] = PAT_VALUE(0, PAT_WRITE_BACK)      |
                                PAT_VALUE(1, PAT_WRITE_THROUGH)   |
                                PAT_VALUE(2, PAT_UNCACHED)        |
                                PAT_VALUE(3, PAT_UNCACHEABLE)     |
                                PAT_VALUE(4, PAT_WRITE_BACK)      |
                                PAT_VALUE(5, PAT_WRITE_THROUGH)   |
                                PAT_VALUE(6, PAT_UNCACHED)        |
                                PAT_VALUE(7, PAT_UNCACHEABLE);
                        break;
                case MSR_IA32_MISC_ENABLE:
                        misc = rdmsr(MSR_IA32_MISC_ENABLE);
                        /*
                         * Set mandatory bits
                         *  11:   branch trace disabled
                         *  12:   PEBS unavailable
                         * Clear unsupported features
                         *  16:   SpeedStep enable
                         *  18:   enable MONITOR FSM
                         */
                        misc |= (1 << 12) | (1 << 11);
                        misc &= ~((1 << 18) | (1 << 16));
                        guest_msrs[i] = misc;
                        break;
                case MSR_IA32_PLATFORM_ID:
                        guest_msrs[i] = 0;
                        break;
                default:
                        panic("guest_msrs_init: missing initialization for msr "
                              "0x%0x", vmm_msr[i].num);
                }
        }
}

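/*
 * Map an MSR number to its index in the vmm_msr[] table, or -1 if the
 * MSR is not handled here.  A linear scan is adequate given the small,
 * fixed size of the table.
 */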
static int
msr_num_to_idx(u_int num)
{
        int i;

        for (i = 0; i < vmm_msr_num; i++)
                if (vmm_msr[i].num == num)
                        return (i);

        return (-1);
}

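/*
 * Handle a WRMSR exit on behalf of the guest.  Local APIC MSRs are
 * forwarded to the lapic code; everything else must be in vmm_msr[] and
 * not marked invalid, otherwise EINVAL is returned and the caller
 * decides how to reflect the failure to the guest.
 *
 * A CPU backend's exit handler would typically pass in the value from
 * the guest's %edx:%eax, along the lines of (illustrative only):
 *
 *      error = emulate_wrmsr(vm, vcpuid, msr_num,
 *          (uint64_t)edx << 32 | eax);
 */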
int
emulate_wrmsr(struct vm *vm, int cpu, u_int num, uint64_t val)
{
        int idx;
        uint64_t *guest_msrs;

        if (lapic_msr(num))
                return (lapic_wrmsr(vm, cpu, num, val));

        idx = msr_num_to_idx(num);
        if (idx < 0 || invalid_msr(idx))
                return (EINVAL);

        if (!readonly_msr(idx)) {
                guest_msrs = vm_guest_msrs(vm, cpu);

                /* Stash the value */
                guest_msrs[idx] = val;

                /* Update processor state for non-emulated MSRs */
                if (!emulated_msr(idx))
                        wrmsr(vmm_msr[idx].num, val);
        }

        return (0);
}

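/*
 * Handle a RDMSR exit on behalf of the guest.  On success the result is
 * split across the guest's %rax (low 32 bits) and %rdx (high 32 bits),
 * mirroring the behavior of the RDMSR instruction itself.
 */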
int
emulate_rdmsr(struct vm *vm, int cpu, u_int num)
{
        int error, idx;
        uint32_t eax, edx;
        uint64_t result, *guest_msrs;

        if (lapic_msr(num)) {
                error = lapic_rdmsr(vm, cpu, num, &result);
                goto done;
        }

        idx = msr_num_to_idx(num);
        if (idx < 0 || invalid_msr(idx)) {
                error = EINVAL;
                goto done;
        }

        guest_msrs = vm_guest_msrs(vm, cpu);
        result = guest_msrs[idx];

        /*
         * If this is not an emulated MSR, make sure that the processor
         * state matches our cached state.
         */
        if (!emulated_msr(idx) && (rdmsr(num) != result)) {
                panic("emulate_rdmsr: msr 0x%0x has inconsistent cached "
                      "(0x%016lx) and actual (0x%016lx) values", num,
                      result, rdmsr(num));
        }

        error = 0;

done:
        if (error == 0) {
                eax = result;
                edx = result >> 32;
                error = vm_set_register(vm, cpu, VM_REG_GUEST_RAX, eax);
                if (error)
                        panic("vm_set_register(rax) error %d", error);
                error = vm_set_register(vm, cpu, VM_REG_GUEST_RDX, edx);
                if (error)
                        panic("vm_set_register(rdx) error %d", error);
        }
        return (error);
}

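/*
 * Load the guest's pass-through MSR values into the hardware registers.
 * Intended to be called by the CPU backend before resuming the guest;
 * emulated MSRs never touch the hardware and are skipped.
 */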
void
restore_guest_msrs(struct vm *vm, int cpu)
{
        int i;
        uint64_t *guest_msrs;

        guest_msrs = vm_guest_msrs(vm, cpu);

        for (i = 0; i < vmm_msr_num; i++) {
                if (emulated_msr(i))
                        continue;
                wrmsr(vmm_msr[i].num, guest_msrs[i]);
        }
}

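/*
 * Restore the host's pass-through MSR values, cached by vmm_msr_init(),
 * after the guest has run.  The 'vm' and 'cpu' arguments are not used
 * here but keep the signature symmetric with restore_guest_msrs().
 */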
void
restore_host_msrs(struct vm *vm, int cpu)
{
        int i;

        for (i = 0; i < vmm_msr_num; i++) {
                if (emulated_msr(i))
                        continue;
                wrmsr(vmm_msr[i].num, vmm_msr[i].hostval);
        }
}

/*
 * Mark 'msr' as a valid guest MSR by clearing its VMM_MSR_F_INVALID
 * flag.  Must be called by the CPU-specific code before any guests are
 * created.
 */
void
guest_msr_valid(int msr)
{
        int i;

        for (i = 0; i < vmm_msr_num; i++) {
                if (vmm_msr[i].num == msr && invalid_msr(i)) {
                        vmm_msr[i].flags &= ~VMM_MSR_F_INVALID;
                }
        }
}
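
/*
 * For example, a CPU backend that is able to save and restore the PAT in
 * hardware might validate it during its own initialization (illustrative
 * only):
 *
 *      guest_msr_valid(MSR_PAT);
 */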