/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>

#include "vmm_host.h"
#include "vmm_ktr.h"
#include "vmm_util.h"
#include "x86.h"

SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD, 0, NULL);
#define	CPUID_VM_HIGH		0x40000000

static const char bhyve_id[12] = "bhyve bhyve ";
static uint64_t bhyve_xcpuids;
SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
    "Number of times an unknown cpuid leaf was accessed");
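/*
 * The counter is exported read/write, so it can be inspected (or reset)
 * from userland with "sysctl hw.vmm.bhyve_xcpuids".
 */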
/*
 * The default CPU topology is a single thread per package.
 */
static u_int threads_per_core = 1;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, threads_per_core, CTLFLAG_RDTUN,
    &threads_per_core, 0, NULL);

static u_int cores_per_package = 1;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, cores_per_package, CTLFLAG_RDTUN,
    &cores_per_package, 0, NULL);

static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);
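/*
 * All three topology knobs are CTLFLAG_RDTUN: read-only at runtime and
 * initialized from loader tunables, e.g. in /boot/loader.conf:
 *
 *	hw.vmm.topology.threads_per_core="2"
 *	hw.vmm.topology.cores_per_package="2"
 */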
/*
 * Round up to the next power of two, if necessary, and then take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
log2(u_int x)
{

	return (fls(x << (1 - powerof2(x))) - 1);
}
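/*
 * For example, log2(4) == 2 and log2(6) == 3, because 6 is first
 * rounded up to 8: powerof2(x) evaluates to 1 for powers of two, so the
 * shift doubles only non-power-of-two values before fls() picks the
 * index of the highest set bit.  fls(0) == 0, giving -1 for 0.
 */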
int
x86_emulate_cpuid(struct vm *vm, int vcpu_id,
		  uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, level, width, x2apic_id;
	unsigned int func, regs[4], logical_cpus;
	enum x2apic_state x2apic_state;

	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", *eax, *ecx);
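	/*
	 * (VCPU_CTR2() is a KTR tracing macro from vmm_ktr.h; it
	 * compiles to a no-op on kernels built without KTR.)
	 */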
	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && *eax >= 0x80000000) {
		if (*eax > cpu_exthigh)
			*eax = cpu_exthigh;
	} else if (*eax >= 0x40000000) {
		if (*eax > CPUID_VM_HIGH)
			*eax = CPUID_VM_HIGH;
	} else if (*eax > cpu_high) {
		*eax = cpu_high;
	}

	func = *eax;
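	/*
	 * The clamping above means that, for example, a request for
	 * leaf 0x20 on a host with cpu_high == 0xd returns leaf 0xd,
	 * and a request for 0x4000ffff returns leaf CPUID_VM_HIGH.
	 */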
	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
		/*
		 * Pass these through to the guest
		 */
	case CPUID_0000_0000:
	case CPUID_0000_0002:
	case CPUID_0000_0003:
	case CPUID_8000_0000:
	case CPUID_8000_0002:
	case CPUID_8000_0003:
	case CPUID_8000_0004:
	case CPUID_8000_0006:
		cpuid_count(*eax, *ecx, regs);
		break;
	case CPUID_8000_0008:
		cpuid_count(*eax, *ecx, regs);
		if (vmm_is_amd()) {
			/*
			 * XXX this might appear silly because AMD
			 * cpus don't have threads.
			 *
			 * However this matches the logical cpus as
			 * advertised by leaf 0x1 and will work even
			 * if the 'threads_per_core' tunable is set
			 * incorrectly on an AMD host.
			 */
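			/*
			 * (ECX bits 7:0 of leaf 0x80000008 are the
			 * "NC" field: the number of physical cores in
			 * the package, minus one.)
			 */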
			logical_cpus = threads_per_core *
			    cores_per_package;
			regs[2] = logical_cpus - 1;
		}
		break;
	case CPUID_8000_0001:
		cpuid_count(*eax, *ecx, regs);

		/*
		 * Hide SVM and Topology Extension features from guest.
		 */
		regs[2] &= ~(AMDID2_SVM | AMDID2_TOPOLOGY);

		/*
		 * Hide rdtscp/ia32_tsc_aux until we know how to deal
		 * with them.
		 */
		regs[3] &= ~AMDID_RDTSCP;
		break;
	case CPUID_8000_0007:
		cpuid_count(*eax, *ecx, regs);
		/*
		 * If the host TSCs are not synchronized across
		 * physical cpus then we cannot advertise an
		 * invariant tsc to a vcpu.
		 *
		 * XXX This still falls short because the vcpu
		 * can observe the TSC moving backwards as it
		 * migrates across physical cpus. But at least
		 * it should discourage the guest from using the
		 * TSC to keep track of time.
		 */
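		/*
		 * (smp_tsc records the host's boot-time determination
		 * of whether the TSCs of all CPUs are synchronized.)
		 */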
		if (!smp_tsc)
			regs[3] &= ~AMDPM_TSC_INVARIANT;
		break;
	case CPUID_0000_0001:
		do_cpuid(1, regs);

		error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
		if (error) {
			panic("x86_emulate_cpuid: error %d "
			    "fetching x2apic state", error);
		}
		/*
		 * Override the APIC ID only in ebx
		 */
		regs[1] &= ~(CPUID_LOCAL_APIC_ID);
		regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);
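		/*
		 * (EBX bits 31:24 of leaf 1 hold the initial local
		 * APIC ID, so each vcpu enumerates its own vcpu_id.)
		 */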
		/*
		 * Don't expose VMX, SpeedStep or TME capability.
		 * Advertise x2APIC capability and Hypervisor guest.
		 */
		regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);

		regs[2] |= CPUID2_HV;

		if (x2apic_state != X2APIC_DISABLED)
			regs[2] |= CPUID2_X2APIC;
		else
			regs[2] &= ~CPUID2_X2APIC;
		/*
		 * Only advertise CPUID2_XSAVE in the guest if
		 * the host is using XSAVE.
		 */
		if (!(regs[2] & CPUID2_OSXSAVE))
			regs[2] &= ~CPUID2_XSAVE;

		/*
		 * If CPUID2_XSAVE is being advertised and the
		 * guest has set CR4_XSAVE, set CPUID2_OSXSAVE.
		 */
		regs[2] &= ~CPUID2_OSXSAVE;
		if (regs[2] & CPUID2_XSAVE) {
			error = vm_get_register(vm, vcpu_id,
			    VM_REG_GUEST_CR4, &cr4);
			if (error)
				panic("x86_emulate_cpuid: error %d "
				    "fetching %%cr4", error);
			if (cr4 & CR4_XSAVE)
				regs[2] |= CPUID2_OSXSAVE;
		}
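		/*
		 * Note that CPUID2_OSXSAVE mirrors the guest's
		 * CR4.OSXSAVE rather than the host's, which is why it
		 * is recomputed on every emulated cpuid.
		 */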
		/*
		 * Hide monitor/mwait until we know how to deal with
		 * these instructions.
		 */
		regs[2] &= ~CPUID2_MON;

		/*
		 * Hide the performance and debug features.
		 */
		regs[2] &= ~CPUID2_PDCM;

		/*
		 * No TSC deadline support in the APIC yet
		 */
		regs[2] &= ~CPUID2_TSCDLT;

		/*
		 * Hide thermal monitoring
		 */
		regs[3] &= ~(CPUID_ACPI | CPUID_TM);

		/*
		 * Machine check handling is done in the host.
		 * Hide MTRR capability.
		 */
		regs[3] &= ~(CPUID_MCA | CPUID_MCE | CPUID_MTRR);

		/*
		 * Hide the debug store capability.
		 */
		regs[3] &= ~CPUID_DS;
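		/*
		 * EBX bits 23:16 of leaf 1 report the number of
		 * logical processors in the package; CPUID_HTT must be
		 * set in EDX for the guest to treat that field as
		 * valid.
		 */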
		logical_cpus = threads_per_core * cores_per_package;
		regs[1] &= ~CPUID_HTT_CORES;
		regs[1] |= (logical_cpus & 0xff) << 16;
		regs[3] |= CPUID_HTT;
		break;
	case CPUID_0000_0004:
		cpuid_count(*eax, *ecx, regs);

		if (regs[0] || regs[1] || regs[2] || regs[3]) {
			regs[0] &= 0x3ff;
			regs[0] |= (cores_per_package - 1) << 26;
			/*
			 * Cache topology:
			 * - L1 and L2 are shared only by the logical
			 *   processors in a single core.
			 * - L3 and above are shared by all logical
			 *   processors in the package.
			 */
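			/*
			 * (In leaf 4, EAX bits 31:26 hold "cores per
			 * package - 1", bits 25:14 "logical processors
			 * sharing this cache - 1", and bits 7:5 the
			 * cache level being described.)
			 */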
			logical_cpus = threads_per_core;
			level = (regs[0] >> 5) & 0x7;
			if (level >= 3)
				logical_cpus *= cores_per_package;
			regs[0] |= (logical_cpus - 1) << 14;
		}
		break;
	case CPUID_0000_0007:
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/* leaf 0 */
		if (*ecx == 0) {
			cpuid_count(*eax, *ecx, regs);

			/* Only leaf 0 is supported */
			regs[0] = 0;

			/*
			 * Expose known-safe features.
			 */
			regs[1] &= (CPUID_STDEXT_FSGSBASE |
			    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
			    CPUID_STDEXT_AVX2 | CPUID_STDEXT_BMI2 |
			    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
			    CPUID_STDEXT_AVX512F |
			    CPUID_STDEXT_AVX512PF |
			    CPUID_STDEXT_AVX512ER |
			    CPUID_STDEXT_AVX512CD);
			regs[2] = 0;
			regs[3] = 0;
			/* Advertise INVPCID if it is enabled. */
			error = vm_get_capability(vm, vcpu_id,
			    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
			if (error == 0 && enable_invpcid)
				regs[1] |= CPUID_STDEXT_INVPCID;
		}
		break;
	case CPUID_0000_0006:
	case CPUID_0000_000A:
		/*
		 * Handle the access, but report 0 for
		 * all options
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;
	case CPUID_0000_000B:
		/*
		 * Processor topology enumeration
		 */
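		/*
		 * For each input level in *ecx, EAX bits 4:0 give the
		 * number of bits to shift the x2APIC ID right to reach
		 * the next level, EBX bits 15:0 the logical processors
		 * at this level, ECX bits 15:8 the level type, ECX
		 * bits 7:0 an echo of the input level, and EDX the
		 * vcpu's x2APIC ID.
		 */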
		if (*ecx == 0) {
			logical_cpus = threads_per_core;
			width = log2(logical_cpus);
			level = CPUID_TYPE_SMT;
			x2apic_id = vcpu_id;
		}

		if (*ecx == 1) {
			logical_cpus = threads_per_core *
			    cores_per_package;
			width = log2(logical_cpus);
			level = CPUID_TYPE_CORE;
			x2apic_id = vcpu_id;
		}

		if (!cpuid_leaf_b || *ecx >= 2) {
			width = 0;
			logical_cpus = 0;
			level = 0;
			x2apic_id = 0;
		}

		regs[0] = width & 0x1f;
		regs[1] = logical_cpus & 0xffff;
		regs[2] = (level << 8) | (*ecx & 0xff);
		regs[3] = x2apic_id;
		break;
	case CPUID_0000_000D:
		limits = vmm_get_xsave_limits();
		if (!limits->xsave_enabled) {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		}

		cpuid_count(*eax, *ecx, regs);
		switch (*ecx) {
		case 0:
			/*
			 * Only permit the guest to use bits
			 * that are active in the host in
			 * %xcr0.  Also, claim that the
			 * maximum save area size is
			 * equivalent to the host's current
			 * save area size.  Since this runs
			 * "inside" of vmrun(), it runs with
			 * the guest's xcr0, so the current
			 * save area size is correct as-is.
			 */
			regs[0] &= limits->xcr0_allowed;
			regs[2] = limits->xsave_max_size;
			regs[3] &= (limits->xcr0_allowed >> 32);
			break;
		case 1:
			/* Only permit XSAVEOPT. */
			regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		default:
			/*
			 * If the leaf is for a permitted feature,
			 * pass through as-is, otherwise return
			 * all zeroes.
			 */
			if (!(limits->xcr0_allowed & (1ul << *ecx))) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
		}
		break;
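	/*
	 * Hypervisor vendor leaf: following the convention used by
	 * other hypervisors, EBX, ECX and EDX return the 12-byte
	 * signature ("bhyve bhyve ") and EAX the highest hypervisor
	 * leaf implemented.
	 */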
	case 0x40000000:
		regs[0] = CPUID_VM_HIGH;
		bcopy(bhyve_id, &regs[1], 4);
		bcopy(bhyve_id + 4, &regs[2], 4);
		bcopy(bhyve_id + 8, &regs[3], 4);
		break;
	default:
		/*
		 * The leaf value has already been clamped so
		 * simply pass this through, keeping count of
		 * how many unhandled leaf values have been seen.
		 */
		atomic_add_long(&bhyve_xcpuids, 1);
		cpuid_count(*eax, *ecx, regs);
		break;
	}

	*eax = regs[0];
	*ebx = regs[1];
	*ecx = regs[2];
	*edx = regs[3];

	return (1);
}