/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>

#include "vmm_host.h"
#include "vmm_ktr.h"
#include "vmm_util.h"
#include "x86.h"
SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD, 0, NULL);
#define CPUID_VM_HIGH           0x40000000

static const char bhyve_id[12] = "bhyve bhyve ";

static uint64_t bhyve_xcpuids;
SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
    "Number of times an unknown cpuid leaf was accessed");
/*
 * The default CPU topology is a single thread per package.
 */
static u_int threads_per_core = 1;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, threads_per_core, CTLFLAG_RDTUN,
    &threads_per_core, 0, NULL);

static u_int cores_per_package = 1;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, cores_per_package, CTLFLAG_RDTUN,
    &cores_per_package, 0, NULL);

static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);
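
/*
 * Usage sketch (an addition for illustration, not from the original
 * source): all three knobs are CTLFLAG_RDTUN, so they can only be set
 * as loader tunables before the vmm module initializes, e.g. in
 * loader.conf:
 *
 *      hw.vmm.topology.threads_per_core=2
 *      hw.vmm.topology.cores_per_package=2
 *      hw.vmm.topology.cpuid_leaf_b=1
 */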
/*
 * Round up to the next power of two, if necessary, and then take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
log2(u_int x)
{

        return (fls(x << (1 - powerof2(x))) - 1);
}
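
/*
 * Worked examples for log2() (added for illustration, not in the
 * original source): log2(1) == 0 and log2(4) == 2 for exact powers of
 * two; log2(3) first rounds up via the shift, giving fls(6) - 1 == 2;
 * and log2(0) == -1 because fls(0) == 0.
 */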
int
x86_emulate_cpuid(struct vm *vm, int vcpu_id,
    uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
        const struct xsave_limits *limits;
        uint64_t cr4;
        int error, enable_invpcid, level, width, x2apic_id;
        unsigned int func, regs[4], logical_cpus;
        enum x2apic_state x2apic_state;

        VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", *eax, *ecx);
        /*
         * Requests for invalid CPUID levels should map to the highest
         * available level instead.
         */
        if (cpu_exthigh != 0 && *eax >= 0x80000000) {
                if (*eax > cpu_exthigh)
                        *eax = cpu_exthigh;
        } else if (*eax >= 0x40000000) {
                if (*eax > CPUID_VM_HIGH)
                        *eax = CPUID_VM_HIGH;
        } else if (*eax > cpu_high) {
                *eax = cpu_high;
        }
        func = *eax;

        /*
         * In general the approach used for CPU topology is to
         * advertise a flat topology where all CPUs are packages with
         * no multi-core or SMT.
         */
        switch (func) {
                /*
                 * Pass these through to the guest
                 */
                case CPUID_0000_0000:
                case CPUID_0000_0002:
                case CPUID_0000_0003:
                case CPUID_8000_0000:
                case CPUID_8000_0002:
                case CPUID_8000_0003:
                case CPUID_8000_0004:
                case CPUID_8000_0006:
                        cpuid_count(*eax, *ecx, regs);
                        break;
                case CPUID_8000_0008:
                        cpuid_count(*eax, *ecx, regs);
                        if (vmm_is_amd()) {
                                /*
                                 * XXX this might appear silly because AMD
                                 * cpus don't have threads.
                                 *
                                 * However this matches the logical cpus as
                                 * advertised by leaf 0x1 and will work even
                                 * if the 'threads_per_core' tunable is set
                                 * incorrectly on an AMD host.
                                 */
                                logical_cpus = threads_per_core *
                                    cores_per_package;
                                regs[2] = logical_cpus - 1;
                        }
                        break;
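                        /*
                         * Worked example (illustrative values, not from
                         * the original source): with threads_per_core = 2
                         * and cores_per_package = 2, ECX[7:0] of leaf
                         * 0x80000008 reads 3, i.e. four logical cpus per
                         * package, consistent with leaf 0x1.
                         */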
                case CPUID_8000_0001:
                        cpuid_count(*eax, *ecx, regs);

                        /*
                         * Hide SVM and Topology Extension features from guest.
                         */
                        regs[2] &= ~(AMDID2_SVM | AMDID2_TOPOLOGY);

                        /*
                         * Don't advertise extended performance counter MSRs
                         * to the guest.
                         */
                        regs[2] &= ~AMDID2_PCXC;
                        regs[2] &= ~AMDID2_PNXC;
                        regs[2] &= ~AMDID2_PTSCEL2I;

                        /*
                         * Don't advertise Instruction Based Sampling feature.
                         */
                        regs[2] &= ~AMDID2_IBS;

                        /* NodeID MSR not available */
                        regs[2] &= ~AMDID2_NODE_ID;

                        /* Don't advertise the OS visible workaround feature */
                        regs[2] &= ~AMDID2_OSVW;

                        /*
                         * Hide rdtscp/ia32_tsc_aux until we know how
                         * to deal with them.
                         */
                        regs[3] &= ~AMDID_RDTSCP;
                        break;
                case CPUID_8000_0007:
                        /*
                         * AMD uses this leaf to advertise the processor's
                         * power monitoring and RAS capabilities. These
                         * features are hardware-specific and exposing
                         * them to a guest doesn't make a lot of sense.
                         *
                         * Intel uses this leaf only to advertise the
                         * "Invariant TSC" feature with all other bits
                         * being reserved (set to zero).
                         */
                        regs[0] = 0;
                        regs[1] = 0;
                        regs[2] = 0;
                        regs[3] = 0;

                        /*
                         * "Invariant TSC" can be advertised to the guest if:
                         * - host TSC frequency is invariant
                         * - host TSCs are synchronized across physical cpus
                         *
                         * XXX This still falls short because the vcpu
                         * can observe the TSC moving backwards as it
                         * migrates across physical cpus. But at least
                         * it should discourage the guest from using the
                         * TSC to keep track of time.
                         */
                        if (tsc_is_invariant && smp_tsc)
                                regs[3] |= AMDPM_TSC_INVARIANT;
                        break;
                case CPUID_0000_0001:
                        cpuid_count(*eax, *ecx, regs);

                        error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
                        if (error) {
                                panic("x86_emulate_cpuid: error %d "
                                    "fetching x2apic state", error);
                        }

                        /*
                         * Override the APIC ID only in ebx
                         */
                        regs[1] &= ~(CPUID_LOCAL_APIC_ID);
                        regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);
                        /*
                         * Don't expose VMX, SpeedStep, TME or SMX capability.
                         * Advertise x2APIC capability and Hypervisor guest.
                         */
                        regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
                        regs[2] &= ~(CPUID2_SMX);

                        regs[2] |= CPUID2_HV;

                        if (x2apic_state != X2APIC_DISABLED)
                                regs[2] |= CPUID2_X2APIC;
                        else
                                regs[2] &= ~CPUID2_X2APIC;
                        /*
                         * Only advertise CPUID2_XSAVE in the guest if
                         * the host is using XSAVE.
                         */
                        if (!(regs[2] & CPUID2_OSXSAVE))
                                regs[2] &= ~CPUID2_XSAVE;

                        /*
                         * If CPUID2_XSAVE is being advertised and the
                         * guest has set CR4_XSAVE, set
                         * CPUID2_OSXSAVE.
                         */
                        regs[2] &= ~CPUID2_OSXSAVE;
                        if (regs[2] & CPUID2_XSAVE) {
                                error = vm_get_register(vm, vcpu_id,
                                    VM_REG_GUEST_CR4, &cr4);
                                if (error)
                                        panic("x86_emulate_cpuid: error %d "
                                            "fetching %%cr4", error);
                                if (cr4 & CR4_XSAVE)
                                        regs[2] |= CPUID2_OSXSAVE;
                        }
                        /*
                         * Hide monitor/mwait until we know how to deal with
                         * these instructions.
                         */
                        regs[2] &= ~CPUID2_MON;

                        /*
                         * Hide the performance and debug features.
                         */
                        regs[2] &= ~CPUID2_PDCM;

                        /*
                         * No TSC deadline support in the APIC yet
                         */
                        regs[2] &= ~CPUID2_TSCDLT;

                        /*
                         * Hide thermal monitoring
                         */
                        regs[3] &= ~(CPUID_ACPI | CPUID_TM);

                        /*
                         * Hide the debug store capability.
                         */
                        regs[3] &= ~CPUID_DS;

                        /*
                         * Advertise the Machine Check and MTRR capability.
                         *
                         * Some guest OSes (e.g. Windows) will not boot if
                         * these features are absent.
                         */
                        regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

                        logical_cpus = threads_per_core * cores_per_package;
                        regs[1] &= ~CPUID_HTT_CORES;
                        regs[1] |= (logical_cpus & 0xff) << 16;
                        regs[3] |= CPUID_HTT;
                        break;
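                        /*
                         * Worked example (illustrative values, not from
                         * the original source): with threads_per_core = 2
                         * and cores_per_package = 2, EBX[23:16] reports 4
                         * logical processors and CPUID_HTT marks that
                         * count as valid.
                         */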
                case CPUID_0000_0004:
                        cpuid_count(*eax, *ecx, regs);

                        if (regs[0] || regs[1] || regs[2] || regs[3]) {
                                regs[0] &= 0x3ff;
                                regs[0] |= (cores_per_package - 1) << 26;
                                /*
                                 * Cache topology:
                                 * - L1 and L2 are shared only by the logical
                                 *   processors in a single core.
                                 * - L3 and above are shared by all logical
                                 *   processors in the package.
                                 */
                                logical_cpus = threads_per_core;
                                level = (regs[0] >> 5) & 0x7;
                                if (level >= 3)
                                        logical_cpus *= cores_per_package;
                                regs[0] |= (logical_cpus - 1) << 14;
                        }
                        break;
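                        /*
                         * Worked example (illustrative values, not from
                         * the original source): with threads_per_core = 2
                         * and cores_per_package = 2, an L1/L2 descriptor
                         * encodes 2 - 1 = 1 sharing thread in EAX[25:14],
                         * an L3 descriptor (level >= 3) encodes 4 - 1 = 3,
                         * and EAX[31:26] always encodes 2 - 1 = 1 core.
                         */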
                case CPUID_0000_0007:
                        regs[0] = 0;
                        regs[1] = 0;
                        regs[2] = 0;
                        regs[3] = 0;

                        /* leaf 0 */
                        if (*ecx == 0) {
                                cpuid_count(*eax, *ecx, regs);

                                /* Only leaf 0 is supported */
                                regs[0] = 0;

                                /*
                                 * Expose known-safe features.
                                 */
                                regs[1] &= (CPUID_STDEXT_FSGSBASE |
                                    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
                                    CPUID_STDEXT_AVX2 | CPUID_STDEXT_BMI2 |
                                    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
                                    CPUID_STDEXT_AVX512F |
                                    CPUID_STDEXT_AVX512PF |
                                    CPUID_STDEXT_AVX512ER |
                                    CPUID_STDEXT_AVX512CD);
                                regs[2] = 0;
                                regs[3] = 0;

                                /* Advertise INVPCID if it is enabled. */
                                error = vm_get_capability(vm, vcpu_id,
                                    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
                                if (error == 0 && enable_invpcid)
                                        regs[1] |= CPUID_STDEXT_INVPCID;
                        }
                        break;
                case CPUID_0000_0006:
                        regs[0] = CPUTPM1_ARAT;
                        regs[1] = 0;
                        regs[2] = 0;
                        regs[3] = 0;
                        break;

                case CPUID_0000_000A:
                        /*
                         * Handle the access, but report 0 for
                         * all options
                         */
                        regs[0] = 0;
                        regs[1] = 0;
                        regs[2] = 0;
                        regs[3] = 0;
                        break;
                case CPUID_0000_000B:
                        /*
                         * Processor topology enumeration
                         */
                        if (*ecx == 0) {
                                logical_cpus = threads_per_core;
                                width = log2(logical_cpus);
                                level = CPUID_TYPE_SMT;
                                x2apic_id = vcpu_id;
                        }

                        if (*ecx == 1) {
                                logical_cpus = threads_per_core *
                                    cores_per_package;
                                width = log2(logical_cpus);
                                level = CPUID_TYPE_CORE;
                                x2apic_id = vcpu_id;
                        }

                        if (!cpuid_leaf_b || *ecx >= 2) {
                                width = 0;
                                logical_cpus = 0;
                                level = 0;
                                x2apic_id = 0;
                        }

                        regs[0] = width & 0x1f;
                        regs[1] = logical_cpus & 0xffff;
                        regs[2] = (level << 8) | (*ecx & 0xff);
                        regs[3] = x2apic_id;
                        break;
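                        /*
                         * Worked example (illustrative values, not from
                         * the original source): with threads_per_core = 2
                         * and cores_per_package = 2, ECX=0 yields width 1
                         * and 2 logical cpus at the SMT level, while
                         * ECX=1 yields width 2 and 4 logical cpus at the
                         * core level, agreeing with leaves 0x1 and 0x4.
                         */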
                case CPUID_0000_000D:
                        limits = vmm_get_xsave_limits();
                        if (!limits->xsave_enabled) {
                                regs[0] = 0;
                                regs[1] = 0;
                                regs[2] = 0;
                                regs[3] = 0;
                                break;
                        }

                        cpuid_count(*eax, *ecx, regs);
                        switch (*ecx) {
                        case 0:
                                /*
                                 * Only permit the guest to use bits
                                 * that are active in the host in
                                 * %xcr0.  Also, claim that the
                                 * maximum save area size is
                                 * equivalent to the host's current
                                 * save area size.  Since this runs
                                 * "inside" of vmrun(), it runs with
                                 * the guest's xcr0, so the current
                                 * save area size is correct as-is.
                                 */
                                regs[0] &= limits->xcr0_allowed;
                                regs[2] = limits->xsave_max_size;
                                regs[3] &= (limits->xcr0_allowed >> 32);
                                break;
                        case 1:
                                /* Only permit XSAVEOPT. */
                                regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
                                regs[1] = 0;
                                regs[2] = 0;
                                regs[3] = 0;
                                break;
                        default:
                                /*
                                 * If the leaf is for a permitted feature,
                                 * pass through as-is, otherwise return
                                 * all zeroes.
                                 */
                                if (!(limits->xcr0_allowed & (1ul << *ecx))) {
                                        regs[0] = 0;
                                        regs[1] = 0;
                                        regs[2] = 0;
                                        regs[3] = 0;
                                }
                                break;
                        }
                        break;
                case 0x40000000:
                        regs[0] = CPUID_VM_HIGH;
                        bcopy(bhyve_id, &regs[1], 4);
                        bcopy(bhyve_id + 4, &regs[2], 4);
                        bcopy(bhyve_id + 8, &regs[3], 4);
                        break;
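                        /*
                         * Note (added for illustration): the guest reads
                         * the 12-byte signature "bhyve bhyve " back in
                         * EBX:ECX:EDX, the conventional layout for a
                         * hypervisor vendor-id leaf.
                         */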
                default:
                        /*
                         * The leaf value has already been clamped so
                         * simply pass this through, keeping count of
                         * how many unhandled leaf values have been seen.
                         */
                        atomic_add_long(&bhyve_xcpuids, 1);
                        cpuid_count(*eax, *ecx, regs);
                        break;
        }

        /* Hand the emulated leaf back to the caller in the four registers. */
        *eax = regs[0];
        *ebx = regs[1];
        *ecx = regs[2];
        *edx = regs[3];

        return (1);
}

bool
vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability cap)
{
        bool rv;

        KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d",
            __func__, cap));

        /*
         * Simply passthrough the capabilities of the host cpu for now.
         */
        rv = false;
        switch (cap) {
        case VCC_NO_EXECUTE:
                if (amd_feature & AMDID_NX)
                        rv = true;
                break;
        case VCC_FFXSR:
                if (amd_feature & AMDID_FFXSR)
                        rv = true;
                break;
        case VCC_TCE:
                if (amd_feature2 & AMDID2_TCE)
                        rv = true;
                break;
        default:
                panic("%s: unknown vm_cpu_capability %d", __func__, cap);
        }
        return (rv);
}
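
/*
 * Usage sketch (hypothetical caller, not part of this file): a CPU
 * backend could gate a guest-visible feature on the host capability:
 *
 *      if (!vm_cpuid_capability(vm, vcpuid, VCC_NO_EXECUTE))
 *              mask_nx_for_guest();    (hypothetical helper)
 */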