/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>

#include "vmm_host.h"
#include "vmm_ktr.h"
#include "vmm_util.h"
#include "x86.h"

SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD, 0, NULL);

#define	CPUID_VM_HIGH		0x40000000

static const char bhyve_id[12] = "bhyve bhyve ";

static uint64_t bhyve_xcpuids;
SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
    "Number of times an unknown cpuid leaf was accessed");

#if __FreeBSD_version < 1200060	/* Remove after 11 EOL helps MFCing */
extern u_int threads_per_core;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, threads_per_core, CTLFLAG_RDTUN,
    &threads_per_core, 0, NULL);

extern u_int cores_per_package;
SYSCTL_UINT(_hw_vmm_topology, OID_AUTO, cores_per_package, CTLFLAG_RDTUN,
    &cores_per_package, 0, NULL);
#endif

static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);
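
/*
 * Illustrative usage: being CTLFLAG_RDTUN, the knobs above can be set as
 * loader tunables, e.g. in /boot/loader.conf, to disable the emulated
 * leaf 0xB (the value shown is an example, not a recommendation):
 *
 *	hw.vmm.topology.cpuid_leaf_b="0"
 */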

/*
 * Round up to the next power of two, if necessary, and then take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
log2(u_int x)
{

	return (fls(x << (1 - powerof2(x))) - 1);
}
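
/*
 * Worked example: 6 is not a power of two (powerof2(6) == 0), so the
 * argument is shifted left once and log2(6) = fls(12) - 1 = 3, i.e. 6 is
 * rounded up to 8 = 2^3.  For exact powers of two no shift occurs:
 * log2(8) = fls(8) - 1 = 3, and log2(0) = fls(0) - 1 = -1.
 */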

int
x86_emulate_cpuid(struct vm *vm, int vcpu_id,
		  uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, level, width, x2apic_id;
	unsigned int func, regs[4], logical_cpus;
	enum x2apic_state x2apic_state;
	uint16_t cores, maxcpus, sockets, threads;

	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", *eax, *ecx);

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && *eax >= 0x80000000) {
		if (*eax > cpu_exthigh)
			*eax = cpu_exthigh;
	} else if (*eax >= 0x40000000) {
		if (*eax > CPUID_VM_HIGH)
			*eax = CPUID_VM_HIGH;
	} else if (*eax > cpu_high) {
		*eax = cpu_high;
	}

	func = *eax;

	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
		/*
		 * Pass these through to the guest
		 */
		case CPUID_0000_0000:
		case CPUID_0000_0002:
		case CPUID_0000_0003:
		case CPUID_8000_0000:
		case CPUID_8000_0002:
		case CPUID_8000_0003:
		case CPUID_8000_0004:
		case CPUID_8000_0006:
			cpuid_count(*eax, *ecx, regs);
			break;
		case CPUID_8000_0008:
			cpuid_count(*eax, *ecx, regs);
			if (vmm_is_amd()) {
				/*
				 * As on Intel (0000_0007:0, EDX), mask out
				 * unsupported or unsafe AMD extended features
				 * (8000_0008 EBX).
				 */
				regs[1] &= (AMDFEID_CLZERO | AMDFEID_IRPERF |
				    AMDFEID_XSAVEERPTR);

				vm_get_topology(vm, &sockets, &cores, &threads,
				    &maxcpus);
				/*
				 * Here, width is ApicIdCoreIdSize, present on
				 * at least Family 15h and newer.  It
				 * represents the "number of bits in the
				 * initial apicid that indicate thread id
				 * within a package."
				 *
				 * Our topo_probe_amd() uses it for
				 * pkg_id_shift and other OSes may rely on it.
				 */
				width = MIN(0xF, log2(threads * cores));
				if (width < 0x4)
					width = 0;
				logical_cpus = MIN(0xFF, threads * cores - 1);
				regs[2] = (width << AMDID_COREID_SIZE_SHIFT) |
				    logical_cpus;
			}
			break;
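
		/*
		 * Worked example for the 8000_0008 ECX encoding above,
		 * assuming 1 socket x 2 cores x 2 threads: log2(4) = 2,
		 * which is below 0x4 and therefore clamped to 0, and the
		 * logical CPU count is 4 - 1 = 3, so the guest sees
		 * ECX = (0 << AMDID_COREID_SIZE_SHIFT) | 3 = 0x00000003.
		 */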

		case CPUID_8000_0001:
			cpuid_count(*eax, *ecx, regs);

			/*
			 * Hide SVM from guest.
			 */
			regs[2] &= ~AMDID2_SVM;

			/*
			 * Don't advertise extended performance counter MSRs
			 * to the guest.
			 */
			regs[2] &= ~AMDID2_PCXC;
			regs[2] &= ~AMDID2_PNXC;
			regs[2] &= ~AMDID2_PTSCEL2I;

			/*
			 * Don't advertise Instruction Based Sampling feature.
			 */
			regs[2] &= ~AMDID2_IBS;

			/* NodeID MSR not available */
			regs[2] &= ~AMDID2_NODE_ID;

			/* Don't advertise the OS visible workaround feature */
			regs[2] &= ~AMDID2_OSVW;

			/* Hide mwaitx/monitorx capability from the guest */
			regs[2] &= ~AMDID2_MWAITX;

			/*
			 * Hide rdtscp/ia32_tsc_aux until we know how
			 * to deal with them.
			 */
			regs[3] &= ~AMDID_RDTSCP;
			break;

		case CPUID_8000_0007:
			/*
			 * AMD uses this leaf to advertise the processor's
			 * power monitoring and RAS capabilities. These
			 * features are hardware-specific and exposing
			 * them to a guest doesn't make a lot of sense.
			 *
			 * Intel uses this leaf only to advertise the
			 * "Invariant TSC" feature with all other bits
			 * being reserved (set to zero).
			 */
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;

			/*
			 * "Invariant TSC" can be advertised to the guest if:
			 * - host TSC frequency is invariant
			 * - host TSCs are synchronized across physical cpus
			 *
			 * XXX This still falls short because the vcpu
			 * can observe the TSC moving backwards as it
			 * migrates across physical cpus. But at least
			 * it should discourage the guest from using the
			 * TSC to keep track of time.
			 */
			if (tsc_is_invariant && smp_tsc)
				regs[3] |= AMDPM_TSC_INVARIANT;
			break;

		case CPUID_8000_001D:
			/* AMD Cache topology, like 0000_0004 for Intel. */
			if (!vmm_is_amd())
				goto default_leaf;

			/*
			 * Similar to Intel, generate a fictitious cache
			 * topology for the guest with L3 shared by the
			 * package, and L1 and L2 local to a core.
			 */
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			switch (*ecx) {
			case 0:
				logical_cpus = threads;
				level = 1;
				func = 1;	/* data cache */
				break;
			case 1:
				logical_cpus = threads;
				level = 2;
				func = 3;	/* unified cache */
				break;
			case 2:
				logical_cpus = threads * cores;
				level = 3;
				func = 3;	/* unified cache */
				break;
			default:
				logical_cpus = 0;
				level = 0;
				func = 0;
				break;
			}

			logical_cpus = MIN(0xfff, logical_cpus - 1);
			regs[0] = (logical_cpus << 14) | (1 << 8) |
			    (level << 5) | func;
			regs[1] = (func > 0) ? (CACHE_LINE_SIZE - 1) : 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
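
		/*
		 * Worked example for the EAX encoding above, assuming
		 * 2 cores x 2 threads: the L3 sub-leaf (*ecx == 2) has
		 * logical_cpus = 4 - 1 = 3, level = 3 and func = 3, so
		 * EAX = (3 << 14) | (1 << 8) | (3 << 5) | 3 = 0xc163: a
		 * self-initializing level-3 unified cache shared by all
		 * four logical CPUs in the package.
		 */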

		case CPUID_8000_001E:
			/* AMD Family 16h+ additional identifiers */
			if (!vmm_is_amd() || CPUID_TO_FAMILY(cpu_id) < 0x16)
				goto default_leaf;

			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			regs[0] = vcpu_id;
			threads = MIN(0xFF, threads - 1);
			regs[1] = (threads << 8) |
			    (vcpu_id >> log2(threads + 1));
			/*
			 * XXX Bhyve topology cannot yet represent >1 node per
			 * processor.
			 */
			regs[2] = 0;
			regs[3] = 0;
			break;
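
		/*
		 * Worked example for the EBX encoding above, assuming
		 * 2 threads per core: threads is reduced to
		 * ThreadsPerComputeUnit - 1 = 1 in bits 15:8, and the
		 * compute unit id is the vcpu id divided by the threads
		 * per unit, so vcpu 5 reports EBX = (1 << 8) | (5 >> 1) =
		 * 0x0102.
		 */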

		case CPUID_0000_0001:
			do_cpuid(1, regs);

			error = vm_get_x2apic_state(vm, vcpu_id, &x2apic_state);
			if (error) {
				panic("x86_emulate_cpuid: error %d "
				      "fetching x2apic state", error);
			}

			/*
			 * Override the APIC ID only in ebx
			 */
			regs[1] &= ~(CPUID_LOCAL_APIC_ID);
			regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

			/*
			 * Don't expose VMX, SpeedStep, TME or SMX capability.
			 * Advertise x2APIC capability and Hypervisor guest.
			 */
			regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
			regs[2] &= ~(CPUID2_SMX);

			regs[2] |= CPUID2_HV;

			if (x2apic_state != X2APIC_DISABLED)
				regs[2] |= CPUID2_X2APIC;
			else
				regs[2] &= ~CPUID2_X2APIC;

			/*
			 * Only advertise CPUID2_XSAVE in the guest if
			 * the host is using XSAVE.
			 */
			if (!(regs[2] & CPUID2_OSXSAVE))
				regs[2] &= ~CPUID2_XSAVE;

			/*
			 * If CPUID2_XSAVE is being advertised and the
			 * guest has set CR4_XSAVE, set
			 * CPUID2_OSXSAVE.
			 */
			regs[2] &= ~CPUID2_OSXSAVE;
			if (regs[2] & CPUID2_XSAVE) {
				error = vm_get_register(vm, vcpu_id,
				    VM_REG_GUEST_CR4, &cr4);
				if (error)
					panic("x86_emulate_cpuid: error %d "
					      "fetching %%cr4", error);
				if (cr4 & CR4_XSAVE)
					regs[2] |= CPUID2_OSXSAVE;
			}

			/*
			 * Hide monitor/mwait until we know how to deal with
			 * these instructions.
			 */
			regs[2] &= ~CPUID2_MON;

			/*
			 * Hide the performance and debug features.
			 */
			regs[2] &= ~CPUID2_PDCM;

			/*
			 * No TSC deadline support in the APIC yet
			 */
			regs[2] &= ~CPUID2_TSCDLT;

			/*
			 * Hide thermal monitoring
			 */
			regs[3] &= ~(CPUID_ACPI | CPUID_TM);

			/*
			 * Hide the debug store capability.
			 */
			regs[3] &= ~CPUID_DS;

			/*
			 * Advertise the Machine Check and MTRR capability.
			 *
			 * Some guest OSes (e.g. Windows) will not boot if
			 * these features are absent.
			 */
			regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			logical_cpus = threads * cores;
			regs[1] &= ~CPUID_HTT_CORES;
			regs[1] |= (logical_cpus & 0xff) << 16;
			regs[3] |= CPUID_HTT;
			break;
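
		/*
		 * Worked example for the EBX[23:16] count set above,
		 * assuming 2 cores x 2 threads per package: logical_cpus =
		 * 4, so the guest sees 4 addressable logical processors,
		 * with CPUID_HTT set in EDX to mark that field as valid.
		 */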

		case CPUID_0000_0004:
			cpuid_count(*eax, *ecx, regs);

			if (regs[0] || regs[1] || regs[2] || regs[3]) {
				vm_get_topology(vm, &sockets, &cores, &threads,
				    &maxcpus);
				regs[0] &= 0x3ff;
				regs[0] |= (cores - 1) << 26;
				/*
				 * Cache topology:
				 * - L1 and L2 are shared only by the logical
				 *   processors in a single core.
				 * - L3 and above are shared by all logical
				 *   processors in the package.
				 */
				logical_cpus = threads;
				level = (regs[0] >> 5) & 0x7;
				if (level >= 3)
					logical_cpus *= cores;
				regs[0] |= (logical_cpus - 1) << 14;
			}
			break;
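
		/*
		 * Worked example, again assuming 2 cores x 2 threads: an L2
		 * sub-leaf (level 2) is shared only by the 2 threads of one
		 * core, so EAX[25:14] = 1, while an L3 sub-leaf (level 3) is
		 * shared by all 4 logical CPUs, so EAX[25:14] = 3;
		 * EAX[31:26] = cores - 1 = 1 in both cases.
		 */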

		case CPUID_0000_0007:
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;

			/* leaf 0 */
			if (*ecx == 0) {
				cpuid_count(*eax, *ecx, regs);

				/* Only leaf 0 is supported */
				regs[0] = 0;

				/*
				 * Expose known-safe features.
				 */
				regs[1] &= (CPUID_STDEXT_FSGSBASE |
				    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
				    CPUID_STDEXT_AVX2 | CPUID_STDEXT_BMI2 |
				    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
				    CPUID_STDEXT_AVX512F |
				    CPUID_STDEXT_RDSEED |
				    CPUID_STDEXT_AVX512PF |
				    CPUID_STDEXT_AVX512ER |
				    CPUID_STDEXT_AVX512CD | CPUID_STDEXT_SHA);
				regs[2] = 0;
				regs[3] &= CPUID_STDEXT3_MD_CLEAR;

				/* Advertise INVPCID if it is enabled. */
				error = vm_get_capability(vm, vcpu_id,
				    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
				if (error == 0 && enable_invpcid)
					regs[1] |= CPUID_STDEXT_INVPCID;
			}
			break;

		case CPUID_0000_0006:
			regs[0] = CPUTPM1_ARAT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;

		case CPUID_0000_000A:
			/*
			 * Handle the access, but report 0 for
			 * all options
			 */
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;

		case CPUID_0000_000B:
			/*
			 * Intel processor topology enumeration
			 */
			if (vmm_is_intel()) {
				vm_get_topology(vm, &sockets, &cores, &threads,
				    &maxcpus);
				if (*ecx == 0) {
					logical_cpus = threads;
					width = log2(logical_cpus);
					level = CPUID_TYPE_SMT;
					x2apic_id = vcpu_id;
				}

				if (*ecx == 1) {
					logical_cpus = threads * cores;
					width = log2(logical_cpus);
					level = CPUID_TYPE_CORE;
					x2apic_id = vcpu_id;
				}

				if (!cpuid_leaf_b || *ecx >= 2) {
					width = 0;
					logical_cpus = 0;
					level = 0;
					x2apic_id = 0;
				}

				regs[0] = width & 0x1f;
				regs[1] = logical_cpus & 0xffff;
				regs[2] = (level << 8) | (*ecx & 0xff);
				regs[3] = x2apic_id;
			} else {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
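
		/*
		 * Worked example for the register layout above, assuming
		 * 2 cores x 2 threads and cpuid_leaf_b enabled: sub-leaf 0
		 * (SMT) returns EAX = log2(2) = 1, EBX = 2 and ECX =
		 * (CPUID_TYPE_SMT << 8); sub-leaf 1 (core) returns EAX =
		 * log2(4) = 2, EBX = 4 and ECX = (CPUID_TYPE_CORE << 8) | 1;
		 * EDX carries the vcpu's x2APIC id in both cases.
		 */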

		case CPUID_0000_000D:
			limits = vmm_get_xsave_limits();
			if (!limits->xsave_enabled) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
				break;
			}

			cpuid_count(*eax, *ecx, regs);
			switch (*ecx) {
			case 0:
				/*
				 * Only permit the guest to use bits
				 * that are active in the host in
				 * %xcr0.  Also, claim that the
				 * maximum save area size is
				 * equivalent to the host's current
				 * save area size.  Since this runs
				 * "inside" of vmrun(), it runs with
				 * the guest's xcr0, so the current
				 * save area size is correct as-is.
				 */
				regs[0] &= limits->xcr0_allowed;
				regs[2] = limits->xsave_max_size;
				regs[3] &= (limits->xcr0_allowed >> 32);
				break;
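				/*
				 * Worked example: if the host allows only
				 * x87, SSE and AVX state (xcr0_allowed ==
				 * 0x7), a hardware-reported EAX of 0xff is
				 * masked down to 0x7, and EDX, the high 32
				 * bits of the supported-state mask, becomes
				 * 0.
				 */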
			case 1:
				/* Only permit XSAVEOPT. */
				regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
				break;
			default:
				/*
				 * If the leaf is for a permitted feature,
				 * pass through as-is, otherwise return
				 * all zeroes.
				 */
				if (!(limits->xcr0_allowed & (1ul << *ecx))) {
					regs[0] = 0;
					regs[1] = 0;
					regs[2] = 0;
					regs[3] = 0;
				}
				break;
			}
			break;

		case 0x40000000:
			regs[0] = CPUID_VM_HIGH;
			bcopy(bhyve_id, &regs[1], 4);
			bcopy(bhyve_id + 4, &regs[2], 4);
			bcopy(bhyve_id + 8, &regs[3], 4);
			break;
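
		/*
		 * The 12-byte bhyve_id is split across EBX:ECX:EDX above, so
		 * a guest reading leaf 0x40000000 sees the hypervisor
		 * signature "bhyve bhyve " (analogous to KVM's "KVMKVMKVM")
		 * along with the highest hypervisor leaf in EAX.
		 */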

		default:
default_leaf:
			/*
			 * The leaf value has already been clamped so
			 * simply pass this through, keeping count of
			 * how many unhandled leaf values have been seen.
			 */
			atomic_add_long(&bhyve_xcpuids, 1);
			cpuid_count(*eax, *ecx, regs);
			break;
	}

	*eax = regs[0];
	*ebx = regs[1];
	*ecx = regs[2];
	*edx = regs[3];

	return (1);
}

bool
vm_cpuid_capability(struct vm *vm, int vcpuid, enum vm_cpuid_capability cap)
{
	bool rv;

	KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d",
	    __func__, cap));

	/*
	 * Simply pass through the capabilities of the host cpu for now.
	 */
	rv = false;
	switch (cap) {
	case VCC_NO_EXECUTE:
		if (amd_feature & AMDID_NX)
			rv = true;
		break;
	case VCC_FFXSR:
		if (amd_feature & AMDID_FFXSR)
			rv = true;
		break;
	case VCC_TCE:
		if (amd_feature2 & AMDID2_TCE)
			rv = true;
		break;
	default:
		panic("%s: unknown vm_cpu_capability %d", __func__, cap);
	}
	return (rv);
}