1 //===-- cpu_model.c - Support for __cpu_model builtin ------------*- C -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file is based on LLVM's lib/Support/Host.cpp.
10 // It implements the operating system Host concept and builtin
11 // __cpu_model for the compiler_rt library, for x86 only.
13 //===----------------------------------------------------------------------===//
15 #if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
17 (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))
29 #ifndef __has_attribute
30 #define __has_attribute(attr) 0
33 enum VendorSignatures {
// CPUID leaf 0 places the vendor string in EBX:EDX:ECX; these constants are
// the EBX word of "GenuineIntel" ("Genu") and "AuthenticAMD" ("Auth"), and
// are compared against the EBX output of leaf 0 in __cpu_indicator_init.
34 SIG_INTEL = 0x756e6547, // Genu
35 SIG_AMD = 0x68747541, // Auth
// Vendor ids published through __cpu_model.__cpu_vendor
// (VENDOR_INTEL / VENDOR_AMD / VENDOR_OTHER below; value 0 means "not
// yet initialized" — see the early-out in __cpu_indicator_init).
38 enum ProcessorVendors {
// Subtype values published through __cpu_model.__cpu_subtype. The first
// entry starts at 1 so that 0 can mean "no subtype". NOTE(review): this
// ordering is ABI — it presumably must match the subtype list consumed by
// GCC's __builtin_cpu_is; confirm against libgcc before reordering.
63 enum ProcessorSubtypes {
64 INTEL_COREI7_NEHALEM = 1,
65 INTEL_COREI7_WESTMERE,
66 INTEL_COREI7_SANDYBRIDGE,
75 INTEL_COREI7_IVYBRIDGE,
77 INTEL_COREI7_BROADWELL,
79 INTEL_COREI7_SKYLAKE_AVX512,
80 INTEL_COREI7_CANNONLAKE,
81 INTEL_COREI7_ICELAKE_CLIENT,
82 INTEL_COREI7_ICELAKE_SERVER,
84 INTEL_COREI7_CASCADELAKE,
// Feature bit indices. Values 0-31 are stored in
// __cpu_model.__cpu_features[0]; values 32 and up go into the separate
// __cpu_features2 word (see the setFeature macro in getAvailableFeatures,
// which subtracts 32 for the second word).
88 enum ProcessorFeatures {
117 FEATURE_AVX5124VNNIW,
118 FEATURE_AVX5124FMAPS,
119 FEATURE_AVX512VPOPCNTDQ,
127 // The check below for i386 was copied from clang's cpuid.h (__get_cpuid_max).
128 // Check motivated by bug reports for OpenSSL crashing on CPUs without CPUID
129 // support. Consequently, for i386, the presence of CPUID is checked first
130 // via the corresponding eflags bit.
// Returns whether the CPUID instruction may be executed on this processor;
// only the i386 build needs a runtime probe.
131 static bool isCpuIdSupported() {
132 #if defined(__GNUC__) || defined(__clang__)
133 #if defined(__i386__)
134 int __cpuid_supported;
// Probe by attempting to flip the EFLAGS ID bit (bit 21, mask 0x00200000):
// a CPU that supports CPUID lets software toggle this bit.
137 " movl %%eax,%%ecx\n"
138 " xorl $0x00200000,%%eax\n"
// Compare the (possibly modified) flags against the saved copy.
144 " cmpl %%eax,%%ecx\n"
148 : "=r"(__cpuid_supported)
// If the ID bit could not be toggled, CPUID is absent.
151 if (!__cpuid_supported)
159 // This code is copied from lib/Support/Host.cpp.
160 // Changes to either file should be mirrored in the other.
162 /// getX86CpuIDAndInfo - Execute the specified cpuid and return the 4 values in
163 /// the specified arguments. If we can't run cpuid on the host, return true.
164 static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
165 unsigned *rECX, unsigned *rEDX) {
166 #if defined(__GNUC__) || defined(__clang__)
167 #if defined(__x86_64__)
168 // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
169 // FIXME: should we save this for Clang?
// Save RBX in RSI around the cpuid, then exchange back so the "=S"
// (ESI) output constraint receives the EBX result while RBX is restored.
170 __asm__("movq\t%%rbx, %%rsi\n\t"
172 "xchgq\t%%rbx, %%rsi\n\t"
173 : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
176 #elif defined(__i386__)
// Same EBX-preservation dance for 32-bit mode, via ESI.
177 __asm__("movl\t%%ebx, %%esi\n\t"
179 "xchgl\t%%ebx, %%esi\n\t"
180 : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
186 #elif defined(_MSC_VER)
187 // The MSVC intrinsic is portable across x86 and x64.
// __cpuid fills registers[] with EAX/EBX/ECX/EDX for leaf `value`.
189 __cpuid(registers, value);
190 *rEAX = registers[0];
191 *rEBX = registers[1];
192 *rECX = registers[2];
193 *rEDX = registers[3];
200 /// getX86CpuIDAndInfoEx - Execute the specified cpuid with subleaf and return
201 /// the 4 values in the specified arguments. If we can't run cpuid on the host,
// return true (same convention as getX86CpuIDAndInfo: false == success).
203 static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
204 unsigned *rEAX, unsigned *rEBX, unsigned *rECX,
206 #if defined(__GNUC__) || defined(__clang__)
207 #if defined(__x86_64__)
208 // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
209 // FIXME: should we save this for Clang?
// Identical to getX86CpuIDAndInfo, except the subleaf is loaded into ECX
// ("c"(subleaf)) before executing cpuid.
210 __asm__("movq\t%%rbx, %%rsi\n\t"
212 "xchgq\t%%rbx, %%rsi\n\t"
213 : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
214 : "a"(value), "c"(subleaf));
216 #elif defined(__i386__)
217 __asm__("movl\t%%ebx, %%esi\n\t"
219 "xchgl\t%%ebx, %%esi\n\t"
220 : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
221 : "a"(value), "c"(subleaf));
226 #elif defined(_MSC_VER)
// MSVC's __cpuidex takes the subleaf explicitly.
228 __cpuidex(registers, value, subleaf);
229 *rEAX = registers[0];
230 *rEBX = registers[1];
231 *rECX = registers[2];
232 *rEDX = registers[3];
239 // Read control register 0 (XCR0). Used to detect features such as AVX.
// Callers treat a false return as success (see the !getX86XCR0(...) use in
// getAvailableFeatures); on success the 64-bit XCR0 is split across
// *rEDX (high word) and *rEAX (low word).
240 static bool getX86XCR0(unsigned *rEAX, unsigned *rEDX) {
241 #if defined(__GNUC__) || defined(__clang__)
242 // Check xgetbv; this uses a .byte sequence instead of the instruction
243 // directly because older assemblers do not include support for xgetbv and
244 // there is no easy way to conditionally compile based on the assembler used.
// 0x0f 0x01 0xd0 encodes XGETBV; ECX=0 selects the XCR0 register.
245 __asm__(".byte 0x0f, 0x01, 0xd0" : "=a"(*rEAX), "=d"(*rEDX) : "c"(0));
247 #elif defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
248 unsigned long long Result = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
// Split the 64-bit result into the EDX:EAX pair expected by callers.
250 *rEDX = Result >> 32;
// Decode the display family and model from the CPUID leaf 1 EAX value,
// folding in the extended family/model fields per the Intel/AMD encoding.
257 static void detectX86FamilyModel(unsigned EAX, unsigned *Family,
259 *Family = (EAX >> 8) & 0xf; // Bits 8 - 11
260 *Model = (EAX >> 4) & 0xf; // Bits 4 - 7
261 if (*Family == 6 || *Family == 0xf) {
263 // Examine extended family ID if family ID is F.
264 *Family += (EAX >> 20) & 0xff; // Bits 20 - 27
265 // Examine extended model ID if family ID is 6 or F.
266 *Model += ((EAX >> 16) & 0xf) << 4; // Bits 16 - 19
// Map an Intel family/model (plus brand id and the detected feature words)
// to the type/subtype values published in __cpu_model. Dispatch is by CPUID
// model number; the per-model comments below identify the marketing names.
270 static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
273 unsigned Features2, unsigned *Type,
280 case 0x0f: // Intel Core 2 Duo processor, Intel Core 2 Duo mobile
281 // processor, Intel Core 2 Quad processor, Intel Core 2 Quad
282 // mobile processor, Intel Core 2 Extreme processor, Intel
283 // Pentium Dual-Core processor, Intel Xeon processor, model
284 // 0Fh. All processors are manufactured using the 65 nm process.
285 case 0x16: // Intel Celeron processor model 16h. All processors are
286 // manufactured using the 65 nm process
287 case 0x17: // Intel Core 2 Extreme processor, Intel Xeon processor, model
288 // 17h. All processors are manufactured using the 45 nm process.
290 // 45nm: Penryn , Wolfdale, Yorkfield (XE)
291 case 0x1d: // Intel Xeon processor MP. All processors are manufactured using
292 // the 45 nm process.
293 *Type = INTEL_CORE2; // "penryn"
295 case 0x1a: // Intel Core i7 processor and Intel Xeon processor. All
296 // processors are manufactured using the 45 nm process.
297 case 0x1e: // Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz.
298 // As found in a Summer 2010 model iMac.
300 case 0x2e: // Nehalem EX
301 *Type = INTEL_COREI7; // "nehalem"
302 *Subtype = INTEL_COREI7_NEHALEM;
304 case 0x25: // Intel Core i7, laptop version.
305 case 0x2c: // Intel Core i7 processor and Intel Xeon processor. All
306 // processors are manufactured using the 32 nm process.
307 case 0x2f: // Westmere EX
308 *Type = INTEL_COREI7; // "westmere"
309 *Subtype = INTEL_COREI7_WESTMERE;
311 case 0x2a: // Intel Core i7 processor. All processors are manufactured
312 // using the 32 nm process.
314 *Type = INTEL_COREI7; //"sandybridge"
315 *Subtype = INTEL_COREI7_SANDYBRIDGE;
318 case 0x3e: // Ivy Bridge EP
319 *Type = INTEL_COREI7; // "ivybridge"
320 *Subtype = INTEL_COREI7_IVYBRIDGE;
328 *Type = INTEL_COREI7; // "haswell"
329 *Subtype = INTEL_COREI7_HASWELL;
337 *Type = INTEL_COREI7; // "broadwell"
338 *Subtype = INTEL_COREI7_BROADWELL;
342 case 0x4e: // Skylake mobile
343 case 0x5e: // Skylake desktop
344 case 0x8e: // Kaby Lake mobile
345 case 0x9e: // Kaby Lake desktop
346 *Type = INTEL_COREI7; // "skylake"
347 *Subtype = INTEL_COREI7_SKYLAKE;
// Skylake-AVX512 server parts share a model number with Cascade Lake; the
// AVX512VNNI feature bit (stored in Features2, hence the -32) tells them apart.
352 *Type = INTEL_COREI7;
353 if (Features2 & (1 << (FEATURE_AVX512VNNI - 32)))
354 *Subtype = INTEL_COREI7_CASCADELAKE; // "cascadelake"
356 *Subtype = INTEL_COREI7_SKYLAKE_AVX512; // "skylake-avx512"
361 *Type = INTEL_COREI7;
362 *Subtype = INTEL_COREI7_CANNONLAKE; // "cannonlake"
368 *Type = INTEL_COREI7;
369 *Subtype = INTEL_COREI7_ICELAKE_CLIENT; // "icelake-client"
375 *Type = INTEL_COREI7;
376 *Subtype = INTEL_COREI7_ICELAKE_SERVER; // "icelake-server"
379 case 0x1c: // Most 45 nm Intel Atom processors
380 case 0x26: // 45 nm Atom Lincroft
381 case 0x27: // 32 nm Atom Medfield
382 case 0x35: // 32 nm Atom Midview
383 case 0x36: // 32 nm Atom Midview
384 *Type = INTEL_BONNELL;
387 // Atom Silvermont codes from the Intel software optimization guide.
393 case 0x4c: // really airmont
394 *Type = INTEL_SILVERMONT;
395 break; // "silvermont"
397 case 0x5c: // Apollo Lake
398 case 0x5f: // Denverton
399 *Type = INTEL_GOLDMONT;
402 *Type = INTEL_GOLDMONT_PLUS;
405 *Type = INTEL_TREMONT;
// Knights Landing / Knights Mill (Xeon Phi).
409 *Type = INTEL_KNL; // knl
413 *Type = INTEL_KNM; // knm
416 default: // Unknown family 6 CPU.
// Map an AMD family/model to the type/subtype values published in
// __cpu_model. Within a family, model ranges select the microarchitecture
// revision (e.g. bdver1..4 within family 15h, znver1/2 within family 17h).
425 static void getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
426 unsigned Features, unsigned Features2,
427 unsigned *Type, unsigned *Subtype) {
428 // FIXME: this poorly matches the generated SubtargetFeatureKV table. There
429 // appears to be no way to generate the wide variety of AMD-specific targets
430 // from the information returned from CPUID.
431 *Type = AMDFAM10H; // "amdfam10"
436 *Subtype = AMDFAM10H_BARCELONA;
439 *Subtype = AMDFAM10H_SHANGHAI;
442 *Subtype = AMDFAM10H_ISTANBUL;
// Family 15h (Bulldozer line): check model ranges newest-first.
451 if (Model >= 0x60 && Model <= 0x7f) {
452 *Subtype = AMDFAM15H_BDVER4;
453 break; // "bdver4"; 60h-7Fh: Excavator
455 if (Model >= 0x30 && Model <= 0x3f) {
456 *Subtype = AMDFAM15H_BDVER3;
457 break; // "bdver3"; 30h-3Fh: Steamroller
459 if ((Model >= 0x10 && Model <= 0x1f) || Model == 0x02) {
460 *Subtype = AMDFAM15H_BDVER2;
461 break; // "bdver2"; 02h, 10h-1Fh: Piledriver
464 *Subtype = AMDFAM15H_BDVER1;
465 break; // "bdver1"; 00h-0Fh: Bulldozer
// Family 17h (Zen line).
473 if (Model >= 0x30 && Model <= 0x3f) {
474 *Subtype = AMDFAM17H_ZNVER2;
475 break; // "znver2"; 30h-3fh: Zen2
478 *Subtype = AMDFAM17H_ZNVER1;
479 break; // "znver1"; 00h-0Fh: Zen1
// Translate raw CPUID feature words (leaf 1 ECX/EDX passed in, plus leaf 7
// and extended leaf 0x80000001 queried here) into the FEATURE_* bit sets:
// bits 0-31 accumulate in *FeaturesOut, bits 32+ in *Features2Out.
487 static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
488 unsigned *FeaturesOut,
489 unsigned *Features2Out) {
490 unsigned Features = 0;
491 unsigned Features2 = 0;
// setFeature routes a FEATURE_* index into the correct 32-bit word:
// indices below 32 go to Features, 32 and above to Features2 (after -32).
494 #define setFeature(F) \
497 Features |= 1U << (F & 0x1f); \
499 Features2 |= 1U << ((F - 32) & 0x1f); \
// Baseline features from the leaf 1 EDX/ECX words supplied by the caller.
503 setFeature(FEATURE_CMOV);
505 setFeature(FEATURE_MMX);
507 setFeature(FEATURE_SSE);
509 setFeature(FEATURE_SSE2);
512 setFeature(FEATURE_SSE3);
514 setFeature(FEATURE_PCLMUL);
516 setFeature(FEATURE_SSSE3);
518 setFeature(FEATURE_FMA);
520 setFeature(FEATURE_SSE4_1);
522 setFeature(FEATURE_SSE4_2);
524 setFeature(FEATURE_POPCNT);
526 setFeature(FEATURE_AES);
528 // If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV
529 // indicates that the AVX registers will be saved and restored on context
530 // switch, then we have full AVX support.
531 const unsigned AVXBits = (1 << 27) | (1 << 28);
532 bool HasAVX = ((ECX & AVXBits) == AVXBits) && !getX86XCR0(&EAX, &EDX) &&
533 ((EAX & 0x6) == 0x6);
// AVX-512 additionally requires the OS to save the opmask/ZMM state
// (XCR0 bits 5-7, mask 0xe0).
534 bool HasAVX512Save = HasAVX && ((EAX & 0xe0) == 0xe0);
537 setFeature(FEATURE_AVX);
// CPUID leaf 7 subleaf 0 carries the BMI/AVX2/AVX-512 family of flags.
540 MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX);
542 if (HasLeaf7 && ((EBX >> 3) & 1))
543 setFeature(FEATURE_BMI);
544 if (HasLeaf7 && ((EBX >> 5) & 1) && HasAVX)
545 setFeature(FEATURE_AVX2);
546 if (HasLeaf7 && ((EBX >> 8) & 1))
547 setFeature(FEATURE_BMI2);
548 if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save)
549 setFeature(FEATURE_AVX512F);
550 if (HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save)
551 setFeature(FEATURE_AVX512DQ);
552 if (HasLeaf7 && ((EBX >> 21) & 1) && HasAVX512Save)
553 setFeature(FEATURE_AVX512IFMA);
554 if (HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save)
555 setFeature(FEATURE_AVX512PF);
556 if (HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save)
557 setFeature(FEATURE_AVX512ER);
558 if (HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save)
559 setFeature(FEATURE_AVX512CD);
560 if (HasLeaf7 && ((EBX >> 30) & 1) && HasAVX512Save)
561 setFeature(FEATURE_AVX512BW);
562 if (HasLeaf7 && ((EBX >> 31) & 1) && HasAVX512Save)
563 setFeature(FEATURE_AVX512VL);
565 if (HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save)
566 setFeature(FEATURE_AVX512VBMI);
567 if (HasLeaf7 && ((ECX >> 6) & 1) && HasAVX512Save)
568 setFeature(FEATURE_AVX512VBMI2);
569 if (HasLeaf7 && ((ECX >> 8) & 1))
570 setFeature(FEATURE_GFNI);
571 if (HasLeaf7 && ((ECX >> 10) & 1) && HasAVX)
572 setFeature(FEATURE_VPCLMULQDQ);
573 if (HasLeaf7 && ((ECX >> 11) & 1) && HasAVX512Save)
574 setFeature(FEATURE_AVX512VNNI);
575 if (HasLeaf7 && ((ECX >> 12) & 1) && HasAVX512Save)
576 setFeature(FEATURE_AVX512BITALG);
577 if (HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save)
578 setFeature(FEATURE_AVX512VPOPCNTDQ);
580 if (HasLeaf7 && ((EDX >> 2) & 1) && HasAVX512Save)
581 setFeature(FEATURE_AVX5124VNNIW);
582 if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save)
583 setFeature(FEATURE_AVX5124FMAPS);
// Extended leaf 0x80000001 ECX carries the AMD-originated SSE4A/XOP/FMA4.
585 unsigned MaxExtLevel;
586 getX86CpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX);
588 bool HasExtLeaf1 = MaxExtLevel >= 0x80000001 &&
589 !getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
590 if (HasExtLeaf1 && ((ECX >> 6) & 1))
591 setFeature(FEATURE_SSE4_A);
592 if (HasExtLeaf1 && ((ECX >> 11) & 1))
593 setFeature(FEATURE_XOP);
594 if (HasExtLeaf1 && ((ECX >> 16) & 1))
595 setFeature(FEATURE_FMA4);
// Publish both accumulated words to the caller.
597 *FeaturesOut = Features;
598 *Features2Out = Features2;
// Choose how __cpu_indicator_init is registered to run at startup: with an
// explicit init priority when available, with a plain constructor attribute
// otherwise, or not at all (MSVC — see FIXME below).
602 #if defined(HAVE_INIT_PRIORITY)
// NOTE(review): `__constructor__ 101` (no parentheses around the priority)
// is an unusual spelling — confirm it is accepted by the targeted compilers,
// vs. the common `__attribute__((__constructor__(101)))` form.
603 #define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__ 101))
604 #elif __has_attribute(__constructor__)
605 #define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__))
607 // FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that
608 // this runs during initialization.
609 #define CONSTRUCTOR_ATTRIBUTE
// Forward declaration carrying the constructor attribute; hidden visibility
// keeps the symbol out of the shared object's exported dynamic symbols.
613 __attribute__((visibility("hidden")))
615 int __cpu_indicator_init(void) CONSTRUCTOR_ATTRIBUTE;
618 __attribute__((visibility("hidden")))
// The model record filled in by __cpu_indicator_init. A zero __cpu_vendor
// doubles as the "not yet initialized" marker. NOTE(review): this layout is
// ABI shared with the compiler's __builtin_cpu_is/__builtin_cpu_supports —
// confirm against libgcc before changing any field.
620 struct __processor_model {
621 unsigned int __cpu_vendor;
622 unsigned int __cpu_type;
623 unsigned int __cpu_subtype;
624 unsigned int __cpu_features[1];
625 } __cpu_model = {0, 0, 0, {0}};
628 __attribute__((visibility("hidden")))
// Second feature word, holding FEATURE_* bits with index >= 32
// (see setFeature in getAvailableFeatures).
630 unsigned int __cpu_features2;
632 // A constructor function that sets __cpu_model and __cpu_features2 with
633 // the right values. This needs to run only once. This constructor is
634 // given the highest priority and it should run before constructors without
635 // the priority set. However, it still runs after ifunc initializers and
636 // needs to be called explicitly there.
638 int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
639 unsigned EAX, EBX, ECX, EDX;
640 unsigned MaxLeaf = 5;
642 unsigned Model, Family, Brand_id;
643 unsigned Features = 0;
644 unsigned Features2 = 0;
646 // This function needs to run just once.
647 if (__cpu_model.__cpu_vendor)
// Bail out early (leaving __cpu_vendor unset) if CPUID cannot be executed.
650 if (!isCpuIdSupported())
653 // Assume cpuid insn present. Run in level 0 to get vendor id.
654 if (getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) || MaxLeaf < 1) {
655 __cpu_model.__cpu_vendor = VENDOR_OTHER;
// Leaf 1: family/model in EAX, brand index in low EBX byte, feature
// flags in ECX/EDX.
658 getX86CpuIDAndInfo(1, &EAX, &EBX, &ECX, &EDX);
659 detectX86FamilyModel(EAX, &Family, &Model);
660 Brand_id = EBX & 0xff;
662 // Find available features.
663 getAvailableFeatures(ECX, EDX, MaxLeaf, &Features, &Features2);
664 __cpu_model.__cpu_features[0] = Features;
665 __cpu_features2 = Features2;
// Vendor-specific classification into type/subtype.
667 if (Vendor == SIG_INTEL) {
669 getIntelProcessorTypeAndSubtype(Family, Model, Brand_id, Features,
670 Features2, &(__cpu_model.__cpu_type),
671 &(__cpu_model.__cpu_subtype));
672 __cpu_model.__cpu_vendor = VENDOR_INTEL;
673 } else if (Vendor == SIG_AMD) {
675 getAMDProcessorTypeAndSubtype(Family, Model, Features, Features2,
676 &(__cpu_model.__cpu_type),
677 &(__cpu_model.__cpu_subtype));
678 __cpu_model.__cpu_vendor = VENDOR_AMD;
680 __cpu_model.__cpu_vendor = VENDOR_OTHER;
// Sanity-check that the published values stay inside the enum ranges.
682 assert(__cpu_model.__cpu_vendor < VENDOR_MAX);
683 assert(__cpu_model.__cpu_type < CPU_TYPE_MAX);
684 assert(__cpu_model.__cpu_subtype < CPU_SUBTYPE_MAX);