2 * Copyright (c) 2014 Andrew Turner
3 * Copyright (c) 2014 The FreeBSD Foundation
6 * Portions of this software were developed by Semihalf
7 * under sponsorship of the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/kernel.h>
39 #include <sys/sysctl.h>
40 #include <sys/systm.h>
42 #include <machine/atomic.h>
43 #include <machine/cpu.h>
44 #include <machine/cpufunc.h>
/*
 * Simple busy-flag used with atomic_cmpset/atomic_store below to serialize
 * identify-time updates of the shared cpu_print_regs / cpu_aff_levels state.
 */
46 static int ident_lock;
/* Machine name exported via the hw.machine sysctl below. */
48 char machine[] = "arm64";
50 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
54 * Per-CPU affinity as provided in MPIDR_EL1
55 * Indexed by CPU number in logical order selected by the system.
56 * Relevant fields can be extracted using CPU_AFFn macros,
57 * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system.
60 * Aff1 - Cluster number
61 * Aff0 - CPU number in Aff1 cluster
63 uint64_t __cpu_affinity[MAXCPU];
/*
 * Highest affinity level at which CPUs were observed to differ from CPU 0;
 * raised in identify_cpu() and consumed by print_cpu_features().
 */
64 static u_int cpu_aff_levels;
/*
 * Fields of the per-CPU description record (struct cpu_desc); the struct
 * opening and several members are not visible in this view.  Names below
 * are looked up from the implementer/part tables; id_aa64* hold raw copies
 * of the corresponding ID_AA64*_EL1 system registers.
 */
71 const char *cpu_impl_name;
72 const char *cpu_part_name;
79 uint64_t id_aa64isar0;
80 uint64_t id_aa64isar1;
81 uint64_t id_aa64mmfr0;
82 uint64_t id_aa64mmfr1;
/* One description record per possible CPU, indexed by logical CPU id. */
87 struct cpu_desc cpu_desc[MAXCPU];
/*
 * Bitmask of ID registers that differ from CPU 0; when a bit is set the
 * corresponding register is printed for every CPU, not just CPU 0.
 */
88 static u_int cpu_print_regs;
89 #define PRINT_ID_AA64_AFR0 0x00000001
90 #define PRINT_ID_AA64_AFR1 0x00000002
91 #define PRINT_ID_AA64_DFR0 0x00000004
92 #define PRINT_ID_AA64_DFR1 0x00000008
93 #define PRINT_ID_AA64_ISAR0 0x00000010
94 #define PRINT_ID_AA64_ISAR1 0x00000020
95 #define PRINT_ID_AA64_MMFR0 0x00000040
96 #define PRINT_ID_AA64_MMFR1 0x00000080
97 #define PRINT_ID_AA64_PFR0 0x00000100
98 #define PRINT_ID_AA64_PFR1 0x00000200
/* Human-readable name for a (PartNum, name) table entry (struct cpu_parts). */
102 const char *part_name;
/* Table terminator: part_id 0 matches anything in the lookup loop below. */
104 #define CPU_PART_NONE { 0, "Unknown Processor" }
106 struct cpu_implementers {
108 const char *impl_name;
110 * Part number is implementation defined
111 * so each vendor will have its own set of values and names.
113 const struct cpu_parts *cpu_parts;
/* Table terminator: impl_id 0 matches anything in the lookup loop below. */
115 #define CPU_IMPLEMENTER_NONE { 0, "Unknown Implementer", cpu_parts_none }
118 * Per-implementer table of (PartNum, CPU Name) pairs.
/* ARM Ltd. part numbers known to this file. */
121 static const struct cpu_parts cpu_parts_arm[] = {
122 { CPU_PART_FOUNDATION, "Foundation-Model" },
123 { CPU_PART_CORTEX_A53, "Cortex-A53" },
124 { CPU_PART_CORTEX_A57, "Cortex-A57" },
/* Cavium part numbers. */
128 static const struct cpu_parts cpu_parts_cavium[] = {
129 { CPU_PART_THUNDER, "Thunder" },
/* Fallback table for implementers whose parts we do not decode. */
134 static const struct cpu_parts cpu_parts_none[] = {
139 * Implementers table.
/*
 * Maps MIDR implementer codes to a display name and a per-vendor part table.
 * Terminated by CPU_IMPLEMENTER_NONE (impl_id 0), which also serves as the
 * catch-all match in identify_cpu().
 */
141 const struct cpu_implementers cpu_implementers[] = {
142 { CPU_IMPL_ARM, "ARM", cpu_parts_arm },
143 { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none },
144 { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium },
145 { CPU_IMPL_DEC, "DEC", cpu_parts_none },
146 { CPU_IMPL_INFINEON, "IFX", cpu_parts_none },
147 { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none },
148 { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none },
149 { CPU_IMPL_APM, "APM", cpu_parts_none },
150 { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_none },
151 { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none },
152 { CPU_IMPL_INTEL, "Intel", cpu_parts_none },
153 CPU_IMPLEMENTER_NONE,
/*
 * SYSINIT hook: once SMP is up, print the feature summary for each CPU
 * via print_cpu_features().  (Surrounding loop/braces not visible here.)
 */
157 identify_cpu_sysinit(void *dummy __unused)
162 print_cpu_features(cpu);
165 SYSINIT(idenrity_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
/*
 * Print a one-line summary (implementer, part, revision, affinity) for the
 * given logical CPU, followed by a decoded dump of its AArch64 ID registers.
 * For CPUs other than 0 only the registers flagged in cpu_print_regs (i.e.
 * those that differ from CPU 0) are printed.
 */
168 print_cpu_features(u_int cpu)
172 printf("CPU%3d: %s %s r%dp%d", cpu, cpu_desc[cpu].cpu_impl_name,
173 cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant,
174 cpu_desc[cpu].cpu_revision);
176 printf(" affinity:");
/*
 * Cases appear to fall through from the highest populated affinity level
 * down to Aff0 — TODO confirm; the intervening case/break lines are not
 * visible in this view.
 */
177 switch(cpu_aff_levels) {
180 printf(" %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
183 printf(" %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
186 printf(" %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
189 case 0: /* On UP this will be zero */
190 printf(" %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
196 * There is a hardware errata where, if one CPU is performing a TLB
197 * invalidation while another is performing a store-exclusive the
198 * store-exclusive may return the wrong status. A workaround seems
199 * to be to use an IPI to invalidate on each CPU, however given the
200 * limited number of affected units (pass 1.1 is the evaluation
201 * hardware revision), and the lack of information from Cavium
202 * this has not been implemented.
204 * At the time of writing this the only information is from:
205 * https://lkml.org/lkml/2016/8/4/722
208 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1 on its own also
209 * triggers on pass 2.0+.
211 if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
212 CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1)
213 printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
214 "hardware bugs that may cause the incorrect operation of "
215 "atomic operations.\n");
/*
 * For non-boot CPUs with no differing registers there is nothing more to
 * print — presumably an early return follows here (not visible in view).
 */
217 if (cpu != 0 && cpu_print_regs == 0)
/*
 * Emits "" before the first field and "," before each subsequent one;
 * relies on a local `printed` counter declared/reset outside this view.
 */
220 #define SEP_STR ((printed++) == 0) ? "" : ","
222 /* AArch64 Instruction Set Attribute Register 0 */
223 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
225 printf(" Instruction Set Attributes 0 = <");
227 switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
228 case ID_AA64ISAR0_RDM_NONE:
230 case ID_AA64ISAR0_RDM_IMPL:
231 printf("%sRDM", SEP_STR);
234 printf("%sUnknown RDM", SEP_STR);
237 switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
238 case ID_AA64ISAR0_ATOMIC_NONE:
240 case ID_AA64ISAR0_ATOMIC_IMPL:
241 printf("%sAtomic", SEP_STR);
244 printf("%sUnknown Atomic", SEP_STR);
247 switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
248 case ID_AA64ISAR0_AES_NONE:
250 case ID_AA64ISAR0_AES_BASE:
251 printf("%sAES", SEP_STR);
253 case ID_AA64ISAR0_AES_PMULL:
254 printf("%sAES+PMULL", SEP_STR);
257 printf("%sUnknown AES", SEP_STR);
261 switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
262 case ID_AA64ISAR0_SHA1_NONE:
264 case ID_AA64ISAR0_SHA1_BASE:
265 printf("%sSHA1", SEP_STR);
268 printf("%sUnknown SHA1", SEP_STR);
272 switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
273 case ID_AA64ISAR0_SHA2_NONE:
275 case ID_AA64ISAR0_SHA2_BASE:
276 printf("%sSHA2", SEP_STR);
279 printf("%sUnknown SHA2", SEP_STR);
283 switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
284 case ID_AA64ISAR0_CRC32_NONE:
286 case ID_AA64ISAR0_CRC32_BASE:
287 printf("%sCRC32", SEP_STR);
290 printf("%sUnknown CRC32", SEP_STR);
/* Dump any reserved/unhandled bits raw so nothing is silently dropped. */
294 if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
295 printf("%s%#lx", SEP_STR,
296 cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);
301 /* AArch64 Instruction Set Attribute Register 1 */
302 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
303 printf(" Instruction Set Attributes 1 = <%#lx>\n",
304 cpu_desc[cpu].id_aa64isar1);
307 /* AArch64 Processor Feature Register 0 */
308 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
310 printf(" Processor Features 0 = <");
311 switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
312 case ID_AA64PFR0_GIC_CPUIF_NONE:
314 case ID_AA64PFR0_GIC_CPUIF_EN:
315 printf("%sGIC", SEP_STR);
318 printf("%sUnknown GIC interface", SEP_STR);
322 switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
323 case ID_AA64PFR0_ADV_SIMD_NONE:
325 case ID_AA64PFR0_ADV_SIMD_IMPL:
326 printf("%sAdvSIMD", SEP_STR);
329 printf("%sUnknown AdvSIMD", SEP_STR);
333 switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
334 case ID_AA64PFR0_FP_NONE:
336 case ID_AA64PFR0_FP_IMPL:
337 printf("%sFloat", SEP_STR);
340 printf("%sUnknown Float", SEP_STR);
344 switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
345 case ID_AA64PFR0_EL3_NONE:
346 printf("%sNo EL3", SEP_STR);
348 case ID_AA64PFR0_EL3_64:
349 printf("%sEL3", SEP_STR);
351 case ID_AA64PFR0_EL3_64_32:
352 printf("%sEL3 32", SEP_STR);
355 printf("%sUnknown EL3", SEP_STR);
359 switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
360 case ID_AA64PFR0_EL2_NONE:
361 printf("%sNo EL2", SEP_STR);
363 case ID_AA64PFR0_EL2_64:
364 printf("%sEL2", SEP_STR);
366 case ID_AA64PFR0_EL2_64_32:
367 printf("%sEL2 32", SEP_STR);
370 printf("%sUnknown EL2", SEP_STR);
374 switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
375 case ID_AA64PFR0_EL1_64:
376 printf("%sEL1", SEP_STR);
378 case ID_AA64PFR0_EL1_64_32:
379 printf("%sEL1 32", SEP_STR);
382 printf("%sUnknown EL1", SEP_STR);
386 switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
387 case ID_AA64PFR0_EL0_64:
388 printf("%sEL0", SEP_STR);
390 case ID_AA64PFR0_EL0_64_32:
391 printf("%sEL0 32", SEP_STR);
394 printf("%sUnknown EL0", SEP_STR);
398 if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
399 printf("%s%#lx", SEP_STR,
400 cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);
405 /* AArch64 Processor Feature Register 1 */
406 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
407 printf(" Processor Features 1 = <%#lx>\n",
408 cpu_desc[cpu].id_aa64pfr1);
411 /* AArch64 Memory Model Feature Register 0 */
412 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
414 printf(" Memory Model Features 0 = <");
415 switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
416 case ID_AA64MMFR0_TGRAN4_NONE:
418 case ID_AA64MMFR0_TGRAN4_IMPL:
419 printf("%s4k Granule", SEP_STR);
422 printf("%sUnknown 4k Granule", SEP_STR);
426 switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
427 case ID_AA64MMFR0_TGRAN16_NONE:
429 case ID_AA64MMFR0_TGRAN16_IMPL:
430 printf("%s16k Granule", SEP_STR);
433 printf("%sUnknown 16k Granule", SEP_STR);
437 switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
438 case ID_AA64MMFR0_TGRAN64_NONE:
440 case ID_AA64MMFR0_TGRAN64_IMPL:
441 printf("%s64k Granule", SEP_STR);
444 printf("%sUnknown 64k Granule", SEP_STR);
448 switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
449 case ID_AA64MMFR0_BIGEND_FIXED:
451 case ID_AA64MMFR0_BIGEND_MIXED:
452 printf("%sMixedEndian", SEP_STR);
455 printf("%sUnknown Endian switching", SEP_STR);
459 switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
460 case ID_AA64MMFR0_BIGEND_EL0_FIXED:
462 case ID_AA64MMFR0_BIGEND_EL0_MIXED:
463 printf("%sEL0 MixEndian", SEP_STR);
466 printf("%sUnknown EL0 Endian switching", SEP_STR);
470 switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
471 case ID_AA64MMFR0_S_NS_MEM_NONE:
473 case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
474 printf("%sS/NS Mem", SEP_STR);
477 printf("%sUnknown S/NS Mem", SEP_STR);
481 switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
482 case ID_AA64MMFR0_ASID_BITS_8:
483 printf("%s8bit ASID", SEP_STR);
485 case ID_AA64MMFR0_ASID_BITS_16:
486 printf("%s16bit ASID", SEP_STR);
489 printf("%sUnknown ASID", SEP_STR);
493 switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
494 case ID_AA64MMFR0_PA_RANGE_4G:
495 printf("%s4GB PA", SEP_STR);
497 case ID_AA64MMFR0_PA_RANGE_64G:
498 printf("%s64GB PA", SEP_STR);
500 case ID_AA64MMFR0_PA_RANGE_1T:
501 printf("%s1TB PA", SEP_STR);
503 case ID_AA64MMFR0_PA_RANGE_4T:
504 printf("%s4TB PA", SEP_STR);
506 case ID_AA64MMFR0_PA_RANGE_16T:
507 printf("%s16TB PA", SEP_STR);
509 case ID_AA64MMFR0_PA_RANGE_256T:
510 printf("%s256TB PA", SEP_STR);
513 printf("%sUnknown PA Range", SEP_STR);
517 if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
518 printf("%s%#lx", SEP_STR,
519 cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
523 /* AArch64 Memory Model Feature Register 1 */
524 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
526 printf(" Memory Model Features 1 = <");
528 switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
529 case ID_AA64MMFR1_PAN_NONE:
531 case ID_AA64MMFR1_PAN_IMPL:
532 printf("%sPAN", SEP_STR);
535 printf("%sUnknown PAN", SEP_STR);
539 switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
540 case ID_AA64MMFR1_LO_NONE:
542 case ID_AA64MMFR1_LO_IMPL:
543 printf("%sLO", SEP_STR);
546 printf("%sUnknown LO", SEP_STR);
550 switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
551 case ID_AA64MMFR1_HPDS_NONE:
553 case ID_AA64MMFR1_HPDS_IMPL:
554 printf("%sHPDS", SEP_STR);
557 printf("%sUnknown HPDS", SEP_STR);
561 switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
562 case ID_AA64MMFR1_VH_NONE:
564 case ID_AA64MMFR1_VH_IMPL:
565 printf("%sVHE", SEP_STR);
568 printf("%sUnknown VHE", SEP_STR);
572 switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
573 case ID_AA64MMFR1_VMIDBITS_8:
575 case ID_AA64MMFR1_VMIDBITS_16:
576 printf("%s16 VMID bits", SEP_STR);
579 printf("%sUnknown VMID bits", SEP_STR);
583 switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
584 case ID_AA64MMFR1_HAFDBS_NONE:
586 case ID_AA64MMFR1_HAFDBS_AF:
587 printf("%sAF", SEP_STR);
589 case ID_AA64MMFR1_HAFDBS_AF_DBS:
590 printf("%sAF+DBS", SEP_STR);
593 printf("%sUnknown Hardware update AF/DBS", SEP_STR);
597 if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
598 printf("%s%#lx", SEP_STR,
599 cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
603 /* AArch64 Debug Feature Register 0 */
604 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
606 printf(" Debug Features 0 = <");
607 printf("%s%lu CTX Breakpoints", SEP_STR,
608 ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));
610 printf("%s%lu Watchpoints", SEP_STR,
611 ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));
613 printf("%s%lu Breakpoints", SEP_STR,
614 ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));
616 switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
617 case ID_AA64DFR0_PMU_VER_NONE:
619 case ID_AA64DFR0_PMU_VER_3:
620 printf("%sPMUv3", SEP_STR);
622 case ID_AA64DFR0_PMU_VER_3_1:
623 printf("%sPMUv3+16 bit evtCount", SEP_STR);
625 case ID_AA64DFR0_PMU_VER_IMPL:
626 printf("%sImplementation defined PMU", SEP_STR);
629 printf("%sUnknown PMU", SEP_STR);
633 switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
634 case ID_AA64DFR0_TRACE_VER_NONE:
636 case ID_AA64DFR0_TRACE_VER_IMPL:
637 printf("%sTrace", SEP_STR);
640 printf("%sUnknown Trace", SEP_STR);
644 switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
645 case ID_AA64DFR0_DEBUG_VER_8:
646 printf("%sDebug v8", SEP_STR);
648 case ID_AA64DFR0_DEBUG_VER_8_VHE:
649 printf("%sDebug v8+VHE", SEP_STR);
652 printf("%sUnknown Debug", SEP_STR);
656 if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
657 printf("%s%#lx", SEP_STR,
658 cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
662 /* AArch64 Debug Feature Register 1 */
663 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
664 printf(" Debug Features 1 = <%#lx>\n",
665 cpu_desc[cpu].id_aa64dfr1);
668 /* AArch64 Auxiliary Feature Register 0 */
669 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
670 printf(" Auxiliary Features 0 = <%#lx>\n",
671 cpu_desc[cpu].id_aa64afr0);
674 /* AArch64 Auxiliary Feature Register 1 */
675 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
676 printf(" Auxiliary Features 1 = <%#lx>\n",
677 cpu_desc[cpu].id_aa64afr1);
/*
 * Interior of identify_cpu() (the function header precedes this view):
 * decode MIDR_EL1 into implementer/part/revision, snapshot the AArch64 ID
 * registers into cpu_desc[], and record which of them differ from CPU 0.
 */
691 const struct cpu_parts *cpu_partsp = NULL;
693 cpu = PCPU_GET(cpuid);
697 * Store midr to pcpu to allow fast reading
698 * from EL0, EL1 and assembly code.
700 PCPU_SET(midr, midr);
/* Look up the implementer; the impl_id == 0 sentinel matches anything. */
702 impl_id = CPU_IMPL(midr);
703 for (i = 0; i < nitems(cpu_implementers); i++) {
704 if (impl_id == cpu_implementers[i].impl_id ||
705 cpu_implementers[i].impl_id == 0) {
706 cpu_desc[cpu].cpu_impl = impl_id;
707 cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
708 cpu_partsp = cpu_implementers[i].cpu_parts;
713 part_id = CPU_PART(midr);
/*
 * NOTE(review): "&cpu_partsp[i] != NULL" is always true, so this loop
 * terminates only via the part_id == 0 sentinel entry matching below —
 * every part table must end with CPU_PART_NONE.
 */
714 for (i = 0; &cpu_partsp[i] != NULL; i++) {
715 if (part_id == cpu_partsp[i].part_id ||
716 cpu_partsp[i].part_id == 0) {
717 cpu_desc[cpu].cpu_part_num = part_id;
718 cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
723 cpu_desc[cpu].cpu_revision = CPU_REV(midr);
724 cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
726 /* Save affinity for current CPU */
727 cpu_desc[cpu].mpidr = get_mpidr();
728 CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
/* Snapshot the raw ID registers for later comparison and printing. */
730 cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
731 cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1);
732 cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
733 cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1);
734 cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
735 cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
736 cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
737 cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);
741 * This code must run on one cpu at a time, but we are
742 * not scheduling on the current core so implement a
/* Spin on the busy flag, sleeping in WFE until woken by SEV below. */
745 while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
746 __asm __volatile("wfe" ::: "memory");
/*
 * Raise cpu_aff_levels to the highest affinity field in which this CPU
 * differs from CPU 0 — cases appear to fall through upward; the updates
 * and break lines are not visible in this view.
 */
748 switch (cpu_aff_levels) {
750 if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
751 CPU_AFF0(cpu_desc[0].mpidr))
755 if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
756 CPU_AFF1(cpu_desc[0].mpidr))
760 if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
761 CPU_AFF2(cpu_desc[0].mpidr))
765 if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
766 CPU_AFF3(cpu_desc[0].mpidr))
/* Flag every ID register that differs from CPU 0 for per-CPU printing. */
771 if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
772 cpu_print_regs |= PRINT_ID_AA64_AFR0;
773 if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
774 cpu_print_regs |= PRINT_ID_AA64_AFR1;
776 if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
777 cpu_print_regs |= PRINT_ID_AA64_DFR0;
778 if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
779 cpu_print_regs |= PRINT_ID_AA64_DFR1;
781 if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
782 cpu_print_regs |= PRINT_ID_AA64_ISAR0;
783 if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
784 cpu_print_regs |= PRINT_ID_AA64_ISAR1;
786 if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
787 cpu_print_regs |= PRINT_ID_AA64_MMFR0;
788 if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
789 cpu_print_regs |= PRINT_ID_AA64_MMFR1;
791 if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
792 cpu_print_regs |= PRINT_ID_AA64_PFR0;
793 if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
794 cpu_print_regs |= PRINT_ID_AA64_PFR1;
796 /* Wake up the other CPUs */
797 atomic_store_rel_int(&ident_lock, 0);
798 __asm __volatile("sev" ::: "memory");