2 * Copyright (c) 2014 Andrew Turner
3 * Copyright (c) 2014 The FreeBSD Foundation
6 * Portions of this software were developed by Semihalf
7 * under sponsorship of the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/kernel.h>
39 #include <sys/sysctl.h>
40 #include <sys/systm.h>
42 #include <machine/atomic.h>
43 #include <machine/cpu.h>
44 #include <machine/cpufunc.h>
/*
 * Spin flag serialising the per-CPU identification code; acquired with
 * atomic_cmpset_acq_int()+wfe and released with atomic_store_rel_int()+sev
 * (see identify_cpu below).
 */
46 static int ident_lock;
/* Architecture name exported read-only as the hw.machine sysctl. */
48 char machine[] = "arm64";
50 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
54 * Per-CPU affinity as provided in MPIDR_EL1
55 * Indexed by CPU number in logical order selected by the system.
56 * Relevant fields can be extracted using CPU_AFFn macros,
57 * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system.
60 * Aff1 - Cluster number
61 * Aff0 - CPU number in Aff1 cluster
63 uint64_t __cpu_affinity[MAXCPU];
/*
 * Highest MPIDR affinity level observed to differ between any CPU and
 * CPU 0; grown in identify_cpu() and used by print_cpu_features().
 */
64 static u_int cpu_aff_levels;
/*
 * NOTE(review): the following fields belong to struct cpu_desc, whose
 * opening declaration is not visible in this chunk — confirm against the
 * full file.
 */
/* Human-readable implementer / part names resolved from MIDR_EL1. */
71 const char *cpu_impl_name;
72 const char *cpu_part_name;
/* Cached copies of the AArch64 ID registers, read in identify_cpu(). */
79 uint64_t id_aa64isar0;
80 uint64_t id_aa64isar1;
81 uint64_t id_aa64mmfr0;
82 uint64_t id_aa64mmfr1;
/* Per-CPU identification data, indexed by logical CPU id. */
87 struct cpu_desc cpu_desc[MAXCPU];
/*
 * Bitmask of the PRINT_ID_AA64_* flags below; a bit is set when a
 * secondary CPU's corresponding ID register differs from CPU 0, forcing
 * print_cpu_features() to print that register for the secondary CPU too.
 */
88 static u_int cpu_print_regs;
89 #define PRINT_ID_AA64_AFR0 0x00000001
90 #define PRINT_ID_AA64_AFR1 0x00000002
91 #define PRINT_ID_AA64_DFR0 0x00000010
92 #define PRINT_ID_AA64_DFR1 0x00000020
93 #define PRINT_ID_AA64_ISAR0 0x00000100
94 #define PRINT_ID_AA64_ISAR1 0x00000200
95 #define PRINT_ID_AA64_MMFR0 0x00001000
96 #define PRINT_ID_AA64_MMFR1 0x00002000
97 #define PRINT_ID_AA64_PFR0 0x00010000
98 #define PRINT_ID_AA64_PFR1 0x00020000
/* Human-readable name for a (part_id, name) table entry. */
102 const char *part_name;
/*
 * Sentinel/catch-all entry: part_id 0 matches anything, so it must be
 * the last entry of every parts table (see the lookup in identify_cpu).
 */
104 #define CPU_PART_NONE { 0, "Unknown Processor" }
106 struct cpu_implementers {
108 const char *impl_name;
110 * Part number is implementation defined
111 * so each vendor will have its own set of values and names.
113 const struct cpu_parts *cpu_parts;
/* Sentinel/catch-all implementer entry; must terminate the table below. */
115 #define CPU_IMPLEMENTER_NONE { 0, "Unknown Implementer", cpu_parts_none }
118 * Per-implementer table of (PartNum, CPU Name) pairs.
121 static const struct cpu_parts cpu_parts_arm[] = {
122 { CPU_PART_FOUNDATION, "Foundation-Model" },
123 { CPU_PART_CORTEX_A35, "Cortex-A35" },
124 { CPU_PART_CORTEX_A53, "Cortex-A53" },
125 { CPU_PART_CORTEX_A55, "Cortex-A55" },
126 { CPU_PART_CORTEX_A57, "Cortex-A57" },
127 { CPU_PART_CORTEX_A72, "Cortex-A72" },
128 { CPU_PART_CORTEX_A73, "Cortex-A73" },
129 { CPU_PART_CORTEX_A75, "Cortex-A75" },
/* Cavium parts table. */
133 static const struct cpu_parts cpu_parts_cavium[] = {
134 { CPU_PART_THUNDER, "Thunder" },
/* Fallback table for implementers whose parts we do not decode. */
139 static const struct cpu_parts cpu_parts_none[] = {
144 * Implementers table.
146 const struct cpu_implementers cpu_implementers[] = {
147 { CPU_IMPL_ARM, "ARM", cpu_parts_arm },
148 { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none },
149 { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium },
150 { CPU_IMPL_DEC, "DEC", cpu_parts_none },
151 { CPU_IMPL_INFINEON, "IFX", cpu_parts_none },
152 { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none },
153 { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none },
154 { CPU_IMPL_APM, "APM", cpu_parts_none },
155 { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_none },
156 { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none },
157 { CPU_IMPL_INTEL, "Intel", cpu_parts_none },
/* Catch-all; impl_id == 0 terminates the lookup in identify_cpu(). */
158 CPU_IMPLEMENTER_NONE,
162 identify_cpu_sysinit(void *dummy __unused)
167 print_cpu_features(cpu);
170 SYSINIT(idenrity_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
/*
 * Pretty-print the identity (implementer, part, revision, affinity) and
 * the decoded AArch64 ID-register feature fields of the given CPU.
 * CPU 0 always prints every decoded register; secondary CPUs print only
 * the registers whose bit is set in cpu_print_regs (i.e. registers that
 * differ from CPU 0, as recorded by identify_cpu()).
 */
173 print_cpu_features(u_int cpu)
177 printf("CPU%3d: %s %s r%dp%d", cpu, cpu_desc[cpu].cpu_impl_name,
178 cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant,
179 cpu_desc[cpu].cpu_revision);
181 printf(" affinity:");
/* Print one MPIDR field per affinity level detected by identify_cpu(). */
182 switch(cpu_aff_levels) {
185 printf(" %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
188 printf(" %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
191 printf(" %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
194 case 0: /* On UP this will be zero */
195 printf(" %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
201 * There is a hardware errata where, if one CPU is performing a TLB
202 * invalidation while another is performing a store-exclusive the
203 * store-exclusive may return the wrong status. A workaround seems
204 * to be to use an IPI to invalidate on each CPU, however given the
205 * limited number of affected units (pass 1.1 is the evaluation
206 * hardware revision), and the lack of information from Cavium
207 * this has not been implemented.
209 * At the time of writing this the only information is from:
210 * https://lkml.org/lkml/2016/8/4/722
213 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1 on its own also
214 * triggers on pass 2.0+.
216 if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
217 CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1)
218 printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
219 "hardware bugs that may cause the incorrect operation of "
220 "atomic operations.\n");
/*
 * Secondary CPUs are silent beyond this point unless identify_cpu()
 * flagged at least one ID register as differing from CPU 0.
 */
222 if (cpu != 0 && cpu_print_regs == 0)
/*
 * Comma separator for the feature lists: expands to "" before the first
 * item and "," afterwards.  Note (printed++) is evaluated once per use,
 * so the side effect is intentional.
 */
225 #define SEP_STR ((printed++) == 0) ? "" : ","
227 /* AArch64 Instruction Set Attribute Register 0 */
228 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
230 printf(" Instruction Set Attributes 0 = <");
232 switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
233 case ID_AA64ISAR0_RDM_NONE:
235 case ID_AA64ISAR0_RDM_IMPL:
236 printf("%sRDM", SEP_STR);
239 printf("%sUnknown RDM", SEP_STR);
242 switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
243 case ID_AA64ISAR0_ATOMIC_NONE:
245 case ID_AA64ISAR0_ATOMIC_IMPL:
246 printf("%sAtomic", SEP_STR);
249 printf("%sUnknown Atomic", SEP_STR);
252 switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
253 case ID_AA64ISAR0_AES_NONE:
255 case ID_AA64ISAR0_AES_BASE:
256 printf("%sAES", SEP_STR);
258 case ID_AA64ISAR0_AES_PMULL:
259 printf("%sAES+PMULL", SEP_STR);
262 printf("%sUnknown AES", SEP_STR);
266 switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
267 case ID_AA64ISAR0_SHA1_NONE:
269 case ID_AA64ISAR0_SHA1_BASE:
270 printf("%sSHA1", SEP_STR);
273 printf("%sUnknown SHA1", SEP_STR);
277 switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
278 case ID_AA64ISAR0_SHA2_NONE:
280 case ID_AA64ISAR0_SHA2_BASE:
281 printf("%sSHA2", SEP_STR);
284 printf("%sUnknown SHA2", SEP_STR);
288 switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
289 case ID_AA64ISAR0_CRC32_NONE:
291 case ID_AA64ISAR0_CRC32_BASE:
292 printf("%sCRC32", SEP_STR);
295 printf("%sUnknown CRC32", SEP_STR);
/* Any bits outside the decoded mask are dumped raw in hex. */
299 if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
300 printf("%s%#lx", SEP_STR,
301 cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);
306 /* AArch64 Instruction Set Attribute Register 1 */
307 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
308 printf(" Instruction Set Attributes 1 = <%#lx>\n",
309 cpu_desc[cpu].id_aa64isar1);
312 /* AArch64 Processor Feature Register 0 */
313 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
315 printf(" Processor Features 0 = <");
316 switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
317 case ID_AA64PFR0_GIC_CPUIF_NONE:
319 case ID_AA64PFR0_GIC_CPUIF_EN:
320 printf("%sGIC", SEP_STR);
323 printf("%sUnknown GIC interface", SEP_STR);
327 switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
328 case ID_AA64PFR0_ADV_SIMD_NONE:
330 case ID_AA64PFR0_ADV_SIMD_IMPL:
331 printf("%sAdvSIMD", SEP_STR);
334 printf("%sUnknown AdvSIMD", SEP_STR);
338 switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
339 case ID_AA64PFR0_FP_NONE:
341 case ID_AA64PFR0_FP_IMPL:
342 printf("%sFloat", SEP_STR);
345 printf("%sUnknown Float", SEP_STR);
349 switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
350 case ID_AA64PFR0_EL3_NONE:
351 printf("%sNo EL3", SEP_STR);
353 case ID_AA64PFR0_EL3_64:
354 printf("%sEL3", SEP_STR);
356 case ID_AA64PFR0_EL3_64_32:
357 printf("%sEL3 32", SEP_STR);
360 printf("%sUnknown EL3", SEP_STR);
364 switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
365 case ID_AA64PFR0_EL2_NONE:
366 printf("%sNo EL2", SEP_STR);
368 case ID_AA64PFR0_EL2_64:
369 printf("%sEL2", SEP_STR);
371 case ID_AA64PFR0_EL2_64_32:
372 printf("%sEL2 32", SEP_STR);
375 printf("%sUnknown EL2", SEP_STR);
379 switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
380 case ID_AA64PFR0_EL1_64:
381 printf("%sEL1", SEP_STR);
383 case ID_AA64PFR0_EL1_64_32:
384 printf("%sEL1 32", SEP_STR);
387 printf("%sUnknown EL1", SEP_STR);
391 switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
392 case ID_AA64PFR0_EL0_64:
393 printf("%sEL0", SEP_STR);
395 case ID_AA64PFR0_EL0_64_32:
396 printf("%sEL0 32", SEP_STR);
399 printf("%sUnknown EL0", SEP_STR);
403 if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
404 printf("%s%#lx", SEP_STR,
405 cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);
410 /* AArch64 Processor Feature Register 1 */
411 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
412 printf(" Processor Features 1 = <%#lx>\n",
413 cpu_desc[cpu].id_aa64pfr1);
416 /* AArch64 Memory Model Feature Register 0 */
417 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
419 printf(" Memory Model Features 0 = <");
420 switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
421 case ID_AA64MMFR0_TGRAN4_NONE:
423 case ID_AA64MMFR0_TGRAN4_IMPL:
424 printf("%s4k Granule", SEP_STR);
427 printf("%sUnknown 4k Granule", SEP_STR);
431 switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
432 case ID_AA64MMFR0_TGRAN16_NONE:
434 case ID_AA64MMFR0_TGRAN16_IMPL:
435 printf("%s16k Granule", SEP_STR);
438 printf("%sUnknown 16k Granule", SEP_STR);
442 switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
443 case ID_AA64MMFR0_TGRAN64_NONE:
445 case ID_AA64MMFR0_TGRAN64_IMPL:
446 printf("%s64k Granule", SEP_STR);
449 printf("%sUnknown 64k Granule", SEP_STR);
453 switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
454 case ID_AA64MMFR0_BIGEND_FIXED:
456 case ID_AA64MMFR0_BIGEND_MIXED:
457 printf("%sMixedEndian", SEP_STR);
460 printf("%sUnknown Endian switching", SEP_STR);
464 switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
465 case ID_AA64MMFR0_BIGEND_EL0_FIXED:
467 case ID_AA64MMFR0_BIGEND_EL0_MIXED:
468 printf("%sEL0 MixEndian", SEP_STR);
471 printf("%sUnknown EL0 Endian switching", SEP_STR);
475 switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
476 case ID_AA64MMFR0_S_NS_MEM_NONE:
478 case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
479 printf("%sS/NS Mem", SEP_STR);
482 printf("%sUnknown S/NS Mem", SEP_STR);
486 switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
487 case ID_AA64MMFR0_ASID_BITS_8:
488 printf("%s8bit ASID", SEP_STR);
490 case ID_AA64MMFR0_ASID_BITS_16:
491 printf("%s16bit ASID", SEP_STR);
494 printf("%sUnknown ASID", SEP_STR);
498 switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
499 case ID_AA64MMFR0_PA_RANGE_4G:
500 printf("%s4GB PA", SEP_STR);
502 case ID_AA64MMFR0_PA_RANGE_64G:
503 printf("%s64GB PA", SEP_STR);
505 case ID_AA64MMFR0_PA_RANGE_1T:
506 printf("%s1TB PA", SEP_STR);
508 case ID_AA64MMFR0_PA_RANGE_4T:
509 printf("%s4TB PA", SEP_STR);
511 case ID_AA64MMFR0_PA_RANGE_16T:
512 printf("%s16TB PA", SEP_STR);
514 case ID_AA64MMFR0_PA_RANGE_256T:
515 printf("%s256TB PA", SEP_STR);
518 printf("%sUnknown PA Range", SEP_STR);
522 if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
523 printf("%s%#lx", SEP_STR,
524 cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
528 /* AArch64 Memory Model Feature Register 1 */
529 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
531 printf(" Memory Model Features 1 = <");
533 switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
534 case ID_AA64MMFR1_PAN_NONE:
536 case ID_AA64MMFR1_PAN_IMPL:
537 printf("%sPAN", SEP_STR);
540 printf("%sUnknown PAN", SEP_STR);
544 switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
545 case ID_AA64MMFR1_LO_NONE:
547 case ID_AA64MMFR1_LO_IMPL:
548 printf("%sLO", SEP_STR);
551 printf("%sUnknown LO", SEP_STR);
555 switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
556 case ID_AA64MMFR1_HPDS_NONE:
558 case ID_AA64MMFR1_HPDS_IMPL:
559 printf("%sHPDS", SEP_STR);
562 printf("%sUnknown HPDS", SEP_STR);
566 switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
567 case ID_AA64MMFR1_VH_NONE:
569 case ID_AA64MMFR1_VH_IMPL:
570 printf("%sVHE", SEP_STR);
573 printf("%sUnknown VHE", SEP_STR);
577 switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
578 case ID_AA64MMFR1_VMIDBITS_8:
580 case ID_AA64MMFR1_VMIDBITS_16:
581 printf("%s16 VMID bits", SEP_STR);
584 printf("%sUnknown VMID bits", SEP_STR);
588 switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
589 case ID_AA64MMFR1_HAFDBS_NONE:
591 case ID_AA64MMFR1_HAFDBS_AF:
592 printf("%sAF", SEP_STR);
594 case ID_AA64MMFR1_HAFDBS_AF_DBS:
595 printf("%sAF+DBS", SEP_STR);
598 printf("%sUnknown Hardware update AF/DBS", SEP_STR);
602 if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
603 printf("%s%#lx", SEP_STR,
604 cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
608 /* AArch64 Debug Feature Register 0 */
609 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
611 printf(" Debug Features 0 = <");
612 printf("%s%lu CTX Breakpoints", SEP_STR,
613 ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));
615 printf("%s%lu Watchpoints", SEP_STR,
616 ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));
618 printf("%s%lu Breakpoints", SEP_STR,
619 ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));
621 switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
622 case ID_AA64DFR0_PMU_VER_NONE:
624 case ID_AA64DFR0_PMU_VER_3:
625 printf("%sPMUv3", SEP_STR);
627 case ID_AA64DFR0_PMU_VER_3_1:
628 printf("%sPMUv3+16 bit evtCount", SEP_STR);
630 case ID_AA64DFR0_PMU_VER_IMPL:
631 printf("%sImplementation defined PMU", SEP_STR);
634 printf("%sUnknown PMU", SEP_STR);
638 switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
639 case ID_AA64DFR0_TRACE_VER_NONE:
641 case ID_AA64DFR0_TRACE_VER_IMPL:
642 printf("%sTrace", SEP_STR);
645 printf("%sUnknown Trace", SEP_STR);
649 switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
650 case ID_AA64DFR0_DEBUG_VER_8:
651 printf("%sDebug v8", SEP_STR);
653 case ID_AA64DFR0_DEBUG_VER_8_VHE:
654 printf("%sDebug v8+VHE", SEP_STR);
657 printf("%sUnknown Debug", SEP_STR);
661 if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
662 printf("%s%#lx", SEP_STR,
663 cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
667 /* AArch64 Debug Feature Register 1 (comment fixed; was mislabelled "Memory Model Feature Register 1") */
668 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
669 printf(" Debug Features 1 = <%#lx>\n",
670 cpu_desc[cpu].id_aa64dfr1);
673 /* AArch64 Auxiliary Feature Register 0 */
674 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
675 printf(" Auxiliary Features 0 = <%#lx>\n",
676 cpu_desc[cpu].id_aa64afr0);
679 /* AArch64 Auxiliary Feature Register 1 */
680 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
681 printf(" Auxiliary Features 1 = <%#lx>\n",
682 cpu_desc[cpu].id_aa64afr1);
/*
 * NOTE(review): interior of identify_cpu(); its signature and the
 * declarations of cpu, i, impl_id, part_id and midr are not visible in
 * this chunk — confirm against the full file.
 */
696 const struct cpu_parts *cpu_partsp = NULL;
698 cpu = PCPU_GET(cpuid);
702 * Store midr to pcpu to allow fast reading
703 * from EL0, EL1 and assembly code.
705 PCPU_SET(midr, midr);
/*
 * Resolve the implementer field of MIDR_EL1 to a name and parts table.
 * The final table entry has impl_id == 0 and acts as a catch-all.
 */
707 impl_id = CPU_IMPL(midr);
708 for (i = 0; i < nitems(cpu_implementers); i++) {
709 if (impl_id == cpu_implementers[i].impl_id ||
710 cpu_implementers[i].impl_id == 0) {
711 cpu_desc[cpu].cpu_impl = impl_id;
712 cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
713 cpu_partsp = cpu_implementers[i].cpu_parts;
/*
 * NOTE(review): "&cpu_partsp[i] != NULL" is always true, so this loop
 * only terminates via the CPU_PART_NONE sentinel (part_id == 0)
 * matching as a catch-all.  Consider a condition on the entry contents
 * (e.g. cpu_partsp[i].part_name != NULL) instead.
 */
718 part_id = CPU_PART(midr);
719 for (i = 0; &cpu_partsp[i] != NULL; i++) {
720 if (part_id == cpu_partsp[i].part_id ||
721 cpu_partsp[i].part_id == 0) {
722 cpu_desc[cpu].cpu_part_num = part_id;
723 cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
728 cpu_desc[cpu].cpu_revision = CPU_REV(midr);
729 cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
731 /* Save affinity for current CPU */
732 cpu_desc[cpu].mpidr = get_mpidr();
733 CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
/* Snapshot the AArch64 ID registers for later printing/comparison. */
735 cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
736 cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1);
737 cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
738 cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1);
739 cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
740 cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
741 cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
742 cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);
746 * This code must run on one cpu at a time, but we are
747 * not scheduling on the current core so implement a
/*
 * Hand-rolled spinlock: acquire with an acquire-ordered CAS; losers
 * sleep in wfe until the holder's sev (below) wakes them.
 */
750 while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
751 __asm __volatile("wfe" ::: "memory");
/*
 * Grow cpu_aff_levels to cover every MPIDR affinity field in which this
 * CPU differs from CPU 0 (fall-through comparisons against cpu_desc[0]).
 */
753 switch (cpu_aff_levels) {
755 if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
756 CPU_AFF0(cpu_desc[0].mpidr))
760 if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
761 CPU_AFF1(cpu_desc[0].mpidr))
765 if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
766 CPU_AFF2(cpu_desc[0].mpidr))
770 if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
771 CPU_AFF3(cpu_desc[0].mpidr))
/*
 * Flag every ID register that differs from CPU 0 so that
 * print_cpu_features() also prints it for this secondary CPU.
 */
776 if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
777 cpu_print_regs |= PRINT_ID_AA64_AFR0;
778 if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
779 cpu_print_regs |= PRINT_ID_AA64_AFR1;
781 if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
782 cpu_print_regs |= PRINT_ID_AA64_DFR0;
783 if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
784 cpu_print_regs |= PRINT_ID_AA64_DFR1;
786 if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
787 cpu_print_regs |= PRINT_ID_AA64_ISAR0;
788 if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
789 cpu_print_regs |= PRINT_ID_AA64_ISAR1;
791 if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
792 cpu_print_regs |= PRINT_ID_AA64_MMFR0;
793 if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
794 cpu_print_regs |= PRINT_ID_AA64_MMFR1;
796 if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
797 cpu_print_regs |= PRINT_ID_AA64_PFR0;
798 if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
799 cpu_print_regs |= PRINT_ID_AA64_PFR1;
801 /* Wake up the other CPUs */
802 atomic_store_rel_int(&ident_lock, 0);
803 __asm __volatile("sev" ::: "memory");