2 * Copyright (c) 2014 Andrew Turner
3 * Copyright (c) 2014 The FreeBSD Foundation
6 * Portions of this software were developed by Semihalf
7 * under sponsorship of the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/kernel.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
43 #include <machine/atomic.h>
44 #include <machine/cpu.h>
45 #include <machine/cpufunc.h>
46 #include <machine/undefined.h>
/*
 * Busy-wait lock serialising the per-CPU identification path: taken with
 * atomic_cmpset_acq_int()/wfe and released with atomic_store_rel_int()/sev
 * (see identify_cpu below).
 */
48 static int ident_lock;
50 char machine[] = "arm64";
/* Export the machine name via the hw.machine sysctl. */
52 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
56 * Per-CPU affinity as provided in MPIDR_EL1
57 * Indexed by CPU number in logical order selected by the system.
58 * Relevant fields can be extracted using CPU_AFFn macros,
59 * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system.
62 * Aff1 - Cluster number
63 * Aff0 - CPU number in Aff1 cluster
65 uint64_t __cpu_affinity[MAXCPU];
/*
 * Number of affinity levels in use; presumably raised in identify_cpu when a
 * CPU's Aff fields differ from CPU 0's (the raising code is not visible in
 * this excerpt) — TODO confirm.
 */
66 static u_int cpu_aff_levels;
/*
 * Members of struct cpu_desc (the struct declaration opens above this
 * excerpt): human-readable implementer/part names and raw copies of the
 * AArch64 ID registers read in identify_cpu.
 */
73 const char *cpu_impl_name;
74 const char *cpu_part_name;
81 uint64_t id_aa64isar0;
82 uint64_t id_aa64isar1;
83 uint64_t id_aa64mmfr0;
84 uint64_t id_aa64mmfr1;
85 uint64_t id_aa64mmfr2;
/* Per-CPU descriptors, indexed by logical CPU id. */
90 struct cpu_desc cpu_desc[MAXCPU];
/* Sanitised descriptor exposed to userland via the EL0 MRS emulation. */
91 struct cpu_desc user_cpu_desc;
/*
 * Bitmask of ID registers that differ from CPU 0's values and are therefore
 * printed for every CPU, not just CPU 0 (bits set in identify_cpu, tested in
 * print_cpu_features).
 */
92 static u_int cpu_print_regs;
93 #define PRINT_ID_AA64_AFR0 0x00000001
94 #define PRINT_ID_AA64_AFR1 0x00000002
95 #define PRINT_ID_AA64_DFR0 0x00000010
96 #define PRINT_ID_AA64_DFR1 0x00000020
97 #define PRINT_ID_AA64_ISAR0 0x00000100
98 #define PRINT_ID_AA64_ISAR1 0x00000200
99 #define PRINT_ID_AA64_MMFR0 0x00001000
100 #define PRINT_ID_AA64_MMFR1 0x00002000
101 #define PRINT_ID_AA64_MMFR2 0x00004000
102 #define PRINT_ID_AA64_PFR0 0x00010000
103 #define PRINT_ID_AA64_PFR1 0x00020000
/* (PartNum, name) pair; the struct cpu_parts opening is not in this excerpt. */
107 const char *part_name;
/*
 * Sentinel table entry: part_id == 0 terminates the lookup loop in
 * identify_cpu and names unknown parts.
 */
109 #define CPU_PART_NONE { 0, "Unknown Processor" }
111 struct cpu_implementers {
113 const char *impl_name;
115 * Part number is implementation defined
116 * so each vendor will have its own set of values and names.
118 const struct cpu_parts *cpu_parts;
/* Sentinel implementer entry: impl_id == 0 terminates the lookup loop. */
120 #define CPU_IMPLEMENTER_NONE { 0, "Unknown Implementer", cpu_parts_none }
123 * Per-implementer table of (PartNum, CPU Name) pairs.
/*
 * Each parts table presumably ends with a CPU_PART_NONE sentinel (the
 * terminating entries are not visible in this excerpt) — TODO confirm.
 */
126 static const struct cpu_parts cpu_parts_arm[] = {
127 { CPU_PART_FOUNDATION, "Foundation-Model" },
128 { CPU_PART_CORTEX_A35, "Cortex-A35" },
129 { CPU_PART_CORTEX_A53, "Cortex-A53" },
130 { CPU_PART_CORTEX_A55, "Cortex-A55" },
131 { CPU_PART_CORTEX_A57, "Cortex-A57" },
132 { CPU_PART_CORTEX_A72, "Cortex-A72" },
133 { CPU_PART_CORTEX_A73, "Cortex-A73" },
134 { CPU_PART_CORTEX_A75, "Cortex-A75" },
138 static const struct cpu_parts cpu_parts_cavium[] = {
139 { CPU_PART_THUNDERX, "ThunderX" },
140 { CPU_PART_THUNDERX2, "ThunderX2" },
/* Fallback table for implementers with no known part list. */
145 static const struct cpu_parts cpu_parts_none[] = {
150 * Implementers table.
152 const struct cpu_implementers cpu_implementers[] = {
153 { CPU_IMPL_ARM, "ARM", cpu_parts_arm },
154 { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none },
155 { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium },
156 { CPU_IMPL_DEC, "DEC", cpu_parts_none },
157 { CPU_IMPL_INFINEON, "IFX", cpu_parts_none },
158 { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none },
159 { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none },
160 { CPU_IMPL_APM, "APM", cpu_parts_none },
161 { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_none },
162 { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none },
163 { CPU_IMPL_INTEL, "Intel", cpu_parts_none },
164 CPU_IMPLEMENTER_NONE,
/*
 * mrs_field.type encoding: low nibble holds the field policy
 * (MRS_INVALID terminates a table; update_user_regs also handles MRS_EXACT
 * and MRS_LOWER), upper bits carry the forced value for MRS_EXACT_VAL.
 */
167 #define MRS_TYPE_MASK 0xf
168 #define MRS_INVALID 0
170 #define MRS_EXACT_VAL(x) (MRS_EXACT | ((x) << 4))
/* Recover the forced field value stored by MRS_EXACT_VAL. */
171 #define MRS_EXACT_FIELD(x) ((x) >> 4)
180 #define MRS_FIELD(_sign, _type, _shift) \
/* Table terminator: type == MRS_INVALID stops the fields loop. */
187 #define MRS_FIELD_END { .type = MRS_INVALID, }
/* Field policy for ID_AA64ISAR0_EL1: all fields are unsigned, lower-of. */
189 static struct mrs_field id_aa64isar0_fields[] = {
190 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_DP_SHIFT),
191 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM4_SHIFT),
192 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM3_SHIFT),
193 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA3_SHIFT),
194 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_RDM_SHIFT),
195 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_ATOMIC_SHIFT),
196 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_CRC32_SHIFT),
197 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA2_SHIFT),
198 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA1_SHIFT),
199 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_AES_SHIFT),
/* Field policy for ID_AA64ISAR1_EL1: all fields are unsigned, lower-of. */
203 static struct mrs_field id_aa64isar1_fields[] = {
204 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_GPI_SHIFT),
205 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_GPA_SHIFT),
206 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_LRCPC_SHIFT),
207 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_FCMA_SHIFT),
208 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_JSCVT_SHIFT),
209 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_API_SHIFT),
210 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_APA_SHIFT),
211 MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_DPB_SHIFT),
/*
 * Field policy for ID_AA64PFR0_EL1. FP and AdvSIMD are signed fields
 * (0xf means "not implemented"), hence the true sign flag.
 */
215 static struct mrs_field id_aa64pfr0_fields[] = {
216 MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_SVE_SHIFT),
217 MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_RAS_SHIFT),
218 MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_GIC_SHIFT),
219 MRS_FIELD(true, MRS_LOWER, ID_AA64PFR0_ADV_SIMD_SHIFT),
220 MRS_FIELD(true, MRS_LOWER, ID_AA64PFR0_FP_SHIFT),
221 MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL3_SHIFT),
222 MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL2_SHIFT),
223 MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL1_SHIFT),
224 MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL0_SHIFT),
/*
 * Field policy for ID_AA64DFR0_EL1: userland always sees fixed values;
 * DebugVer is pinned to 0x6 (ARMv8 debug architecture).
 */
228 static struct mrs_field id_aa64dfr0_fields[] = {
229 MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMS_VER_SHIFT),
230 MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_CTX_CMPS_SHIFT),
231 MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_WRPS_SHIFT),
232 MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_BRPS_SHIFT),
233 MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMU_VER_SHIFT),
234 MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_TRACE_VER_SHIFT),
235 MRS_FIELD(false, MRS_EXACT_VAL(0x6), ID_AA64DFR0_DEBUG_VER_SHIFT),
/*
 * Map from an emulated MRS encoding (CRm/Op2, fields not visible in this
 * excerpt) to the matching member of struct cpu_desc (via offset) and its
 * per-field merge policy table.
 */
239 struct mrs_user_reg {
243 struct mrs_field *fields;
/* ID registers userland may read through the undefined-instruction handler. */
246 static struct mrs_user_reg user_regs[] = {
247 { /* id_aa64isar0_el1 */
250 .offset = __offsetof(struct cpu_desc, id_aa64isar0),
251 .fields = id_aa64isar0_fields,
253 { /* id_aa64isar1_el1 */
256 .offset = __offsetof(struct cpu_desc, id_aa64isar1),
257 .fields = id_aa64isar1_fields,
259 { /* id_aa64pfr0_el1 */
262 .offset = __offsetof(struct cpu_desc, id_aa64pfr0),
263 .fields = id_aa64pfr0_fields,
265 { /* id_aa64dfr0_el1 */
268 .offset = __offsetof(struct cpu_desc, id_aa64dfr0),
269 .fields = id_aa64dfr0_fields,
/*
 * Access the uint64_t member of a struct cpu_desc that user_regs[idx]
 * describes, using its stored byte offset.
 */
273 #define CPU_DESC_FIELD(desc, idx) \
274 *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)
/*
 * Undefined-instruction handler emulating EL0 reads of the CPU ID registers.
 * Decodes the trapped MRS instruction, fetches either a sanitised value from
 * user_cpu_desc (CRm 4-7) or the live MIDR/MPIDR/REVIDR (CRm 0), advances
 * ELR past the instruction, and writes the result to the destination
 * register.  (Some lines of this function are not visible in this excerpt.)
 */
277 user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
281 int CRm, Op2, i, reg;
283 if ((insn & MRS_MASK) != MRS_VALUE)
287 * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
288 * These are in the EL1 CPU identification space.
289 * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVID_EL1.
290 * CRm == {4-7} holds the ID_AA64 registers.
292 * For full details see the ARMv8 ARM (ARM DDI 0487C.a)
293 * Table D9-2 System instruction encodings for non-Debug System
296 if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
300 if (CRm > 7 || (CRm < 4 && CRm != 0))
/* Look up the sanitised user-visible copy of the requested ID register. */
306 for (i = 0; i < nitems(user_regs); i++) {
307 if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
308 value = CPU_DESC_FIELD(user_cpu_desc, i);
316 value = READ_SPECIALREG(midr_el1);
319 value = READ_SPECIALREG(mpidr_el1);
322 value = READ_SPECIALREG(revidr_el1);
330 * We will handle this instruction, move to the next so we
331 * don't trap here again.
333 frame->tf_elr += INSN_SIZE;
335 reg = MRS_REGISTER(insn);
336 /* If reg is 31 then write to xzr, i.e. do nothing */
/* x30 is stored as tf_lr rather than in the tf_x array. */
340 if (reg < nitems(frame->tf_x))
341 frame->tf_x[reg] = value;
343 frame->tf_lr = value;
/*
 * Fold one CPU's ID register values into the user-visible user_cpu_desc.
 * MRS_EXACT fields are overwritten with the value packed into the field's
 * type; MRS_LOWER fields keep the minimum seen across CPUs (with signed or
 * unsigned comparison per the field's sign flag).  (Some lines of this
 * function are not visible in this excerpt.)
 */
349 update_user_regs(u_int cpu)
351 struct mrs_field *fields;
353 int i, j, cur_field, new_field;
355 for (i = 0; i < nitems(user_regs); i++) {
356 value = CPU_DESC_FIELD(cpu_desc[cpu], i);
360 cur = CPU_DESC_FIELD(user_cpu_desc, i);
362 fields = user_regs[i].fields;
/* Walk the field table until the MRS_INVALID terminator. */
363 for (j = 0; fields[j].type != 0; j++) {
364 switch (fields[j].type & MRS_TYPE_MASK) {
/* Force the 4-bit field to the value stored above the type nibble. */
366 cur &= ~(0xfu << fields[j].shift);
368 (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
/* Keep the lower of the current and this CPU's 4-bit field value. */
372 new_field = (value >> fields[j].shift) & 0xf;
373 cur_field = (cur >> fields[j].shift) & 0xf;
374 if ((fields[j].sign &&
375 (int)new_field < (int)cur_field) ||
377 (u_int)new_field < (u_int)cur_field)) {
378 cur &= ~(0xfu << fields[j].shift);
379 cur |= new_field << fields[j].shift;
383 panic("Invalid field type: %d", fields[j].type);
387 CPU_DESC_FIELD(user_cpu_desc, i) = cur;
/*
 * SYSINIT hook: seed user_cpu_desc with conservative defaults, merge every
 * identified CPU's registers into it, print the features, and install the
 * EL0 MRS emulation handler.  (Some lines of this function are not visible
 * in this excerpt.)
 */
392 identify_cpu_sysinit(void *dummy __unused)
396 /* Create a user visible cpu description with safe values */
397 memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
398 /* Safe values for these registers */
399 user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_ADV_SIMD_NONE |
400 ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64;
401 user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DEBUG_VER_8;
405 print_cpu_features(cpu);
406 update_user_regs(cpu);
/* From now on EL0 MRS reads of the ID space trap into user_mrs_handler. */
409 install_undef_handler(true, user_mrs_handler);
/*
 * Run after SMP bring-up so every CPU has filled in its cpu_desc entry.
 * (Fixed the misspelled SYSINIT uniquifier "idenrity_cpu".)
 */
SYSINIT(identity_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
/*
 * Print the identified features of one CPU to the console: implementer,
 * part, revision and affinity, then a decoded dump of each AArch64 ID
 * register.  For CPUs other than 0 a register is only decoded when the
 * corresponding PRINT_ID_AA64_* bit is set in cpu_print_regs, i.e. when its
 * value differs from CPU 0's.  (Many break/case/closing-brace lines of this
 * function are not visible in this excerpt.)
 */
414 print_cpu_features(u_int cpu)
419 sb = sbuf_new_auto();
420 sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
421 cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
422 cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
424 sbuf_cat(sb, " affinity:");
/* Fall through from the highest populated level down to Aff0. */
425 switch(cpu_aff_levels) {
428 sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
431 sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
434 sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
437 case 0: /* On UP this will be zero */
438 sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
442 printf("%s\n", sbuf_data(sb));
446 * There is a hardware errata where, if one CPU is performing a TLB
447 * invalidation while another is performing a store-exclusive the
448 * store-exclusive may return the wrong status. A workaround seems
449 * to be to use an IPI to invalidate on each CPU, however given the
450 * limited number of affected units (pass 1.1 is the evaluation
451 * hardware revision), and the lack of information from Cavium
452 * this has not been implemented.
454 * At the time of writing this the only information is from:
455 * https://lkml.org/lkml/2016/8/4/722
458 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
459 * triggers on pass 2.0+.
461 if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
462 CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
463 printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
464 "hardware bugs that may cause the incorrect operation of "
465 "atomic operations.\n");
/* Non-boot CPUs are silent unless some register differed from CPU 0. */
467 if (cpu != 0 && cpu_print_regs == 0)
/*
 * Comma separator for the feature lists; relies on a local "printed"
 * counter (declaration not visible in this excerpt) and evaluates
 * printed++ once per expansion.
 */
470 #define SEP_STR ((printed++) == 0) ? "" : ","
472 /* AArch64 Instruction Set Attribute Register 0 */
473 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
475 sbuf_printf(sb, " Instruction Set Attributes 0 = <");
477 switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
478 case ID_AA64ISAR0_RDM_NONE:
480 case ID_AA64ISAR0_RDM_IMPL:
481 sbuf_printf(sb, "%sRDM", SEP_STR);
484 sbuf_printf(sb, "%sUnknown RDM", SEP_STR);
487 switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
488 case ID_AA64ISAR0_ATOMIC_NONE:
490 case ID_AA64ISAR0_ATOMIC_IMPL:
491 sbuf_printf(sb, "%sAtomic", SEP_STR);
494 sbuf_printf(sb, "%sUnknown Atomic", SEP_STR);
497 switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
498 case ID_AA64ISAR0_AES_NONE:
500 case ID_AA64ISAR0_AES_BASE:
501 sbuf_printf(sb, "%sAES", SEP_STR);
503 case ID_AA64ISAR0_AES_PMULL:
504 sbuf_printf(sb, "%sAES+PMULL", SEP_STR);
507 sbuf_printf(sb, "%sUnknown AES", SEP_STR);
511 switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
512 case ID_AA64ISAR0_SHA1_NONE:
514 case ID_AA64ISAR0_SHA1_BASE:
515 sbuf_printf(sb, "%sSHA1", SEP_STR);
518 sbuf_printf(sb, "%sUnknown SHA1", SEP_STR);
522 switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
523 case ID_AA64ISAR0_SHA2_NONE:
525 case ID_AA64ISAR0_SHA2_BASE:
526 sbuf_printf(sb, "%sSHA2", SEP_STR);
528 case ID_AA64ISAR0_SHA2_512:
529 sbuf_printf(sb, "%sSHA2+SHA512", SEP_STR);
532 sbuf_printf(sb, "%sUnknown SHA2", SEP_STR);
536 switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
537 case ID_AA64ISAR0_CRC32_NONE:
539 case ID_AA64ISAR0_CRC32_BASE:
540 sbuf_printf(sb, "%sCRC32", SEP_STR);
543 sbuf_printf(sb, "%sUnknown CRC32", SEP_STR);
547 switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) {
548 case ID_AA64ISAR0_SHA3_NONE:
550 case ID_AA64ISAR0_SHA3_IMPL:
551 sbuf_printf(sb, "%sSHA3", SEP_STR);
554 sbuf_printf(sb, "%sUnknown SHA3", SEP_STR);
558 switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) {
559 case ID_AA64ISAR0_SM3_NONE:
561 case ID_AA64ISAR0_SM3_IMPL:
562 sbuf_printf(sb, "%sSM3", SEP_STR);
565 sbuf_printf(sb, "%sUnknown SM3", SEP_STR);
569 switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) {
570 case ID_AA64ISAR0_SM4_NONE:
572 case ID_AA64ISAR0_SM4_IMPL:
573 sbuf_printf(sb, "%sSM4", SEP_STR);
576 sbuf_printf(sb, "%sUnknown SM4", SEP_STR);
580 switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) {
581 case ID_AA64ISAR0_DP_NONE:
583 case ID_AA64ISAR0_DP_IMPL:
584 sbuf_printf(sb, "%sDotProd", SEP_STR);
587 sbuf_printf(sb, "%sUnknown DP", SEP_STR);
/* Dump any bits the decoder above does not know about. */
591 if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
592 sbuf_printf(sb, "%s%#lx", SEP_STR,
593 cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);
596 printf("%s>\n", sbuf_data(sb));
600 /* AArch64 Instruction Set Attribute Register 1 */
601 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
603 sbuf_printf(sb, " Instruction Set Attributes 1 = <");
605 switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) {
606 case ID_AA64ISAR1_GPI_NONE:
608 case ID_AA64ISAR1_GPI_IMPL:
609 sbuf_printf(sb, "%sImpl GenericAuth", SEP_STR);
612 sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
616 switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) {
617 case ID_AA64ISAR1_GPA_NONE:
619 case ID_AA64ISAR1_GPA_IMPL:
620 sbuf_printf(sb, "%sPrince GenericAuth", SEP_STR);
623 sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
627 switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) {
628 case ID_AA64ISAR1_LRCPC_NONE:
630 case ID_AA64ISAR1_LRCPC_IMPL:
631 sbuf_printf(sb, "%sRCpc", SEP_STR);
634 sbuf_printf(sb, "%sUnknown RCpc", SEP_STR);
638 switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) {
639 case ID_AA64ISAR1_FCMA_NONE:
641 case ID_AA64ISAR1_FCMA_IMPL:
642 sbuf_printf(sb, "%sFCMA", SEP_STR);
645 sbuf_printf(sb, "%sUnknown FCMA", SEP_STR);
649 switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) {
650 case ID_AA64ISAR1_JSCVT_NONE:
652 case ID_AA64ISAR1_JSCVT_IMPL:
653 sbuf_printf(sb, "%sJS Conv", SEP_STR);
656 sbuf_printf(sb, "%sUnknown JS Conv", SEP_STR);
660 switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) {
661 case ID_AA64ISAR1_API_NONE:
663 case ID_AA64ISAR1_API_IMPL:
664 sbuf_printf(sb, "%sImpl AddrAuth", SEP_STR);
667 sbuf_printf(sb, "%sUnknown Impl AddrAuth", SEP_STR);
671 switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) {
672 case ID_AA64ISAR1_APA_NONE:
674 case ID_AA64ISAR1_APA_IMPL:
675 sbuf_printf(sb, "%sPrince AddrAuth", SEP_STR);
678 sbuf_printf(sb, "%sUnknown Prince AddrAuth", SEP_STR);
682 switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) {
683 case ID_AA64ISAR1_DPB_NONE:
685 case ID_AA64ISAR1_DPB_IMPL:
686 sbuf_printf(sb, "%sDC CVAP", SEP_STR);
689 sbuf_printf(sb, "%sUnknown DC CVAP", SEP_STR);
693 if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0)
694 sbuf_printf(sb, "%s%#lx", SEP_STR,
695 cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK);
697 printf("%s>\n", sbuf_data(sb));
701 /* AArch64 Processor Feature Register 0 */
702 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
704 sbuf_printf(sb, " Processor Features 0 = <");
706 switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) {
707 case ID_AA64PFR0_SVE_NONE:
709 case ID_AA64PFR0_SVE_IMPL:
710 sbuf_printf(sb, "%sSVE", SEP_STR);
713 sbuf_printf(sb, "%sUnknown SVE", SEP_STR);
717 switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) {
718 case ID_AA64PFR0_RAS_NONE:
720 case ID_AA64PFR0_RAS_V1:
721 sbuf_printf(sb, "%sRASv1", SEP_STR);
724 sbuf_printf(sb, "%sUnknown RAS", SEP_STR);
728 switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
729 case ID_AA64PFR0_GIC_CPUIF_NONE:
731 case ID_AA64PFR0_GIC_CPUIF_EN:
732 sbuf_printf(sb, "%sGIC", SEP_STR);
735 sbuf_printf(sb, "%sUnknown GIC interface", SEP_STR);
739 switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
740 case ID_AA64PFR0_ADV_SIMD_NONE:
742 case ID_AA64PFR0_ADV_SIMD_IMPL:
743 sbuf_printf(sb, "%sAdvSIMD", SEP_STR);
745 case ID_AA64PFR0_ADV_SIMD_HP:
746 sbuf_printf(sb, "%sAdvSIMD+HP", SEP_STR);
749 sbuf_printf(sb, "%sUnknown AdvSIMD", SEP_STR);
753 switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
754 case ID_AA64PFR0_FP_NONE:
756 case ID_AA64PFR0_FP_IMPL:
757 sbuf_printf(sb, "%sFloat", SEP_STR);
759 case ID_AA64PFR0_FP_HP:
760 sbuf_printf(sb, "%sFloat+HP", SEP_STR);
763 sbuf_printf(sb, "%sUnknown Float", SEP_STR);
767 switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
768 case ID_AA64PFR0_EL3_NONE:
769 sbuf_printf(sb, "%sNo EL3", SEP_STR);
771 case ID_AA64PFR0_EL3_64:
772 sbuf_printf(sb, "%sEL3", SEP_STR);
774 case ID_AA64PFR0_EL3_64_32:
775 sbuf_printf(sb, "%sEL3 32", SEP_STR);
778 sbuf_printf(sb, "%sUnknown EL3", SEP_STR);
782 switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
783 case ID_AA64PFR0_EL2_NONE:
784 sbuf_printf(sb, "%sNo EL2", SEP_STR);
786 case ID_AA64PFR0_EL2_64:
787 sbuf_printf(sb, "%sEL2", SEP_STR);
789 case ID_AA64PFR0_EL2_64_32:
790 sbuf_printf(sb, "%sEL2 32", SEP_STR);
793 sbuf_printf(sb, "%sUnknown EL2", SEP_STR);
797 switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
798 case ID_AA64PFR0_EL1_64:
799 sbuf_printf(sb, "%sEL1", SEP_STR);
801 case ID_AA64PFR0_EL1_64_32:
802 sbuf_printf(sb, "%sEL1 32", SEP_STR);
805 sbuf_printf(sb, "%sUnknown EL1", SEP_STR);
809 switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
810 case ID_AA64PFR0_EL0_64:
811 sbuf_printf(sb, "%sEL0", SEP_STR);
813 case ID_AA64PFR0_EL0_64_32:
814 sbuf_printf(sb, "%sEL0 32", SEP_STR);
817 sbuf_printf(sb, "%sUnknown EL0", SEP_STR);
821 if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
822 sbuf_printf(sb, "%s%#lx", SEP_STR,
823 cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);
826 printf("%s>\n", sbuf_data(sb));
830 /* AArch64 Processor Feature Register 1 */
831 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
832 printf(" Processor Features 1 = <%#lx>\n",
833 cpu_desc[cpu].id_aa64pfr1);
836 /* AArch64 Memory Model Feature Register 0 */
837 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
839 sbuf_printf(sb, " Memory Model Features 0 = <");
840 switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
841 case ID_AA64MMFR0_TGRAN4_NONE:
843 case ID_AA64MMFR0_TGRAN4_IMPL:
844 sbuf_printf(sb, "%s4k Granule", SEP_STR);
847 sbuf_printf(sb, "%sUnknown 4k Granule", SEP_STR);
851 switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
852 case ID_AA64MMFR0_TGRAN16_NONE:
854 case ID_AA64MMFR0_TGRAN16_IMPL:
855 sbuf_printf(sb, "%s16k Granule", SEP_STR);
858 sbuf_printf(sb, "%sUnknown 16k Granule", SEP_STR);
862 switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
863 case ID_AA64MMFR0_TGRAN64_NONE:
865 case ID_AA64MMFR0_TGRAN64_IMPL:
866 sbuf_printf(sb, "%s64k Granule", SEP_STR);
869 sbuf_printf(sb, "%sUnknown 64k Granule", SEP_STR);
873 switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
874 case ID_AA64MMFR0_BIGEND_FIXED:
876 case ID_AA64MMFR0_BIGEND_MIXED:
877 sbuf_printf(sb, "%sMixedEndian", SEP_STR);
880 sbuf_printf(sb, "%sUnknown Endian switching", SEP_STR);
884 switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
885 case ID_AA64MMFR0_BIGEND_EL0_FIXED:
887 case ID_AA64MMFR0_BIGEND_EL0_MIXED:
888 sbuf_printf(sb, "%sEL0 MixEndian", SEP_STR);
891 sbuf_printf(sb, "%sUnknown EL0 Endian switching", SEP_STR);
895 switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
896 case ID_AA64MMFR0_S_NS_MEM_NONE:
898 case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
899 sbuf_printf(sb, "%sS/NS Mem", SEP_STR);
902 sbuf_printf(sb, "%sUnknown S/NS Mem", SEP_STR);
906 switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
907 case ID_AA64MMFR0_ASID_BITS_8:
908 sbuf_printf(sb, "%s8bit ASID", SEP_STR);
910 case ID_AA64MMFR0_ASID_BITS_16:
911 sbuf_printf(sb, "%s16bit ASID", SEP_STR);
914 sbuf_printf(sb, "%sUnknown ASID", SEP_STR);
918 switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
919 case ID_AA64MMFR0_PA_RANGE_4G:
920 sbuf_printf(sb, "%s4GB PA", SEP_STR);
922 case ID_AA64MMFR0_PA_RANGE_64G:
923 sbuf_printf(sb, "%s64GB PA", SEP_STR);
925 case ID_AA64MMFR0_PA_RANGE_1T:
926 sbuf_printf(sb, "%s1TB PA", SEP_STR);
928 case ID_AA64MMFR0_PA_RANGE_4T:
929 sbuf_printf(sb, "%s4TB PA", SEP_STR);
931 case ID_AA64MMFR0_PA_RANGE_16T:
932 sbuf_printf(sb, "%s16TB PA", SEP_STR);
934 case ID_AA64MMFR0_PA_RANGE_256T:
935 sbuf_printf(sb, "%s256TB PA", SEP_STR);
937 case ID_AA64MMFR0_PA_RANGE_4P:
938 sbuf_printf(sb, "%s4PB PA", SEP_STR);
941 sbuf_printf(sb, "%sUnknown PA Range", SEP_STR);
945 if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
946 sbuf_printf(sb, "%s%#lx", SEP_STR,
947 cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
949 printf("%s>\n", sbuf_data(sb));
953 /* AArch64 Memory Model Feature Register 1 */
954 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
956 sbuf_printf(sb, " Memory Model Features 1 = <");
958 switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) {
959 case ID_AA64MMFR1_XNX_NONE:
961 case ID_AA64MMFR1_XNX_IMPL:
962 sbuf_printf(sb, "%sEL2 XN", SEP_STR);
965 sbuf_printf(sb, "%sUnknown XNX", SEP_STR);
969 switch (ID_AA64MMFR1_SPEC_SEI(cpu_desc[cpu].id_aa64mmfr1)) {
970 case ID_AA64MMFR1_SPEC_SEI_NONE:
972 case ID_AA64MMFR1_SPEC_SEI_IMPL:
973 sbuf_printf(sb, "%sSpecSEI", SEP_STR);
976 sbuf_printf(sb, "%sUnknown SpecSEI", SEP_STR);
980 switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
981 case ID_AA64MMFR1_PAN_NONE:
983 case ID_AA64MMFR1_PAN_IMPL:
984 sbuf_printf(sb, "%sPAN", SEP_STR);
986 case ID_AA64MMFR1_PAN_ATS1E1:
987 sbuf_printf(sb, "%sPAN+AT", SEP_STR);
990 sbuf_printf(sb, "%sUnknown PAN", SEP_STR);
994 switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
995 case ID_AA64MMFR1_LO_NONE:
997 case ID_AA64MMFR1_LO_IMPL:
998 sbuf_printf(sb, "%sLO", SEP_STR);
1001 sbuf_printf(sb, "%sUnknown LO", SEP_STR);
1005 switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
1006 case ID_AA64MMFR1_HPDS_NONE:
1008 case ID_AA64MMFR1_HPDS_HPD:
1009 sbuf_printf(sb, "%sHPDS", SEP_STR);
1011 case ID_AA64MMFR1_HPDS_TTPBHA:
1012 sbuf_printf(sb, "%sTTPBHA", SEP_STR);
1015 sbuf_printf(sb, "%sUnknown HPDS", SEP_STR);
1019 switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
1020 case ID_AA64MMFR1_VH_NONE:
1022 case ID_AA64MMFR1_VH_IMPL:
1023 sbuf_printf(sb, "%sVHE", SEP_STR);
1026 sbuf_printf(sb, "%sUnknown VHE", SEP_STR);
1030 switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
1031 case ID_AA64MMFR1_VMIDBITS_8:
1033 case ID_AA64MMFR1_VMIDBITS_16:
1034 sbuf_printf(sb, "%s16 VMID bits", SEP_STR);
1037 sbuf_printf(sb, "%sUnknown VMID bits", SEP_STR);
1041 switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
1042 case ID_AA64MMFR1_HAFDBS_NONE:
1044 case ID_AA64MMFR1_HAFDBS_AF:
1045 sbuf_printf(sb, "%sAF", SEP_STR);
1047 case ID_AA64MMFR1_HAFDBS_AF_DBS:
1048 sbuf_printf(sb, "%sAF+DBS", SEP_STR);
1051 sbuf_printf(sb, "%sUnknown Hardware update AF/DBS", SEP_STR);
1055 if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
1056 sbuf_printf(sb, "%s%#lx", SEP_STR,
1057 cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
1059 printf("%s>\n", sbuf_data(sb));
1063 /* AArch64 Memory Model Feature Register 2 */
1064 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) {
1066 sbuf_printf(sb, " Memory Model Features 2 = <");
1068 switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) {
1069 case ID_AA64MMFR2_NV_NONE:
1071 case ID_AA64MMFR2_NV_IMPL:
1072 sbuf_printf(sb, "%sNestedVirt", SEP_STR);
1075 sbuf_printf(sb, "%sUnknown NestedVirt", SEP_STR);
1079 switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) {
1080 case ID_AA64MMFR2_CCIDX_32:
1081 sbuf_printf(sb, "%s32b CCIDX", SEP_STR);
1083 case ID_AA64MMFR2_CCIDX_64:
1084 sbuf_printf(sb, "%s64b CCIDX", SEP_STR);
1087 sbuf_printf(sb, "%sUnknown CCIDX", SEP_STR);
1091 switch (ID_AA64MMFR2_VA_RANGE(cpu_desc[cpu].id_aa64mmfr2)) {
1092 case ID_AA64MMFR2_VA_RANGE_48:
1093 sbuf_printf(sb, "%s48b VA", SEP_STR);
1095 case ID_AA64MMFR2_VA_RANGE_52:
1096 sbuf_printf(sb, "%s52b VA", SEP_STR);
1099 sbuf_printf(sb, "%sUnknown VA Range", SEP_STR);
1103 switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) {
1104 case ID_AA64MMFR2_IESB_NONE:
1106 case ID_AA64MMFR2_IESB_IMPL:
1107 sbuf_printf(sb, "%sIESB", SEP_STR);
1110 sbuf_printf(sb, "%sUnknown IESB", SEP_STR);
1114 switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) {
1115 case ID_AA64MMFR2_LSM_NONE:
1117 case ID_AA64MMFR2_LSM_IMPL:
1118 sbuf_printf(sb, "%sLSM", SEP_STR);
1121 sbuf_printf(sb, "%sUnknown LSM", SEP_STR);
1125 switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) {
1126 case ID_AA64MMFR2_UAO_NONE:
1128 case ID_AA64MMFR2_UAO_IMPL:
1129 sbuf_printf(sb, "%sUAO", SEP_STR);
1132 sbuf_printf(sb, "%sUnknown UAO", SEP_STR);
1136 switch (ID_AA64MMFR2_CNP(cpu_desc[cpu].id_aa64mmfr2)) {
1137 case ID_AA64MMFR2_CNP_NONE:
1139 case ID_AA64MMFR2_CNP_IMPL:
1140 sbuf_printf(sb, "%sCnP", SEP_STR);
1143 sbuf_printf(sb, "%sUnknown CnP", SEP_STR);
1147 if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0)
1148 sbuf_printf(sb, "%s%#lx", SEP_STR,
1149 cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK);
1151 printf("%s>\n", sbuf_data(sb));
1155 /* AArch64 Debug Feature Register 0 */
1156 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
1158 sbuf_printf(sb, " Debug Features 0 = <");
1159 switch(ID_AA64DFR0_PMS_VER(cpu_desc[cpu].id_aa64dfr0)) {
1160 case ID_AA64DFR0_PMS_VER_NONE:
1162 case ID_AA64DFR0_PMS_VER_V1:
1163 sbuf_printf(sb, "%sSPE v1", SEP_STR);
1166 sbuf_printf(sb, "%sUnknown SPE", SEP_STR);
1170 sbuf_printf(sb, "%s%lu CTX Breakpoints", SEP_STR,
1171 ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));
1173 sbuf_printf(sb, "%s%lu Watchpoints", SEP_STR,
1174 ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));
1176 sbuf_printf(sb, "%s%lu Breakpoints", SEP_STR,
1177 ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));
1179 switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
1180 case ID_AA64DFR0_PMU_VER_NONE:
1182 case ID_AA64DFR0_PMU_VER_3:
1183 sbuf_printf(sb, "%sPMUv3", SEP_STR);
1185 case ID_AA64DFR0_PMU_VER_3_1:
1186 sbuf_printf(sb, "%sPMUv3+16 bit evtCount", SEP_STR);
1188 case ID_AA64DFR0_PMU_VER_IMPL:
1189 sbuf_printf(sb, "%sImplementation defined PMU", SEP_STR);
1192 sbuf_printf(sb, "%sUnknown PMU", SEP_STR);
1196 switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
1197 case ID_AA64DFR0_TRACE_VER_NONE:
1199 case ID_AA64DFR0_TRACE_VER_IMPL:
1200 sbuf_printf(sb, "%sTrace", SEP_STR);
1203 sbuf_printf(sb, "%sUnknown Trace", SEP_STR);
1207 switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
1208 case ID_AA64DFR0_DEBUG_VER_8:
1209 sbuf_printf(sb, "%sDebug v8", SEP_STR);
1211 case ID_AA64DFR0_DEBUG_VER_8_VHE:
1212 sbuf_printf(sb, "%sDebug v8+VHE", SEP_STR);
1214 case ID_AA64DFR0_DEBUG_VER_8_2:
1215 sbuf_printf(sb, "%sDebug v8.2", SEP_STR);
1218 sbuf_printf(sb, "%sUnknown Debug", SEP_STR);
1222 if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
1223 sbuf_printf(sb, "%s%#lx", SEP_STR,
1224 cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
1226 printf("%s>\n", sbuf_data(sb));
1230 /* AArch64 Memory Model Feature Register 1 */
1231 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
1232 printf(" Debug Features 1 = <%#lx>\n",
1233 cpu_desc[cpu].id_aa64dfr1);
1236 /* AArch64 Auxiliary Feature Register 0 */
1237 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
1238 printf(" Auxiliary Features 0 = <%#lx>\n",
1239 cpu_desc[cpu].id_aa64afr0);
1242 /* AArch64 Auxiliary Feature Register 1 */
1243 if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
1244 printf(" Auxiliary Features 1 = <%#lx>\n",
1245 cpu_desc[cpu].id_aa64afr1);
/*
 * Body of identify_cpu (the function signature and some lines are outside
 * this excerpt): look up the implementer and part names from MIDR_EL1, save
 * the affinity from MPIDR_EL1, snapshot all AArch64 ID registers into
 * cpu_desc[cpu], then — under ident_lock — grow cpu_aff_levels and record
 * which registers differ from CPU 0 so print_cpu_features dumps them.
 */
1261 const struct cpu_parts *cpu_partsp = NULL;
1263 cpu = PCPU_GET(cpuid);
1267 * Store midr to pcpu to allow fast reading
1268 * from EL0, EL1 and assembly code.
1270 PCPU_SET(midr, midr);
/* Implementer lookup; the impl_id == 0 sentinel entry always matches. */
1272 impl_id = CPU_IMPL(midr);
1273 for (i = 0; i < nitems(cpu_implementers); i++) {
1274 if (impl_id == cpu_implementers[i].impl_id ||
1275 cpu_implementers[i].impl_id == 0) {
1276 cpu_desc[cpu].cpu_impl = impl_id;
1277 cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
1278 cpu_partsp = cpu_implementers[i].cpu_parts;
1283 part_id = CPU_PART(midr);
/*
 * NOTE(review): "&cpu_partsp[i] != NULL" is always true; the loop really
 * terminates on the part_id == 0 sentinel entry below.  Consider rewriting
 * the condition to make that explicit.
 */
1284 for (i = 0; &cpu_partsp[i] != NULL; i++) {
1285 if (part_id == cpu_partsp[i].part_id ||
1286 cpu_partsp[i].part_id == 0) {
1287 cpu_desc[cpu].cpu_part_num = part_id;
1288 cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
1293 cpu_desc[cpu].cpu_revision = CPU_REV(midr);
1294 cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
1296 /* Save affinity for current CPU */
1297 cpu_desc[cpu].mpidr = get_mpidr();
1298 CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
/* Snapshot the ID registers for later comparison and userland export. */
1300 cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1);
1301 cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1);
1302 cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1);
1303 cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1);
1304 cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1);
1305 cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
1306 cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
1307 cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1);
1308 cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1);
1312 * This code must run on one cpu at a time, but we are
1313 * not scheduling on the current core so implement a
/* Acquire the hand-rolled lock; losers sleep in wfe until sev below. */
1316 while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
1317 __asm __volatile("wfe" ::: "memory");
/* Widen cpu_aff_levels to cover every Aff field that differs from CPU 0. */
1319 switch (cpu_aff_levels) {
1321 if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
1322 CPU_AFF0(cpu_desc[0].mpidr))
1326 if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
1327 CPU_AFF1(cpu_desc[0].mpidr))
1331 if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
1332 CPU_AFF2(cpu_desc[0].mpidr))
1336 if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
1337 CPU_AFF3(cpu_desc[0].mpidr))
/* Flag every ID register that differs from CPU 0 for per-CPU printing. */
1342 if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
1343 cpu_print_regs |= PRINT_ID_AA64_AFR0;
1344 if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
1345 cpu_print_regs |= PRINT_ID_AA64_AFR1;
1347 if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
1348 cpu_print_regs |= PRINT_ID_AA64_DFR0;
1349 if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
1350 cpu_print_regs |= PRINT_ID_AA64_DFR1;
1352 if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
1353 cpu_print_regs |= PRINT_ID_AA64_ISAR0;
1354 if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
1355 cpu_print_regs |= PRINT_ID_AA64_ISAR1;
1357 if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
1358 cpu_print_regs |= PRINT_ID_AA64_MMFR0;
1359 if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
1360 cpu_print_regs |= PRINT_ID_AA64_MMFR1;
1361 if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
1362 cpu_print_regs |= PRINT_ID_AA64_MMFR2;
1364 if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
1365 cpu_print_regs |= PRINT_ID_AA64_PFR0;
1366 if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
1367 cpu_print_regs |= PRINT_ID_AA64_PFR1;
1369 /* Wake up the other CPUs */
1370 atomic_store_rel_int(&ident_lock, 0);
1371 __asm __volatile("sev" ::: "memory");