/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Semihalf
 * under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/undefined.h>
#include <machine/elf.h>

static int ident_lock;

static void print_cpu_features(u_int cpu);
static u_long parse_cpu_features_hwcap(u_int cpu);

char machine[] = "arm64";

extern int adaptive_machine_arch;

static int
sysctl_hw_machine(SYSCTL_HANDLER_ARGS)
{
	static const char machine32[] = "arm";
	int error;

	if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch)
		error = SYSCTL_OUT(req, machine32, sizeof(machine32));
	else
		error = SYSCTL_OUT(req, machine, sizeof(machine));
	return (error);
}

SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class");
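
/*
 * Illustrative only: with the handler above, a 32-bit process (via the
 * SCTL_MASK32 request flag) reading hw.machine sees "arm", while a
 * native 64-bit process sees "arm64":
 *
 *	$ sysctl hw.machine
 *	hw.machine: arm64
 */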

static char cpu_model[64];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD,
    cpu_model, sizeof(cpu_model), "Machine model");

/*
 * Per-CPU affinity as provided in MPIDR_EL1.
 * Indexed by CPU number in logical order selected by the system.
 * Relevant fields can be extracted using the CPU_AFFn macros;
 * Aff3.Aff2.Aff1.Aff0 constructs a unique CPU address in the system.
 *
 * Fields used by us:
 * Aff1 - Cluster number
 * Aff0 - CPU number in Aff1 cluster
 */
uint64_t __cpu_affinity[MAXCPU];
static u_int cpu_aff_levels;
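
/*
 * A minimal sketch of the Aff3.Aff2.Aff1.Aff0 layout described above,
 * assuming the CPU_AFFn accessors from <machine/cpu.h> extract bytes
 * [39:32], [23:16], [15:8] and [7:0] of MPIDR_EL1 respectively.  For an
 * MPIDR value of 0x0000000100020304 the CPU address is 1.2.3.4:
 */
CTASSERT(CPU_AFF1(0x0000000100020304UL) == 3);
CTASSERT(CPU_AFF0(0x0000000100020304UL) == 4);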

struct cpu_desc {
	u_int		cpu_impl;
	u_int		cpu_part_num;
	u_int		cpu_variant;
	u_int		cpu_revision;
	const char	*cpu_impl_name;
	const char	*cpu_part_name;
	uint64_t	mpidr;

	uint64_t	id_aa64afr0;
	uint64_t	id_aa64afr1;
	uint64_t	id_aa64dfr0;
	uint64_t	id_aa64dfr1;
	uint64_t	id_aa64isar0;
	uint64_t	id_aa64isar1;
	uint64_t	id_aa64mmfr0;
	uint64_t	id_aa64mmfr1;
	uint64_t	id_aa64mmfr2;
	uint64_t	id_aa64pfr0;
	uint64_t	id_aa64pfr1;
};

struct cpu_desc cpu_desc[MAXCPU];
struct cpu_desc user_cpu_desc;
static u_int cpu_print_regs;
#define	PRINT_ID_AA64_AFR0	0x00000001
#define	PRINT_ID_AA64_AFR1	0x00000002
#define	PRINT_ID_AA64_DFR0	0x00000010
#define	PRINT_ID_AA64_DFR1	0x00000020
#define	PRINT_ID_AA64_ISAR0	0x00000100
#define	PRINT_ID_AA64_ISAR1	0x00000200
#define	PRINT_ID_AA64_MMFR0	0x00001000
#define	PRINT_ID_AA64_MMFR1	0x00002000
#define	PRINT_ID_AA64_MMFR2	0x00004000
#define	PRINT_ID_AA64_PFR0	0x00010000
#define	PRINT_ID_AA64_PFR1	0x00020000

struct cpu_parts {
	u_int		part_id;
	const char	*part_name;
};
#define	CPU_PART_NONE	{ 0, "Unknown Processor" }

struct cpu_implementers {
	u_int			impl_id;
	const char		*impl_name;
	/*
	 * Part number is implementation defined
	 * so each vendor will have its own set of values and names.
	 */
	const struct cpu_parts	*cpu_parts;
};
#define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }

/*
 * Per-implementer table of (PartNum, CPU Name) pairs.
 */
/* ARM Ltd. */
static const struct cpu_parts cpu_parts_arm[] = {
	{ CPU_PART_FOUNDATION, "Foundation-Model" },
	{ CPU_PART_CORTEX_A35, "Cortex-A35" },
	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
	{ CPU_PART_CORTEX_A55, "Cortex-A55" },
	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
	{ CPU_PART_CORTEX_A72, "Cortex-A72" },
	{ CPU_PART_CORTEX_A73, "Cortex-A73" },
	{ CPU_PART_CORTEX_A75, "Cortex-A75" },
	CPU_PART_NONE,
};

/* Cavium */
static const struct cpu_parts cpu_parts_cavium[] = {
	{ CPU_PART_THUNDERX, "ThunderX" },
	{ CPU_PART_THUNDERX2, "ThunderX2" },
	CPU_PART_NONE,
};

/* Unknown */
static const struct cpu_parts cpu_parts_none[] = {
	CPU_PART_NONE,
};

/*
 * Implementers table.
 */
const struct cpu_implementers cpu_implementers[] = {
	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
	{ CPU_IMPL_APM,		"APM",		cpu_parts_none },
	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
	CPU_IMPLEMENTER_NONE,
};

#define	MRS_TYPE_MASK		0xf
#define	MRS_INVALID		0
#define	MRS_EXACT		1
#define	MRS_EXACT_VAL(x)	(MRS_EXACT | ((x) << 4))
#define	MRS_EXACT_FIELD(x)	((x) >> 4)
#define	MRS_LOWER		2
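
/*
 * Sanity-check sketch of the encoding above: the required field value
 * rides in the bits above MRS_TYPE_MASK, so MRS_EXACT_FIELD() recovers
 * what MRS_EXACT_VAL() stored (e.g. the 0x6 used below for
 * ID_AA64DFR0_DebugVer) while the type bits still compare as MRS_EXACT.
 */
CTASSERT(MRS_EXACT_FIELD(MRS_EXACT_VAL(0x6)) == 0x6);
CTASSERT((MRS_EXACT_VAL(0x6) & MRS_TYPE_MASK) == MRS_EXACT);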

struct mrs_field {
	bool		sign;
	u_int		type;
	u_int		shift;
};

#define	MRS_FIELD(_sign, _type, _shift)					\
	{								\
		.sign = (_sign),					\
		.type = (_type),					\
		.shift = (_shift),					\
	}

#define	MRS_FIELD_END	{ .type = MRS_INVALID, }

static struct mrs_field id_aa64isar0_fields[] = {
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_DP_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM4_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM3_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA3_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_RDM_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_Atomic_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_CRC32_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA2_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA1_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_AES_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64isar1_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPI_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_LRCPC_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_FCMA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_JSCVT_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_API_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_APA_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_DPB_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64pfr0_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_SVE_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_RAS_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_GIC_SHIFT),
	MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_AdvSIMD_SHIFT),
	MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_FP_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL3_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL2_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL1_SHIFT),
	MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL0_SHIFT),
	MRS_FIELD_END,
};

static struct mrs_field id_aa64dfr0_fields[] = {
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMSVer_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_CTX_CMPs_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_WRPs_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_BRPs_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMUVer_SHIFT),
	MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_TraceVer_SHIFT),
	MRS_FIELD(false, MRS_EXACT_VAL(0x6), ID_AA64DFR0_DebugVer_SHIFT),
	MRS_FIELD_END,
};

struct mrs_user_reg {
	u_int		CRm;
	u_int		Op2;
	size_t		offset;
	struct mrs_field *fields;
};

static struct mrs_user_reg user_regs[] = {
	{ /* id_aa64isar0_el1 */
		.CRm = 6,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64isar0),
		.fields = id_aa64isar0_fields,
	},
	{ /* id_aa64isar1_el1 */
		.CRm = 6,
		.Op2 = 1,
		.offset = __offsetof(struct cpu_desc, id_aa64isar1),
		.fields = id_aa64isar1_fields,
	},
	{ /* id_aa64pfr0_el1 */
		.CRm = 4,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64pfr0),
		.fields = id_aa64pfr0_fields,
	},
	{ /* id_aa64dfr0_el1 */
		.CRm = 5,
		.Op2 = 0,
		.offset = __offsetof(struct cpu_desc, id_aa64dfr0),
		.fields = id_aa64dfr0_fields,
	},
};

#define	CPU_DESC_FIELD(desc, idx)					\
	*(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)
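
/*
 * CPU_DESC_FIELD() provides generic access to the saved ID register at
 * user_regs[idx].offset within a struct cpu_desc, so a single loop can
 * walk every emulated register instead of naming each field explicitly.
 */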

static int
user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
    uint32_t esr)
{
	uint64_t value;
	int CRm, Op2, i, reg;

	if ((insn & MRS_MASK) != MRS_VALUE)
		return (0);

	/*
	 * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
	 * These are in the EL1 CPU identification space.
	 * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1.
	 * CRm == {4-7} holds the ID_AA64 registers.
	 *
	 * For full details see the ARMv8 ARM (ARM DDI 0487C.a),
	 * Table D9-2 System instruction encodings for non-Debug System
	 * register accesses.
	 */
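	/*
	 * For reference, the fields the mrs_*() accessors are expected to
	 * extract from the A64 MRS encoding:
	 *
	 *	Op0: insn[20:19]   CRn: insn[15:12]   Op2: insn[7:5]
	 *	Op1: insn[18:16]   CRm: insn[11:8]    Rt:  insn[4:0]
	 */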
	if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
		return (0);

	CRm = mrs_CRm(insn);
	if (CRm > 7 || (CRm < 4 && CRm != 0))
		return (0);

	Op2 = mrs_Op2(insn);
	value = 0;

	for (i = 0; i < nitems(user_regs); i++) {
		if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
			value = CPU_DESC_FIELD(user_cpu_desc, i);
			break;
		}
	}

	if (CRm == 0) {
		switch (Op2) {
		case 0:
			value = READ_SPECIALREG(midr_el1);
			break;
		case 5:
			value = READ_SPECIALREG(mpidr_el1);
			break;
		case 6:
			value = READ_SPECIALREG(revidr_el1);
			break;
		default:
			return (0);
		}
	}

	/*
	 * We will handle this instruction, move to the next so we
	 * don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;

	reg = MRS_REGISTER(insn);
	/* If reg is 31 then write to xzr, i.e. do nothing */
	if (reg == 31)
		return (1);

	if (reg < nitems(frame->tf_x))
		frame->tf_x[reg] = value;
	else if (reg == 30)
		frame->tf_lr = value;

	return (1);
}

static void
update_user_regs(u_int cpu)
{
	struct mrs_field *fields;
	uint64_t cur, value;
	int i, j, cur_field, new_field;

	for (i = 0; i < nitems(user_regs); i++) {
		value = CPU_DESC_FIELD(cpu_desc[cpu], i);
		cur = CPU_DESC_FIELD(user_cpu_desc, i);

		fields = user_regs[i].fields;
		for (j = 0; fields[j].type != 0; j++) {
			switch (fields[j].type & MRS_TYPE_MASK) {
			case MRS_EXACT:
				cur &= ~((uint64_t)0xf << fields[j].shift);
				cur |=
				    (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
				    fields[j].shift;
				break;
			case MRS_LOWER:
				new_field = (value >> fields[j].shift) & 0xf;
				cur_field = (cur >> fields[j].shift) & 0xf;
				if ((fields[j].sign &&
				     (int)new_field < (int)cur_field) ||
				    (!fields[j].sign &&
				     (u_int)new_field < (u_int)cur_field)) {
					cur &= ~((uint64_t)0xf <<
					    fields[j].shift);
					cur |= (uint64_t)new_field <<
					    fields[j].shift;
				}
				break;
			default:
				panic("Invalid field type: %d", fields[j].type);
			}
		}

		CPU_DESC_FIELD(user_cpu_desc, i) = cur;
	}
}
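
/*
 * Worked example of the MRS_LOWER policy above (illustrative values):
 * if CPU0 reports ID_AA64ISAR0_EL1.AES == 2 (AES + PMULL) while CPU1
 * reports AES == 1 (AES only), the user-visible field is lowered to 1,
 * so userland is only told about features every CPU implements.
 */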

extern u_long elf_hwcap;

static void
identify_cpu_sysinit(void *dummy __unused)
{
	u_long hwcap;
	u_int cpu;

	/* Create a user visible cpu description with safe values */
	memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
	/* Safe values for these registers */
	user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE |
	    ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64;
	user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8;

	CPU_FOREACH(cpu) {
		print_cpu_features(cpu);
		hwcap = parse_cpu_features_hwcap(cpu);
		if (cpu == 0)
			elf_hwcap = hwcap;
		else
			elf_hwcap &= hwcap;
		update_user_regs(cpu);
	}
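
	/*
	 * EL0 reads of the ID registers trap to EL1 as undefined
	 * instructions; the handler registered here replays them from the
	 * sanitized user_cpu_desc assembled above.
	 */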
	install_undef_handler(true, user_mrs_handler);
}

SYSINIT(identify_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);

static u_long
parse_cpu_features_hwcap(u_int cpu)
{
	u_long hwcap = 0;

	if (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_DP_IMPL)
		hwcap |= HWCAP_ASIMDDP;

	if (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM4_IMPL)
		hwcap |= HWCAP_SM4;

	if (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM3_IMPL)
		hwcap |= HWCAP_SM3;

	if (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_RDM_IMPL)
		hwcap |= HWCAP_ASIMDRDM;

	if (ID_AA64ISAR0_Atomic(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_Atomic_IMPL)
		hwcap |= HWCAP_ATOMICS;

	if (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE)
		hwcap |= HWCAP_CRC32;

	switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
	case ID_AA64ISAR0_SHA2_BASE:
		hwcap |= HWCAP_SHA2;
		break;
	case ID_AA64ISAR0_SHA2_512:
		hwcap |= HWCAP_SHA2 | HWCAP_SHA512;
		break;
	default:
		break;
	}

	if (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SHA1_BASE)
		hwcap |= HWCAP_SHA1;

	switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
	case ID_AA64ISAR0_AES_BASE:
		hwcap |= HWCAP_AES;
		break;
	case ID_AA64ISAR0_AES_PMULL:
		hwcap |= HWCAP_PMULL | HWCAP_AES;
		break;
	default:
		break;
	}

	if (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_LRCPC_IMPL)
		hwcap |= HWCAP_LRCPC;

	if (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_FCMA_IMPL)
		hwcap |= HWCAP_FCMA;

	if (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_JSCVT_IMPL)
		hwcap |= HWCAP_JSCVT;

	if (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_DPB_IMPL)
		hwcap |= HWCAP_DCPOP;

	if (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0) == ID_AA64PFR0_SVE_IMPL)
		hwcap |= HWCAP_SVE;

	switch (ID_AA64PFR0_AdvSIMD(cpu_desc[cpu].id_aa64pfr0)) {
	case ID_AA64PFR0_AdvSIMD_IMPL:
		hwcap |= HWCAP_ASIMD;
		break;
	case ID_AA64PFR0_AdvSIMD_HP:
		hwcap |= HWCAP_ASIMD | HWCAP_ASIMDHP;
		break;
	default:
		break;
	}

	switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
	case ID_AA64PFR0_FP_IMPL:
		hwcap |= HWCAP_FP;
		break;
	case ID_AA64PFR0_FP_HP:
		hwcap |= HWCAP_FP | HWCAP_FPHP;
		break;
	default:
		break;
	}

	return (hwcap);
}
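
/*
 * Userland counterpart (hypothetical example, not part of this file):
 * the bits assembled above reach processes through the ELF auxiliary
 * vector and can be queried with elf_aux_info(3), e.g.:
 *
 *	unsigned long hwcap = 0;
 *	if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) == 0 &&
 *	    (hwcap & HWCAP_CRC32) != 0)
 *		use_hardware_crc32();	-- hypothetical helper
 */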

static void
print_cpu_features(u_int cpu)
{
	struct sbuf *sb;
	int printed;

	sb = sbuf_new_auto();
	sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);

	sbuf_cat(sb, " affinity:");
	switch(cpu_aff_levels) {
	default:
	case 4:
		sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 3:
		sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 2:
		sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 1:
	case 0: /* On UP this will be zero */
		sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
		break;
	}
	sbuf_finish(sb);
	printf("%s\n", sbuf_data(sb));
	sbuf_clear(sb);

	/*
	 * There is a hardware erratum where, if one CPU is performing a TLB
	 * invalidation while another is performing a store-exclusive, the
	 * store-exclusive may return the wrong status. A workaround seems
	 * to be to use an IPI to invalidate on each CPU, however given the
	 * limited number of affected units (pass 1.1 is the evaluation
	 * hardware revision), and the lack of information from Cavium,
	 * this has not been implemented.
	 *
	 * At the time of writing this the only information is from:
	 * https://lkml.org/lkml/2016/8/4/722
	 */
	/*
	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
	 * triggers on pass 2.0+.
	 */
	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
	    CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
		    "hardware bugs that may cause the incorrect operation of "
		    "atomic operations.\n");

#define	SEP_STR	((printed++) == 0) ? "" : ","
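
/*
 * SEP_STR yields "" the first time it is evaluated within a block and
 * "," afterwards; each register block below resets `printed' to 0 so
 * its feature list starts without a leading comma.
 */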

	/* AArch64 Instruction Set Attribute Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
		printed = 0;
		sbuf_printf(sb, " Instruction Set Attributes 0 = <");

		switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_DP_NONE:
			break;
		case ID_AA64ISAR0_DP_IMPL:
			sbuf_printf(sb, "%sDotProd", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown DP", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM4_NONE:
			break;
		case ID_AA64ISAR0_SM4_IMPL:
			sbuf_printf(sb, "%sSM4", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SM4", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM3_NONE:
			break;
		case ID_AA64ISAR0_SM3_IMPL:
			sbuf_printf(sb, "%sSM3", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SM3", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA3_NONE:
			break;
		case ID_AA64ISAR0_SHA3_IMPL:
			sbuf_printf(sb, "%sSHA3", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA3", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_RDM_NONE:
			break;
		case ID_AA64ISAR0_RDM_IMPL:
			sbuf_printf(sb, "%sRDM", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RDM", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_Atomic(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_Atomic_NONE:
			break;
		case ID_AA64ISAR0_Atomic_IMPL:
			sbuf_printf(sb, "%sAtomic", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Atomic", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_CRC32_NONE:
			break;
		case ID_AA64ISAR0_CRC32_BASE:
			sbuf_printf(sb, "%sCRC32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CRC32", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA2_NONE:
			break;
		case ID_AA64ISAR0_SHA2_BASE:
			sbuf_printf(sb, "%sSHA2", SEP_STR);
			break;
		case ID_AA64ISAR0_SHA2_512:
			sbuf_printf(sb, "%sSHA2+SHA512", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA2", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA1_NONE:
			break;
		case ID_AA64ISAR0_SHA1_BASE:
			sbuf_printf(sb, "%sSHA1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SHA1", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_AES_NONE:
			break;
		case ID_AA64ISAR0_AES_BASE:
			sbuf_printf(sb, "%sAES", SEP_STR);
			break;
		case ID_AA64ISAR0_AES_PMULL:
			sbuf_printf(sb, "%sAES+PMULL", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown AES", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Instruction Set Attribute Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
		printed = 0;
		sbuf_printf(sb, " Instruction Set Attributes 1 = <");

		switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPI_NONE:
			break;
		case ID_AA64ISAR1_GPI_IMPL:
			sbuf_printf(sb, "%sImpl GenericAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPA_NONE:
			break;
		case ID_AA64ISAR1_GPA_IMPL:
			sbuf_printf(sb, "%sPrince GenericAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_LRCPC_NONE:
			break;
		case ID_AA64ISAR1_LRCPC_IMPL:
			sbuf_printf(sb, "%sRCpc", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RCpc", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_FCMA_NONE:
			break;
		case ID_AA64ISAR1_FCMA_IMPL:
			sbuf_printf(sb, "%sFCMA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown FCMA", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_JSCVT_NONE:
			break;
		case ID_AA64ISAR1_JSCVT_IMPL:
			sbuf_printf(sb, "%sJS Conv", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown JS Conv", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_API_NONE:
			break;
		case ID_AA64ISAR1_API_IMPL:
			sbuf_printf(sb, "%sImpl AddrAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Impl AddrAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_APA_NONE:
			break;
		case ID_AA64ISAR1_APA_IMPL:
			sbuf_printf(sb, "%sPrince AddrAuth", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Prince AddrAuth", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_DPB_NONE:
			break;
		case ID_AA64ISAR1_DPB_IMPL:
			sbuf_printf(sb, "%sDC CVAP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown DC CVAP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Processor Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, " Processor Features 0 = <");

		switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_SVE_NONE:
			break;
		case ID_AA64PFR0_SVE_IMPL:
			sbuf_printf(sb, "%sSVE", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SVE", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_RAS_NONE:
			break;
		case ID_AA64PFR0_RAS_V1:
			sbuf_printf(sb, "%sRASv1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown RAS", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_GIC_CPUIF_NONE:
			break;
		case ID_AA64PFR0_GIC_CPUIF_EN:
			sbuf_printf(sb, "%sGIC", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown GIC interface", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_AdvSIMD(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_AdvSIMD_NONE:
			break;
		case ID_AA64PFR0_AdvSIMD_IMPL:
			sbuf_printf(sb, "%sAdvSIMD", SEP_STR);
			break;
		case ID_AA64PFR0_AdvSIMD_HP:
			sbuf_printf(sb, "%sAdvSIMD+HP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown AdvSIMD", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_FP_NONE:
			break;
		case ID_AA64PFR0_FP_IMPL:
			sbuf_printf(sb, "%sFloat", SEP_STR);
			break;
		case ID_AA64PFR0_FP_HP:
			sbuf_printf(sb, "%sFloat+HP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Float", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL3_NONE:
			sbuf_printf(sb, "%sNo EL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64:
			sbuf_printf(sb, "%sEL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64_32:
			sbuf_printf(sb, "%sEL3 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL3", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL2_NONE:
			sbuf_printf(sb, "%sNo EL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64:
			sbuf_printf(sb, "%sEL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64_32:
			sbuf_printf(sb, "%sEL2 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL2", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL1_64:
			sbuf_printf(sb, "%sEL1", SEP_STR);
			break;
		case ID_AA64PFR0_EL1_64_32:
			sbuf_printf(sb, "%sEL1 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL1", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL0_64:
			sbuf_printf(sb, "%sEL0", SEP_STR);
			break;
		case ID_AA64PFR0_EL0_64_32:
			sbuf_printf(sb, "%sEL0 32", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL0", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Processor Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
		printf(" Processor Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64pfr1);
	}

	/* AArch64 Memory Model Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, " Memory Model Features 0 = <");

		switch (ID_AA64MMFR0_TGran4(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGran4_NONE:
			break;
		case ID_AA64MMFR0_TGran4_IMPL:
			sbuf_printf(sb, "%s4k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 4k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGran64(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGran64_NONE:
			break;
		case ID_AA64MMFR0_TGran64_IMPL:
			sbuf_printf(sb, "%s64k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 64k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGran16(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGran16_NONE:
			break;
		case ID_AA64MMFR0_TGran16_IMPL:
			sbuf_printf(sb, "%s16k Granule", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown 16k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BigEndEL0(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BigEndEL0_FIXED:
			break;
		case ID_AA64MMFR0_BigEndEL0_MIXED:
			sbuf_printf(sb, "%sEL0 MixEndian", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown EL0 Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_SNSMem(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_SNSMem_NONE:
			break;
		case ID_AA64MMFR0_SNSMem_DISTINCT:
			sbuf_printf(sb, "%sS/NS Mem", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown S/NS Mem", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BigEnd(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BigEnd_FIXED:
			break;
		case ID_AA64MMFR0_BigEnd_MIXED:
			sbuf_printf(sb, "%sMixedEndian", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_ASIDBits(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_ASIDBits_8:
			sbuf_printf(sb, "%s8bit ASID", SEP_STR);
			break;
		case ID_AA64MMFR0_ASIDBits_16:
			sbuf_printf(sb, "%s16bit ASID", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown ASID", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_PARange(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_PARange_4G:
			sbuf_printf(sb, "%s4GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_64G:
			sbuf_printf(sb, "%s64GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_1T:
			sbuf_printf(sb, "%s1TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_4T:
			sbuf_printf(sb, "%s4TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_16T:
			sbuf_printf(sb, "%s16TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_256T:
			sbuf_printf(sb, "%s256TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PARange_4P:
			sbuf_printf(sb, "%s4PB PA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PA Range", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Memory Model Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
		printed = 0;
		sbuf_printf(sb, " Memory Model Features 1 = <");

		switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_XNX_NONE:
			break;
		case ID_AA64MMFR1_XNX_IMPL:
			sbuf_printf(sb, "%sEL2 XN", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown XNX", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_SpecSEI(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_SpecSEI_NONE:
			break;
		case ID_AA64MMFR1_SpecSEI_IMPL:
			sbuf_printf(sb, "%sSpecSEI", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SpecSEI", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_PAN_NONE:
			break;
		case ID_AA64MMFR1_PAN_IMPL:
			sbuf_printf(sb, "%sPAN", SEP_STR);
			break;
		case ID_AA64MMFR1_PAN_ATS1E1:
			sbuf_printf(sb, "%sPAN+AT", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PAN", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_LO_NONE:
			break;
		case ID_AA64MMFR1_LO_IMPL:
			sbuf_printf(sb, "%sLO", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown LO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HPDS_NONE:
			break;
		case ID_AA64MMFR1_HPDS_HPD:
			sbuf_printf(sb, "%sHPDS", SEP_STR);
			break;
		case ID_AA64MMFR1_HPDS_TTPBHA:
			sbuf_printf(sb, "%sTTPBHA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown HPDS", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VH_NONE:
			break;
		case ID_AA64MMFR1_VH_IMPL:
			sbuf_printf(sb, "%sVHE", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VHE", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VMIDBits(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VMIDBits_8:
			break;
		case ID_AA64MMFR1_VMIDBits_16:
			sbuf_printf(sb, "%s16 VMID bits", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VMID bits", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HAFDBS_NONE:
			break;
		case ID_AA64MMFR1_HAFDBS_AF:
			sbuf_printf(sb, "%sAF", SEP_STR);
			break;
		case ID_AA64MMFR1_HAFDBS_AF_DBS:
			sbuf_printf(sb, "%sAF+DBS", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Hardware update AF/DBS", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Memory Model Feature Register 2 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) {
		printed = 0;
		sbuf_printf(sb, " Memory Model Features 2 = <");

		switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_NV_NONE:
			break;
		case ID_AA64MMFR2_NV_IMPL:
			sbuf_printf(sb, "%sNestedVirt", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown NestedVirt", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CCIDX_32:
			sbuf_printf(sb, "%s32b CCIDX", SEP_STR);
			break;
		case ID_AA64MMFR2_CCIDX_64:
			sbuf_printf(sb, "%s64b CCIDX", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CCIDX", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_VARange(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_VARange_48:
			sbuf_printf(sb, "%s48b VA", SEP_STR);
			break;
		case ID_AA64MMFR2_VARange_52:
			sbuf_printf(sb, "%s52b VA", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown VA Range", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_IESB_NONE:
			break;
		case ID_AA64MMFR2_IESB_IMPL:
			sbuf_printf(sb, "%sIESB", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown IESB", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_LSM_NONE:
			break;
		case ID_AA64MMFR2_LSM_IMPL:
			sbuf_printf(sb, "%sLSM", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown LSM", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_UAO_NONE:
			break;
		case ID_AA64MMFR2_UAO_IMPL:
			sbuf_printf(sb, "%sUAO", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown UAO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR2_CnP(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CnP_NONE:
			break;
		case ID_AA64MMFR2_CnP_IMPL:
			sbuf_printf(sb, "%sCnP", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown CnP", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Debug Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
		printed = 0;
		sbuf_printf(sb, " Debug Features 0 = <");
		switch(ID_AA64DFR0_PMSVer(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMSVer_NONE:
			break;
		case ID_AA64DFR0_PMSVer_V1:
			sbuf_printf(sb, "%sSPE v1", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown SPE", SEP_STR);
			break;
		}

		sbuf_printf(sb, "%s%lu CTX Breakpoints", SEP_STR,
		    ID_AA64DFR0_CTX_CMPs(cpu_desc[cpu].id_aa64dfr0));

		sbuf_printf(sb, "%s%lu Watchpoints", SEP_STR,
		    ID_AA64DFR0_WRPs(cpu_desc[cpu].id_aa64dfr0));

		sbuf_printf(sb, "%s%lu Breakpoints", SEP_STR,
		    ID_AA64DFR0_BRPs(cpu_desc[cpu].id_aa64dfr0));

		switch (ID_AA64DFR0_PMUVer(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMUVer_NONE:
			break;
		case ID_AA64DFR0_PMUVer_3:
			sbuf_printf(sb, "%sPMUv3", SEP_STR);
			break;
		case ID_AA64DFR0_PMUVer_3_1:
			sbuf_printf(sb, "%sPMUv3+16 bit evtCount", SEP_STR);
			break;
		case ID_AA64DFR0_PMUVer_IMPL:
			sbuf_printf(sb, "%sImplementation defined PMU", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown PMU", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_TraceVer(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_TraceVer_NONE:
			break;
		case ID_AA64DFR0_TraceVer_IMPL:
			sbuf_printf(sb, "%sTrace", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Trace", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_DebugVer(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_DebugVer_8:
			sbuf_printf(sb, "%sDebug v8", SEP_STR);
			break;
		case ID_AA64DFR0_DebugVer_8_VHE:
			sbuf_printf(sb, "%sDebug v8+VHE", SEP_STR);
			break;
		case ID_AA64DFR0_DebugVer_8_2:
			sbuf_printf(sb, "%sDebug v8.2", SEP_STR);
			break;
		default:
			sbuf_printf(sb, "%sUnknown Debug", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);

		sbuf_finish(sb);
		printf("%s>\n", sbuf_data(sb));
		sbuf_clear(sb);
	}

	/* AArch64 Debug Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
		printf(" Debug Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64dfr1);
	}

	/* AArch64 Auxiliary Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
		printf(" Auxiliary Features 0 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr0);
	}

	/* AArch64 Auxiliary Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
		printf(" Auxiliary Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr1);
	}

	sbuf_delete(sb);
#undef SEP_STR
}

void
identify_cpu(void)
{
	u_int midr;
	u_int impl_id;
	u_int part_id;
	u_int cpu;
	size_t i;
	const struct cpu_parts *cpu_partsp = NULL;

	cpu = PCPU_GET(cpuid);
	midr = get_midr();

	/*
	 * Store midr to pcpu to allow fast reading
	 * from EL0, EL1 and assembly code.
	 */
	PCPU_SET(midr, midr);

	impl_id = CPU_IMPL(midr);
	for (i = 0; i < nitems(cpu_implementers); i++) {
		if (impl_id == cpu_implementers[i].impl_id ||
		    cpu_implementers[i].impl_id == 0) {
			cpu_desc[cpu].cpu_impl = impl_id;
			cpu_desc[cpu].cpu_impl_name =
			    cpu_implementers[i].impl_name;
			cpu_partsp = cpu_implementers[i].cpu_parts;
			break;
		}
	}

	part_id = CPU_PART(midr);
	/* The CPU_PART_NONE sentinel (part_id == 0) terminates the scan. */
	for (i = 0; ; i++) {
		if (part_id == cpu_partsp[i].part_id ||
		    cpu_partsp[i].part_id == 0) {
			cpu_desc[cpu].cpu_part_num = part_id;
			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
			break;
		}
	}

	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);

	snprintf(cpu_model, sizeof(cpu_model), "%s %s r%dp%d",
	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);

	/* Save affinity for current CPU */
	cpu_desc[cpu].mpidr = get_mpidr();
	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;

	cpu_desc[cpu].id_aa64afr0 = READ_SPECIALREG(ID_AA64AFR0_EL1);
	cpu_desc[cpu].id_aa64afr1 = READ_SPECIALREG(ID_AA64AFR1_EL1);
	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1);
	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1);
	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1);
	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1);
	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1);
	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
	cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1);
	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1);

	/*
	 * This code must run on one CPU at a time, but we are
	 * not scheduling on the current core so implement a
	 * simple spinlock.
	 */
	while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
		__asm __volatile("wfe" ::: "memory");
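
	/*
	 * Waiters idle in WFE rather than busy-spinning: each failed
	 * compare-and-set parks the core until an event arrives.  The
	 * unlock path at the end of this function pairs with this by
	 * issuing SEV after the release store, waking every parked core
	 * to retry the acquire.
	 */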

	switch (cpu_aff_levels) {
	case 0:
		if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
		    CPU_AFF0(cpu_desc[0].mpidr))
			cpu_aff_levels = 1;
		/* FALLTHROUGH */
	case 1:
		if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
		    CPU_AFF1(cpu_desc[0].mpidr))
			cpu_aff_levels = 2;
		/* FALLTHROUGH */
	case 2:
		if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
		    CPU_AFF2(cpu_desc[0].mpidr))
			cpu_aff_levels = 3;
		/* FALLTHROUGH */
	case 3:
		if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
		    CPU_AFF3(cpu_desc[0].mpidr))
			cpu_aff_levels = 4;
	}

	if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
		cpu_print_regs |= PRINT_ID_AA64_AFR0;
	if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
		cpu_print_regs |= PRINT_ID_AA64_AFR1;

	if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
		cpu_print_regs |= PRINT_ID_AA64_DFR0;
	if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
		cpu_print_regs |= PRINT_ID_AA64_DFR1;

	if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
		cpu_print_regs |= PRINT_ID_AA64_ISAR0;
	if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
		cpu_print_regs |= PRINT_ID_AA64_ISAR1;

	if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
		cpu_print_regs |= PRINT_ID_AA64_MMFR0;
	if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
		cpu_print_regs |= PRINT_ID_AA64_MMFR1;
	if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
		cpu_print_regs |= PRINT_ID_AA64_MMFR2;

	if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
		cpu_print_regs |= PRINT_ID_AA64_PFR0;
	if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
		cpu_print_regs |= PRINT_ID_AA64_PFR1;

	/* Wake up the other CPUs */
	atomic_store_rel_int(&ident_lock, 0);
	__asm __volatile("sev" ::: "memory");
}