 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * Portions of this software were developed by Semihalf
 * under sponsorship of the FreeBSD Foundation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

static int ident_lock;

char machine[] = "arm64";
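/*
 * The SYSCTL_STRING below publishes the machine[] string above as the
 * read-only hw.machine sysctl, so (for example) running `sysctl hw.machine`
 * from userland should report "arm64".
 */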
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
/*
 * Per-CPU affinity as provided in MPIDR_EL1.
 * Indexed by CPU number in logical order selected by the system.
 * Relevant fields can be extracted using the CPU_AFFn macros;
 * Aff3.Aff2.Aff1.Aff0 together form a unique CPU address in the system.
 *
 * Aff1 - Cluster number
 * Aff0 - CPU number within the Aff1 cluster
 */
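/*
 * Illustrative example (hypothetical MPIDR value): for an MPIDR of
 * 0x0000000000000102, CPU_AFF1() yields 1 (cluster 1) and CPU_AFF0()
 * yields 2 (core 2 within that cluster), giving the affinity address
 * 0.0.1.2.
 */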
uint64_t __cpu_affinity[MAXCPU];
static u_int cpu_aff_levels;

	const char *cpu_impl_name;
	const char *cpu_part_name;
	uint64_t id_aa64isar0;
	uint64_t id_aa64isar1;
	uint64_t id_aa64mmfr0;
	uint64_t id_aa64mmfr1;
	uint64_t id_aa64mmfr2;

struct cpu_desc cpu_desc[MAXCPU];

static u_int cpu_print_regs;
#define	PRINT_ID_AA64_AFR0	0x00000001
#define	PRINT_ID_AA64_AFR1	0x00000002
#define	PRINT_ID_AA64_DFR0	0x00000010
#define	PRINT_ID_AA64_DFR1	0x00000020
#define	PRINT_ID_AA64_ISAR0	0x00000100
#define	PRINT_ID_AA64_ISAR1	0x00000200
#define	PRINT_ID_AA64_MMFR0	0x00001000
#define	PRINT_ID_AA64_MMFR1	0x00002000
#define	PRINT_ID_AA64_MMFR2	0x00004000
#define	PRINT_ID_AA64_PFR0	0x00010000
#define	PRINT_ID_AA64_PFR1	0x00020000
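/*
 * Each PRINT_ID_AA64_* bit is set in identify_cpu() when the corresponding
 * ID register read on a secondary CPU differs from the value read on CPU 0;
 * print_cpu_features() then decodes that register for the differing CPUs as
 * well, instead of only for CPU 0.
 */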
	const char *part_name;

#define	CPU_PART_NONE	{ 0, "Unknown Processor" }

struct cpu_implementers {
	const char *impl_name;
	/*
	 * The part number is implementation defined, so each vendor
	 * will have its own set of values and names.
	 */
	const struct cpu_parts *cpu_parts;

#define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }
/*
 * Per-implementer table of (PartNum, CPU Name) pairs.
 */
static const struct cpu_parts cpu_parts_arm[] = {
	{ CPU_PART_FOUNDATION, "Foundation-Model" },
	{ CPU_PART_CORTEX_A35, "Cortex-A35" },
	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
	{ CPU_PART_CORTEX_A55, "Cortex-A55" },
	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
	{ CPU_PART_CORTEX_A72, "Cortex-A72" },
	{ CPU_PART_CORTEX_A73, "Cortex-A73" },
	{ CPU_PART_CORTEX_A75, "Cortex-A75" },

static const struct cpu_parts cpu_parts_cavium[] = {
	{ CPU_PART_THUNDERX, "ThunderX" },
	{ CPU_PART_THUNDERX2, "ThunderX2" },

static const struct cpu_parts cpu_parts_none[] = {

/*
 * Implementers table.
 */
const struct cpu_implementers cpu_implementers[] = {
	{ CPU_IMPL_ARM, "ARM", cpu_parts_arm },
	{ CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none },
	{ CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium },
	{ CPU_IMPL_DEC, "DEC", cpu_parts_none },
	{ CPU_IMPL_INFINEON, "IFX", cpu_parts_none },
	{ CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none },
	{ CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none },
	{ CPU_IMPL_APM, "APM", cpu_parts_none },
	{ CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_none },
	{ CPU_IMPL_MARVELL, "Marvell", cpu_parts_none },
	{ CPU_IMPL_INTEL, "Intel", cpu_parts_none },
	CPU_IMPLEMENTER_NONE,
identify_cpu_sysinit(void *dummy __unused)
	print_cpu_features(cpu);

SYSINIT(identify_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
print_cpu_features(u_int cpu)

	sb = sbuf_new_auto();
	sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
	    cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
	    cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);

	sbuf_cat(sb, " affinity:");
	switch (cpu_aff_levels) {
		sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
		sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
		sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
	case 0: /* On UP this will be zero */
		sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));

	printf("%s\n", sbuf_data(sb));
	/*
	 * There is a hardware erratum where, if one CPU is performing a TLB
	 * invalidation while another is performing a store-exclusive, the
	 * store-exclusive may return the wrong status. A workaround seems
	 * to be to use an IPI to invalidate on each CPU, however, given the
	 * limited number of affected units (pass 1.1 is the evaluation
	 * hardware revision) and the lack of information from Cavium,
	 * this has not been implemented.
	 *
	 * At the time of writing, the only information is from:
	 * https://lkml.org/lkml/2016/8/4/722
	 *
	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
	 * triggers on pass 2.0+.
	 */
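	/*
	 * Presumably this is why the check below also requires
	 * CPU_VAR(midr) == 0: pass 1.x parts report variant 0, so the
	 * extra test keeps the warning from firing on pass 2.0+ silicon.
	 */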
	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
	    CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
		    "hardware bugs that may cause incorrect operation of "
		    "atomic operations.\n");

	if (cpu != 0 && cpu_print_regs == 0)

#define	SEP_STR	((printed++) == 0) ? "" : ","
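/*
 * SEP_STR expands to "" on its first use after the local `printed` counter
 * is reset to zero, and to "," on every later use, so each decoded register
 * prints as a comma-separated feature list inside the <...> brackets.
 */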
	/* AArch64 Instruction Set Attribute Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
		sbuf_printf(sb, " Instruction Set Attributes 0 = <");

		switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_RDM_NONE:
		case ID_AA64ISAR0_RDM_IMPL:
			sbuf_printf(sb, "%sRDM", SEP_STR);
			sbuf_printf(sb, "%sUnknown RDM", SEP_STR);

		switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_ATOMIC_NONE:
		case ID_AA64ISAR0_ATOMIC_IMPL:
			sbuf_printf(sb, "%sAtomic", SEP_STR);
			sbuf_printf(sb, "%sUnknown Atomic", SEP_STR);

		switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_AES_NONE:
		case ID_AA64ISAR0_AES_BASE:
			sbuf_printf(sb, "%sAES", SEP_STR);
		case ID_AA64ISAR0_AES_PMULL:
			sbuf_printf(sb, "%sAES+PMULL", SEP_STR);
			sbuf_printf(sb, "%sUnknown AES", SEP_STR);

		switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA1_NONE:
		case ID_AA64ISAR0_SHA1_BASE:
			sbuf_printf(sb, "%sSHA1", SEP_STR);
			sbuf_printf(sb, "%sUnknown SHA1", SEP_STR);

		switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA2_NONE:
		case ID_AA64ISAR0_SHA2_BASE:
			sbuf_printf(sb, "%sSHA2", SEP_STR);
		case ID_AA64ISAR0_SHA2_512:
			sbuf_printf(sb, "%sSHA2+SHA512", SEP_STR);
			sbuf_printf(sb, "%sUnknown SHA2", SEP_STR);

		switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_CRC32_NONE:
		case ID_AA64ISAR0_CRC32_BASE:
			sbuf_printf(sb, "%sCRC32", SEP_STR);
			sbuf_printf(sb, "%sUnknown CRC32", SEP_STR);

		switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA3_NONE:
		case ID_AA64ISAR0_SHA3_IMPL:
			sbuf_printf(sb, "%sSHA3", SEP_STR);
			sbuf_printf(sb, "%sUnknown SHA3", SEP_STR);

		switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM3_NONE:
		case ID_AA64ISAR0_SM3_IMPL:
			sbuf_printf(sb, "%sSM3", SEP_STR);
			sbuf_printf(sb, "%sUnknown SM3", SEP_STR);

		switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM4_NONE:
		case ID_AA64ISAR0_SM4_IMPL:
			sbuf_printf(sb, "%sSM4", SEP_STR);
			sbuf_printf(sb, "%sUnknown SM4", SEP_STR);

		switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_DP_NONE:
		case ID_AA64ISAR0_DP_IMPL:
			sbuf_printf(sb, "%sDotProd", SEP_STR);
			sbuf_printf(sb, "%sUnknown DP", SEP_STR);

		if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);

		printf("%s>\n", sbuf_data(sb));
	/* AArch64 Instruction Set Attribute Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
		sbuf_printf(sb, " Instruction Set Attributes 1 = <");

		switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPI_NONE:
		case ID_AA64ISAR1_GPI_IMPL:
			sbuf_printf(sb, "%sImpl GenericAuth", SEP_STR);
			sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);

		switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPA_NONE:
		case ID_AA64ISAR1_GPA_IMPL:
			sbuf_printf(sb, "%sPrince GenericAuth", SEP_STR);
			sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);

		switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_LRCPC_NONE:
		case ID_AA64ISAR1_LRCPC_IMPL:
			sbuf_printf(sb, "%sRCpc", SEP_STR);
			sbuf_printf(sb, "%sUnknown RCpc", SEP_STR);

		switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_FCMA_NONE:
		case ID_AA64ISAR1_FCMA_IMPL:
			sbuf_printf(sb, "%sFCMA", SEP_STR);
			sbuf_printf(sb, "%sUnknown FCMA", SEP_STR);

		switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_JSCVT_NONE:
		case ID_AA64ISAR1_JSCVT_IMPL:
			sbuf_printf(sb, "%sJS Conv", SEP_STR);
			sbuf_printf(sb, "%sUnknown JS Conv", SEP_STR);

		switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_API_NONE:
		case ID_AA64ISAR1_API_IMPL:
			sbuf_printf(sb, "%sImpl AddrAuth", SEP_STR);
			sbuf_printf(sb, "%sUnknown Impl AddrAuth", SEP_STR);

		switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_APA_NONE:
		case ID_AA64ISAR1_APA_IMPL:
			sbuf_printf(sb, "%sPrince AddrAuth", SEP_STR);
			sbuf_printf(sb, "%sUnknown Prince AddrAuth", SEP_STR);

		switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_DPB_NONE:
		case ID_AA64ISAR1_DPB_IMPL:
			sbuf_printf(sb, "%sDC CVAP", SEP_STR);
			sbuf_printf(sb, "%sUnknown DC CVAP", SEP_STR);

		if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK);

		printf("%s>\n", sbuf_data(sb));
	/* AArch64 Processor Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
		sbuf_printf(sb, " Processor Features 0 = <");

		switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_SVE_NONE:
		case ID_AA64PFR0_SVE_IMPL:
			sbuf_printf(sb, "%sSVE", SEP_STR);
			sbuf_printf(sb, "%sUnknown SVE", SEP_STR);

		switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_RAS_NONE:
		case ID_AA64PFR0_RAS_V1:
			sbuf_printf(sb, "%sRASv1", SEP_STR);
			sbuf_printf(sb, "%sUnknown RAS", SEP_STR);

		switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_GIC_CPUIF_NONE:
		case ID_AA64PFR0_GIC_CPUIF_EN:
			sbuf_printf(sb, "%sGIC", SEP_STR);
			sbuf_printf(sb, "%sUnknown GIC interface", SEP_STR);

		switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_ADV_SIMD_NONE:
		case ID_AA64PFR0_ADV_SIMD_IMPL:
			sbuf_printf(sb, "%sAdvSIMD", SEP_STR);
		case ID_AA64PFR0_ADV_SIMD_HP:
			sbuf_printf(sb, "%sAdvSIMD+HP", SEP_STR);
			sbuf_printf(sb, "%sUnknown AdvSIMD", SEP_STR);

		switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_FP_NONE:
		case ID_AA64PFR0_FP_IMPL:
			sbuf_printf(sb, "%sFloat", SEP_STR);
		case ID_AA64PFR0_FP_HP:
			sbuf_printf(sb, "%sFloat+HP", SEP_STR);
			sbuf_printf(sb, "%sUnknown Float", SEP_STR);

		switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL3_NONE:
			sbuf_printf(sb, "%sNo EL3", SEP_STR);
		case ID_AA64PFR0_EL3_64:
			sbuf_printf(sb, "%sEL3", SEP_STR);
		case ID_AA64PFR0_EL3_64_32:
			sbuf_printf(sb, "%sEL3 32", SEP_STR);
			sbuf_printf(sb, "%sUnknown EL3", SEP_STR);

		switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL2_NONE:
			sbuf_printf(sb, "%sNo EL2", SEP_STR);
		case ID_AA64PFR0_EL2_64:
			sbuf_printf(sb, "%sEL2", SEP_STR);
		case ID_AA64PFR0_EL2_64_32:
			sbuf_printf(sb, "%sEL2 32", SEP_STR);
			sbuf_printf(sb, "%sUnknown EL2", SEP_STR);

		switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL1_64:
			sbuf_printf(sb, "%sEL1", SEP_STR);
		case ID_AA64PFR0_EL1_64_32:
			sbuf_printf(sb, "%sEL1 32", SEP_STR);
			sbuf_printf(sb, "%sUnknown EL1", SEP_STR);

		switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL0_64:
			sbuf_printf(sb, "%sEL0", SEP_STR);
		case ID_AA64PFR0_EL0_64_32:
			sbuf_printf(sb, "%sEL0 32", SEP_STR);
			sbuf_printf(sb, "%sUnknown EL0", SEP_STR);

		if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);

		printf("%s>\n", sbuf_data(sb));
	/* AArch64 Processor Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
		printf(" Processor Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64pfr1);
	/* AArch64 Memory Model Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
		sbuf_printf(sb, " Memory Model Features 0 = <");

		switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN4_NONE:
		case ID_AA64MMFR0_TGRAN4_IMPL:
			sbuf_printf(sb, "%s4k Granule", SEP_STR);
			sbuf_printf(sb, "%sUnknown 4k Granule", SEP_STR);

		switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN16_NONE:
		case ID_AA64MMFR0_TGRAN16_IMPL:
			sbuf_printf(sb, "%s16k Granule", SEP_STR);
			sbuf_printf(sb, "%sUnknown 16k Granule", SEP_STR);

		switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN64_NONE:
		case ID_AA64MMFR0_TGRAN64_IMPL:
			sbuf_printf(sb, "%s64k Granule", SEP_STR);
			sbuf_printf(sb, "%sUnknown 64k Granule", SEP_STR);

		switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_FIXED:
		case ID_AA64MMFR0_BIGEND_MIXED:
			sbuf_printf(sb, "%sMixedEndian", SEP_STR);
			sbuf_printf(sb, "%sUnknown Endian switching", SEP_STR);

		switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_EL0_FIXED:
		case ID_AA64MMFR0_BIGEND_EL0_MIXED:
			sbuf_printf(sb, "%sEL0 MixEndian", SEP_STR);
			sbuf_printf(sb, "%sUnknown EL0 Endian switching", SEP_STR);

		switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_S_NS_MEM_NONE:
		case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
			sbuf_printf(sb, "%sS/NS Mem", SEP_STR);
			sbuf_printf(sb, "%sUnknown S/NS Mem", SEP_STR);

		switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_ASID_BITS_8:
			sbuf_printf(sb, "%s8bit ASID", SEP_STR);
		case ID_AA64MMFR0_ASID_BITS_16:
			sbuf_printf(sb, "%s16bit ASID", SEP_STR);
			sbuf_printf(sb, "%sUnknown ASID", SEP_STR);

		switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_PA_RANGE_4G:
			sbuf_printf(sb, "%s4GB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_64G:
			sbuf_printf(sb, "%s64GB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_1T:
			sbuf_printf(sb, "%s1TB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_4T:
			sbuf_printf(sb, "%s4TB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_16T:
			sbuf_printf(sb, "%s16TB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_256T:
			sbuf_printf(sb, "%s256TB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_4P:
			sbuf_printf(sb, "%s4PB PA", SEP_STR);
			sbuf_printf(sb, "%sUnknown PA Range", SEP_STR);

		if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);

		printf("%s>\n", sbuf_data(sb));
	/* AArch64 Memory Model Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
		sbuf_printf(sb, " Memory Model Features 1 = <");

		switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_XNX_NONE:
		case ID_AA64MMFR1_XNX_IMPL:
			sbuf_printf(sb, "%sEL2 XN", SEP_STR);
			sbuf_printf(sb, "%sUnknown XNX", SEP_STR);

		switch (ID_AA64MMFR1_SPEC_SEI(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_SPEC_SEI_NONE:
		case ID_AA64MMFR1_SPEC_SEI_IMPL:
			sbuf_printf(sb, "%sSpecSEI", SEP_STR);
			sbuf_printf(sb, "%sUnknown SpecSEI", SEP_STR);

		switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_PAN_NONE:
		case ID_AA64MMFR1_PAN_IMPL:
			sbuf_printf(sb, "%sPAN", SEP_STR);
		case ID_AA64MMFR1_PAN_ATS1E1:
			sbuf_printf(sb, "%sPAN+AT", SEP_STR);
			sbuf_printf(sb, "%sUnknown PAN", SEP_STR);

		switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_LO_NONE:
		case ID_AA64MMFR1_LO_IMPL:
			sbuf_printf(sb, "%sLO", SEP_STR);
			sbuf_printf(sb, "%sUnknown LO", SEP_STR);

		switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HPDS_NONE:
		case ID_AA64MMFR1_HPDS_HPD:
			sbuf_printf(sb, "%sHPDS", SEP_STR);
		case ID_AA64MMFR1_HPDS_TTPBHA:
			sbuf_printf(sb, "%sTTPBHA", SEP_STR);
			sbuf_printf(sb, "%sUnknown HPDS", SEP_STR);

		switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VH_NONE:
		case ID_AA64MMFR1_VH_IMPL:
			sbuf_printf(sb, "%sVHE", SEP_STR);
			sbuf_printf(sb, "%sUnknown VHE", SEP_STR);

		switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VMIDBITS_8:
		case ID_AA64MMFR1_VMIDBITS_16:
			sbuf_printf(sb, "%s16 VMID bits", SEP_STR);
			sbuf_printf(sb, "%sUnknown VMID bits", SEP_STR);

		switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HAFDBS_NONE:
		case ID_AA64MMFR1_HAFDBS_AF:
			sbuf_printf(sb, "%sAF", SEP_STR);
		case ID_AA64MMFR1_HAFDBS_AF_DBS:
			sbuf_printf(sb, "%sAF+DBS", SEP_STR);
			sbuf_printf(sb, "%sUnknown Hardware update AF/DBS", SEP_STR);

		if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);

		printf("%s>\n", sbuf_data(sb));
	/* AArch64 Memory Model Feature Register 2 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) {
		sbuf_printf(sb, " Memory Model Features 2 = <");

		switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_NV_NONE:
		case ID_AA64MMFR2_NV_IMPL:
			sbuf_printf(sb, "%sNestedVirt", SEP_STR);
			sbuf_printf(sb, "%sUnknown NestedVirt", SEP_STR);

		switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CCIDX_32:
			sbuf_printf(sb, "%s32b CCIDX", SEP_STR);
		case ID_AA64MMFR2_CCIDX_64:
			sbuf_printf(sb, "%s64b CCIDX", SEP_STR);
			sbuf_printf(sb, "%sUnknown CCIDX", SEP_STR);

		switch (ID_AA64MMFR2_VA_RANGE(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_VA_RANGE_48:
			sbuf_printf(sb, "%s48b VA", SEP_STR);
		case ID_AA64MMFR2_VA_RANGE_52:
			sbuf_printf(sb, "%s52b VA", SEP_STR);
			sbuf_printf(sb, "%sUnknown VA Range", SEP_STR);

		switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_IESB_NONE:
		case ID_AA64MMFR2_IESB_IMPL:
			sbuf_printf(sb, "%sIESB", SEP_STR);
			sbuf_printf(sb, "%sUnknown IESB", SEP_STR);

		switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_LSM_NONE:
		case ID_AA64MMFR2_LSM_IMPL:
			sbuf_printf(sb, "%sLSM", SEP_STR);
			sbuf_printf(sb, "%sUnknown LSM", SEP_STR);

		switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_UAO_NONE:
		case ID_AA64MMFR2_UAO_IMPL:
			sbuf_printf(sb, "%sUAO", SEP_STR);
			sbuf_printf(sb, "%sUnknown UAO", SEP_STR);

		switch (ID_AA64MMFR2_CNP(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CNP_NONE:
		case ID_AA64MMFR2_CNP_IMPL:
			sbuf_printf(sb, "%sCnP", SEP_STR);
			sbuf_printf(sb, "%sUnknown CnP", SEP_STR);

		if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK);

		printf("%s>\n", sbuf_data(sb));
	/* AArch64 Debug Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
		sbuf_printf(sb, " Debug Features 0 = <");

		switch (ID_AA64DFR0_PMS_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMS_VER_NONE:
		case ID_AA64DFR0_PMS_VER_V1:
			sbuf_printf(sb, "%sSPE v1", SEP_STR);
			sbuf_printf(sb, "%sUnknown SPE", SEP_STR);

		sbuf_printf(sb, "%s%lu CTX Breakpoints", SEP_STR,
		    ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));

		sbuf_printf(sb, "%s%lu Watchpoints", SEP_STR,
		    ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));

		sbuf_printf(sb, "%s%lu Breakpoints", SEP_STR,
		    ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));

		switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMU_VER_NONE:
		case ID_AA64DFR0_PMU_VER_3:
			sbuf_printf(sb, "%sPMUv3", SEP_STR);
		case ID_AA64DFR0_PMU_VER_3_1:
			sbuf_printf(sb, "%sPMUv3+16 bit evtCount", SEP_STR);
		case ID_AA64DFR0_PMU_VER_IMPL:
			sbuf_printf(sb, "%sImplementation defined PMU", SEP_STR);
			sbuf_printf(sb, "%sUnknown PMU", SEP_STR);

		switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_TRACE_VER_NONE:
		case ID_AA64DFR0_TRACE_VER_IMPL:
			sbuf_printf(sb, "%sTrace", SEP_STR);
			sbuf_printf(sb, "%sUnknown Trace", SEP_STR);

		switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_DEBUG_VER_8:
			sbuf_printf(sb, "%sDebug v8", SEP_STR);
		case ID_AA64DFR0_DEBUG_VER_8_VHE:
			sbuf_printf(sb, "%sDebug v8+VHE", SEP_STR);
		case ID_AA64DFR0_DEBUG_VER_8_2:
			sbuf_printf(sb, "%sDebug v8.2", SEP_STR);
			sbuf_printf(sb, "%sUnknown Debug", SEP_STR);

		if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
			sbuf_printf(sb, "%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);

		printf("%s>\n", sbuf_data(sb));
	/* AArch64 Debug Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
		printf(" Debug Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64dfr1);
	/* AArch64 Auxiliary Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
		printf(" Auxiliary Features 0 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr0);

	/* AArch64 Auxiliary Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
		printf(" Auxiliary Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr1);
	const struct cpu_parts *cpu_partsp = NULL;

	cpu = PCPU_GET(cpuid);

	/*
	 * Store midr to pcpu to allow fast reading
	 * from EL0, EL1 and assembly code.
	 */
	PCPU_SET(midr, midr);

	impl_id = CPU_IMPL(midr);
	for (i = 0; i < nitems(cpu_implementers); i++) {
		if (impl_id == cpu_implementers[i].impl_id ||
		    cpu_implementers[i].impl_id == 0) {
			cpu_desc[cpu].cpu_impl = impl_id;
			cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
			cpu_partsp = cpu_implementers[i].cpu_parts;

	part_id = CPU_PART(midr);
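	/*
	 * Note: the loop condition below is always true; the lookup
	 * actually terminates at the CPU_PART_NONE sentinel entry
	 * (part_id 0), which matches any remaining part number.
	 */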
	for (i = 0; &cpu_partsp[i] != NULL; i++) {
		if (part_id == cpu_partsp[i].part_id ||
		    cpu_partsp[i].part_id == 0) {
			cpu_desc[cpu].cpu_part_num = part_id;
			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;

	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);

	/* Save affinity for current CPU */
	cpu_desc[cpu].mpidr = get_mpidr();
	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;

	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1);
	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1);
	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1);
	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1);
	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1);
	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
	cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1);
	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1);
	/*
	 * This code must run on one cpu at a time, but we are
	 * not scheduling on the current core so implement a
	 * simple spinlock.
	 */
	while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
		__asm __volatile("wfe" ::: "memory");
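	/*
	 * The waiters above park in WFE between lock attempts; the matching
	 * SEV issued when the lock is released at the end of this function
	 * is what wakes them so they can retry the compare-and-set.
	 */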
	switch (cpu_aff_levels) {
		if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
		    CPU_AFF0(cpu_desc[0].mpidr))
		if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
		    CPU_AFF1(cpu_desc[0].mpidr))
		if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
		    CPU_AFF2(cpu_desc[0].mpidr))
		if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
		    CPU_AFF3(cpu_desc[0].mpidr))

	if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
		cpu_print_regs |= PRINT_ID_AA64_AFR0;
	if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
		cpu_print_regs |= PRINT_ID_AA64_AFR1;

	if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
		cpu_print_regs |= PRINT_ID_AA64_DFR0;
	if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
		cpu_print_regs |= PRINT_ID_AA64_DFR1;

	if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
		cpu_print_regs |= PRINT_ID_AA64_ISAR0;
	if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
		cpu_print_regs |= PRINT_ID_AA64_ISAR1;

	if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
		cpu_print_regs |= PRINT_ID_AA64_MMFR0;
	if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
		cpu_print_regs |= PRINT_ID_AA64_MMFR1;
	if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
		cpu_print_regs |= PRINT_ID_AA64_MMFR2;

	if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
		cpu_print_regs |= PRINT_ID_AA64_PFR0;
	if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
		cpu_print_regs |= PRINT_ID_AA64_PFR1;
	/* Wake up the other CPUs */
	atomic_store_rel_int(&ident_lock, 0);
	__asm __volatile("sev" ::: "memory");