2 * Copyright (c) 2014 Andrew Turner
3 * Copyright (c) 2014 The FreeBSD Foundation
6 * Portions of this software were developed by Semihalf
7 * under sponsorship of the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/kernel.h>
39 #include <sys/sysctl.h>
40 #include <sys/systm.h>
42 #include <machine/atomic.h>
43 #include <machine/cpu.h>
44 #include <machine/cpufunc.h>
/* Spinlock-style flag serializing identify_cpu() across CPUs (0 = free). */
static int ident_lock;

/* Exported via the hw.machine sysctl below. */
char machine[] = "arm64";

SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,

/*
 * Per-CPU affinity as provided in MPIDR_EL1.
 * Indexed by CPU number in logical order selected by the system.
 * Relevant fields can be extracted using CPU_AFFn macros;
 * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system.
 *
 * Aff1 - Cluster number
 * Aff0 - CPU number in Aff1 cluster
 */
uint64_t __cpu_affinity[MAXCPU];
/* Deepest MPIDR affinity level at which CPUs differ; 0 on UP systems. */
static u_int cpu_aff_levels;

/* Decoded implementer/part strings (fields of struct cpu_desc). */
const char *cpu_impl_name;
const char *cpu_part_name;

/* Cached AArch64 ID register values, read once per CPU in identify_cpu(). */
uint64_t id_aa64isar0;
uint64_t id_aa64isar1;
uint64_t id_aa64mmfr0;
uint64_t id_aa64mmfr1;
uint64_t id_aa64mmfr2;

struct cpu_desc cpu_desc[MAXCPU];
/*
 * Bitmask of ID registers that differ from CPU0's values; secondary CPUs
 * print only the registers flagged here (see print_cpu_features()).
 */
static u_int cpu_print_regs;
#define	PRINT_ID_AA64_AFR0	0x00000001
#define	PRINT_ID_AA64_AFR1	0x00000002
#define	PRINT_ID_AA64_DFR0	0x00000010
#define	PRINT_ID_AA64_DFR1	0x00000020
#define	PRINT_ID_AA64_ISAR0	0x00000100
#define	PRINT_ID_AA64_ISAR1	0x00000200
#define	PRINT_ID_AA64_MMFR0	0x00001000
#define	PRINT_ID_AA64_MMFR1	0x00002000
#define	PRINT_ID_AA64_MMFR2	0x00004000
#define	PRINT_ID_AA64_PFR0	0x00010000
#define	PRINT_ID_AA64_PFR1	0x00020000

/* (PartNum, name) pair; part_id 0 is the table terminator/wildcard. */
const char *part_name;

#define	CPU_PART_NONE	{ 0, "Unknown Processor" }

struct cpu_implementers {
	const char *impl_name;
	/*
	 * Part number is implementation defined
	 * so each vendor will have its own set of values and names.
	 */
	const struct cpu_parts *cpu_parts;

#define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }
/*
 * Per-implementer table of (PartNum, CPU Name) pairs.
 * Terminated by a CPU_PART_NONE sentinel (not visible in this extract).
 */
/* ARM Ltd. parts. */
static const struct cpu_parts cpu_parts_arm[] = {
	{ CPU_PART_FOUNDATION, "Foundation-Model" },
	{ CPU_PART_CORTEX_A35, "Cortex-A35" },
	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
	{ CPU_PART_CORTEX_A55, "Cortex-A55" },
	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
	{ CPU_PART_CORTEX_A72, "Cortex-A72" },
	{ CPU_PART_CORTEX_A73, "Cortex-A73" },
	{ CPU_PART_CORTEX_A75, "Cortex-A75" },
/* Cavium parts. */
static const struct cpu_parts cpu_parts_cavium[] = {
	{ CPU_PART_THUNDERX, "ThunderX" },
	{ CPU_PART_THUNDERX2, "ThunderX2" },

/* Fallback for implementers with no known part table. */
static const struct cpu_parts cpu_parts_none[] = {

/*
 * Implementers table.  Matched against MIDR_EL1's implementer field;
 * the CPU_IMPLEMENTER_NONE sentinel (impl_id 0) acts as a catch-all.
 */
const struct cpu_implementers cpu_implementers[] = {
	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
	{ CPU_IMPL_APM,		"APM",		cpu_parts_none },
	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
	CPU_IMPLEMENTER_NONE,
/*
 * SYSINIT hook: once SMP startup is complete, print the features of
 * every CPU (presumably iterating with CPU_FOREACH -- the loop itself
 * is not visible in this extract).
 */
identify_cpu_sysinit(void *dummy __unused)
		print_cpu_features(cpu);
/*
 * Run after SMP bring-up so every CPU's cpu_desc[] slot is populated.
 * The first argument is only a unique identifier; the previous spelling
 * "idenrity_cpu" was a typo.
 */
SYSINIT(identity_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
/*
 * Pretty-print the identity (implementer, part, revision, MPIDR affinity)
 * and the cached AArch64 ID registers for one CPU.  CPU0 always prints
 * every register; other CPUs print only those flagged in cpu_print_regs,
 * i.e. registers whose value was found to differ from CPU0's.
 *
 * NOTE(review): this is a sampled extract -- many case labels, break
 * statements and closing braces are not visible here; tokens below are
 * preserved as-is.
 */
print_cpu_features(u_int cpu)

	printf("CPU%3d: %s %s r%dp%d", cpu, cpu_desc[cpu].cpu_impl_name,
	    cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant,
	    cpu_desc[cpu].cpu_revision);

	printf(" affinity:");
	switch(cpu_aff_levels) {
	/* Intentional fall-through: print from the deepest level to Aff0. */
		printf(" %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
		printf(" %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
		printf(" %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
	case 0: /* On UP this will be zero */
		printf(" %2d", CPU_AFF0(cpu_desc[cpu].mpidr));

	/*
	 * There is a hardware errata where, if one CPU is performing a TLB
	 * invalidation while another is performing a store-exclusive the
	 * store-exclusive may return the wrong status. A workaround seems
	 * to be to use an IPI to invalidate on each CPU, however given the
	 * limited number of affected units (pass 1.1 is the evaluation
	 * hardware revision), and the lack of information from Cavium
	 * this has not been implemented.
	 *
	 * At the time of writing this the only information is from:
	 * https://lkml.org/lkml/2016/8/4/722
	 */
	/*
	 * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
	 * triggers on pass 2.0+.
	 */
	if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
	    CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
		printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
		    "hardware bugs that may cause the incorrect operation of "
		    "atomic operations.\n");

	/* Secondary CPUs are silent unless some register differed from CPU0. */
	if (cpu != 0 && cpu_print_regs == 0)

/*
 * Comma separator: empty before the first item printed in a section,
 * "," afterwards.  Relies on a local `printed' counter (its declaration
 * and resets are not visible in this extract).
 */
#define	SEP_STR	((printed++) == 0) ? "" : ","

	/* AArch64 Instruction Set Attribute Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
		printf(" Instruction Set Attributes 0 = <");
		switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_RDM_NONE:
		case ID_AA64ISAR0_RDM_IMPL:
			printf("%sRDM", SEP_STR);
			printf("%sUnknown RDM", SEP_STR);

		switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_ATOMIC_NONE:
		case ID_AA64ISAR0_ATOMIC_IMPL:
			printf("%sAtomic", SEP_STR);
			printf("%sUnknown Atomic", SEP_STR);

		switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_AES_NONE:
		case ID_AA64ISAR0_AES_BASE:
			printf("%sAES", SEP_STR);
		case ID_AA64ISAR0_AES_PMULL:
			printf("%sAES+PMULL", SEP_STR);
			printf("%sUnknown AES", SEP_STR);

		switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA1_NONE:
		case ID_AA64ISAR0_SHA1_BASE:
			printf("%sSHA1", SEP_STR);
			printf("%sUnknown SHA1", SEP_STR);

		switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA2_NONE:
		case ID_AA64ISAR0_SHA2_BASE:
			printf("%sSHA2", SEP_STR);
		case ID_AA64ISAR0_SHA2_512:
			printf("%sSHA2+SHA512", SEP_STR);
			printf("%sUnknown SHA2", SEP_STR);

		switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_CRC32_NONE:
		case ID_AA64ISAR0_CRC32_BASE:
			printf("%sCRC32", SEP_STR);
			printf("%sUnknown CRC32", SEP_STR);

		switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA3_NONE:
		case ID_AA64ISAR0_SHA3_IMPL:
			printf("%sSHA3", SEP_STR);
			printf("%sUnknown SHA3", SEP_STR);

		switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM3_NONE:
		case ID_AA64ISAR0_SM3_IMPL:
			printf("%sSM3", SEP_STR);
			printf("%sUnknown SM3", SEP_STR);

		switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SM4_NONE:
		case ID_AA64ISAR0_SM4_IMPL:
			printf("%sSM4", SEP_STR);
			printf("%sUnknown SM4", SEP_STR);

		switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_DP_NONE:
		case ID_AA64ISAR0_DP_IMPL:
			printf("%sDotProd", SEP_STR);
			printf("%sUnknown DP", SEP_STR);

		/* Dump any reserved/unknown bits so nothing is silently lost. */
		if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);

	/* AArch64 Instruction Set Attribute Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
		printf(" Instruction Set Attributes 1 = <");
		switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPI_NONE:
		case ID_AA64ISAR1_GPI_IMPL:
			printf("%sImpl GenericAuth", SEP_STR);
			printf("%sUnknown GenericAuth", SEP_STR);

		switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_GPA_NONE:
		case ID_AA64ISAR1_GPA_IMPL:
			printf("%sPrince GenericAuth", SEP_STR);
			printf("%sUnknown GenericAuth", SEP_STR);

		switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_LRCPC_NONE:
		case ID_AA64ISAR1_LRCPC_IMPL:
			printf("%sRCpc", SEP_STR);
			printf("%sUnknown RCpc", SEP_STR);

		switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_FCMA_NONE:
		case ID_AA64ISAR1_FCMA_IMPL:
			printf("%sFCMA", SEP_STR);
			printf("%sUnknown FCMA", SEP_STR);

		switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_JSCVT_NONE:
		case ID_AA64ISAR1_JSCVT_IMPL:
			printf("%sJS Conv", SEP_STR);
			printf("%sUnknown JS Conv", SEP_STR);

		switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_API_NONE:
		case ID_AA64ISAR1_API_IMPL:
			printf("%sImpl AddrAuth", SEP_STR);
			printf("%sUnknown Impl AddrAuth", SEP_STR);

		switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_APA_NONE:
		case ID_AA64ISAR1_APA_IMPL:
			printf("%sPrince AddrAuth", SEP_STR);
			printf("%sUnknown Prince AddrAuth", SEP_STR);

		switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) {
		case ID_AA64ISAR1_DPB_NONE:
		case ID_AA64ISAR1_DPB_IMPL:
			printf("%sDC CVAP", SEP_STR);
			printf("%sUnknown DC CVAP", SEP_STR);

		if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK);

	/* AArch64 Processor Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
		printf(" Processor Features 0 = <");
		switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_SVE_NONE:
		case ID_AA64PFR0_SVE_IMPL:
			printf("%sSVE", SEP_STR);
			printf("%sUnknown SVE", SEP_STR);

		switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_RAS_NONE:
		case ID_AA64PFR0_RAS_V1:
			printf("%sRASv1", SEP_STR);
			printf("%sUnknown RAS", SEP_STR);

		switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_GIC_CPUIF_NONE:
		case ID_AA64PFR0_GIC_CPUIF_EN:
			printf("%sGIC", SEP_STR);
			printf("%sUnknown GIC interface", SEP_STR);

		switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_ADV_SIMD_NONE:
		case ID_AA64PFR0_ADV_SIMD_IMPL:
			printf("%sAdvSIMD", SEP_STR);
		case ID_AA64PFR0_ADV_SIMD_HP:
			printf("%sAdvSIMD+HP", SEP_STR);
			printf("%sUnknown AdvSIMD", SEP_STR);

		switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_FP_NONE:
		case ID_AA64PFR0_FP_IMPL:
			printf("%sFloat", SEP_STR);
		case ID_AA64PFR0_FP_HP:
			printf("%sFloat+HP", SEP_STR);
			printf("%sUnknown Float", SEP_STR);

		switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL3_NONE:
			printf("%sNo EL3", SEP_STR);
		case ID_AA64PFR0_EL3_64:
			printf("%sEL3", SEP_STR);
		case ID_AA64PFR0_EL3_64_32:
			printf("%sEL3 32", SEP_STR);
			printf("%sUnknown EL3", SEP_STR);

		switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL2_NONE:
			printf("%sNo EL2", SEP_STR);
		case ID_AA64PFR0_EL2_64:
			printf("%sEL2", SEP_STR);
		case ID_AA64PFR0_EL2_64_32:
			printf("%sEL2 32", SEP_STR);
			printf("%sUnknown EL2", SEP_STR);

		switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL1_64:
			printf("%sEL1", SEP_STR);
		case ID_AA64PFR0_EL1_64_32:
			printf("%sEL1 32", SEP_STR);
			printf("%sUnknown EL1", SEP_STR);

		switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL0_64:
			printf("%sEL0", SEP_STR);
		case ID_AA64PFR0_EL0_64_32:
			printf("%sEL0 32", SEP_STR);
			printf("%sUnknown EL0", SEP_STR);

		if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);

	/* AArch64 Processor Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
		printf(" Processor Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64pfr1);

	/* AArch64 Memory Model Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
		printf(" Memory Model Features 0 = <");
		switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN4_NONE:
		case ID_AA64MMFR0_TGRAN4_IMPL:
			printf("%s4k Granule", SEP_STR);
			printf("%sUnknown 4k Granule", SEP_STR);

		switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN16_NONE:
		case ID_AA64MMFR0_TGRAN16_IMPL:
			printf("%s16k Granule", SEP_STR);
			printf("%sUnknown 16k Granule", SEP_STR);

		switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN64_NONE:
		case ID_AA64MMFR0_TGRAN64_IMPL:
			printf("%s64k Granule", SEP_STR);
			printf("%sUnknown 64k Granule", SEP_STR);

		switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_FIXED:
		case ID_AA64MMFR0_BIGEND_MIXED:
			printf("%sMixedEndian", SEP_STR);
			printf("%sUnknown Endian switching", SEP_STR);

		switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_EL0_FIXED:
		case ID_AA64MMFR0_BIGEND_EL0_MIXED:
			printf("%sEL0 MixEndian", SEP_STR);
			printf("%sUnknown EL0 Endian switching", SEP_STR);

		switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_S_NS_MEM_NONE:
		case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
			printf("%sS/NS Mem", SEP_STR);
			printf("%sUnknown S/NS Mem", SEP_STR);

		switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_ASID_BITS_8:
			printf("%s8bit ASID", SEP_STR);
		case ID_AA64MMFR0_ASID_BITS_16:
			printf("%s16bit ASID", SEP_STR);
			printf("%sUnknown ASID", SEP_STR);

		switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_PA_RANGE_4G:
			printf("%s4GB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_64G:
			printf("%s64GB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_1T:
			printf("%s1TB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_4T:
			printf("%s4TB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_16T:
			printf("%s16TB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_256T:
			printf("%s256TB PA", SEP_STR);
		case ID_AA64MMFR0_PA_RANGE_4P:
			printf("%s4PB PA", SEP_STR);
			printf("%sUnknown PA Range", SEP_STR);

		if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);

	/* AArch64 Memory Model Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
		printf(" Memory Model Features 1 = <");
		switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_XNX_NONE:
		case ID_AA64MMFR1_XNX_IMPL:
			printf("%sEL2 XN", SEP_STR);
			printf("%sUnknown XNX", SEP_STR);

		switch (ID_AA64MMFR1_SPEC_SEI(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_SPEC_SEI_NONE:
		case ID_AA64MMFR1_SPEC_SEI_IMPL:
			printf("%sSpecSEI", SEP_STR);
			printf("%sUnknown SpecSEI", SEP_STR);

		switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_PAN_NONE:
		case ID_AA64MMFR1_PAN_IMPL:
			printf("%sPAN", SEP_STR);
		case ID_AA64MMFR1_PAN_ATS1E1:
			printf("%sPAN+AT", SEP_STR);
			printf("%sUnknown PAN", SEP_STR);

		switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_LO_NONE:
		case ID_AA64MMFR1_LO_IMPL:
			printf("%sLO", SEP_STR);
			printf("%sUnknown LO", SEP_STR);

		switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HPDS_NONE:
		case ID_AA64MMFR1_HPDS_HPD:
			printf("%sHPDS", SEP_STR);
		case ID_AA64MMFR1_HPDS_TTPBHA:
			printf("%sTTPBHA", SEP_STR);
			printf("%sUnknown HPDS", SEP_STR);

		switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VH_NONE:
		case ID_AA64MMFR1_VH_IMPL:
			printf("%sVHE", SEP_STR);
			printf("%sUnknown VHE", SEP_STR);

		switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VMIDBITS_8:
		case ID_AA64MMFR1_VMIDBITS_16:
			printf("%s16 VMID bits", SEP_STR);
			printf("%sUnknown VMID bits", SEP_STR);

		switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HAFDBS_NONE:
		case ID_AA64MMFR1_HAFDBS_AF:
			printf("%sAF", SEP_STR);
		case ID_AA64MMFR1_HAFDBS_AF_DBS:
			printf("%sAF+DBS", SEP_STR);
			printf("%sUnknown Hardware update AF/DBS", SEP_STR);

		if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);

	/* AArch64 Memory Model Feature Register 2 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) {
		printf(" Memory Model Features 2 = <");
		switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_NV_NONE:
		case ID_AA64MMFR2_NV_IMPL:
			printf("%sNestedVirt", SEP_STR);
			printf("%sUnknown NestedVirt", SEP_STR);

		switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CCIDX_32:
			printf("%s32b CCIDX", SEP_STR);
		case ID_AA64MMFR2_CCIDX_64:
			printf("%s64b CCIDX", SEP_STR);
			printf("%sUnknown CCIDX", SEP_STR);

		switch (ID_AA64MMFR2_VA_RANGE(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_VA_RANGE_48:
			printf("%s48b VA", SEP_STR);
		case ID_AA64MMFR2_VA_RANGE_52:
			printf("%s52b VA", SEP_STR);
			printf("%sUnknown VA Range", SEP_STR);

		switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_IESB_NONE:
		case ID_AA64MMFR2_IESB_IMPL:
			printf("%sIESB", SEP_STR);
			printf("%sUnknown IESB", SEP_STR);

		switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_LSM_NONE:
		case ID_AA64MMFR2_LSM_IMPL:
			printf("%sLSM", SEP_STR);
			printf("%sUnknown LSM", SEP_STR);

		switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_UAO_NONE:
		case ID_AA64MMFR2_UAO_IMPL:
			printf("%sUAO", SEP_STR);
			printf("%sUnknown UAO", SEP_STR);

		switch (ID_AA64MMFR2_CNP(cpu_desc[cpu].id_aa64mmfr2)) {
		case ID_AA64MMFR2_CNP_NONE:
		case ID_AA64MMFR2_CNP_IMPL:
			printf("%sCnP", SEP_STR);
			printf("%sUnknown CnP", SEP_STR);

		if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK);

	/* AArch64 Debug Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
		printf(" Debug Features 0 = <");
		switch(ID_AA64DFR0_PMS_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMS_VER_NONE:
		case ID_AA64DFR0_PMS_VER_V1:
			printf("%sSPE v1", SEP_STR);
			printf("%sUnknown SPE", SEP_STR);

		/* Breakpoint/watchpoint fields encode count-minus-one. */
		printf("%s%lu CTX Breakpoints", SEP_STR,
		    ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));

		printf("%s%lu Watchpoints", SEP_STR,
		    ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));

		printf("%s%lu Breakpoints", SEP_STR,
		    ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));

		switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMU_VER_NONE:
		case ID_AA64DFR0_PMU_VER_3:
			printf("%sPMUv3", SEP_STR);
		case ID_AA64DFR0_PMU_VER_3_1:
			printf("%sPMUv3+16 bit evtCount", SEP_STR);
		case ID_AA64DFR0_PMU_VER_IMPL:
			printf("%sImplementation defined PMU", SEP_STR);
			printf("%sUnknown PMU", SEP_STR);

		switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_TRACE_VER_NONE:
		case ID_AA64DFR0_TRACE_VER_IMPL:
			printf("%sTrace", SEP_STR);
			printf("%sUnknown Trace", SEP_STR);

		switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_DEBUG_VER_8:
			printf("%sDebug v8", SEP_STR);
		case ID_AA64DFR0_DEBUG_VER_8_VHE:
			printf("%sDebug v8+VHE", SEP_STR);
		case ID_AA64DFR0_DEBUG_VER_8_2:
			printf("%sDebug v8.2", SEP_STR);
			printf("%sUnknown Debug", SEP_STR);

		if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);

	/* AArch64 Debug Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
		printf(" Debug Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64dfr1);

	/* AArch64 Auxiliary Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
		printf(" Auxiliary Features 0 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr0);

	/* AArch64 Auxiliary Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
		printf(" Auxiliary Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr1);
	/* Interior of identify_cpu(); the function header precedes this view. */
	const struct cpu_parts *cpu_partsp = NULL;

	cpu = PCPU_GET(cpuid);

	/*
	 * Store midr to pcpu to allow fast reading
	 * from EL0, EL1 and assembly code.
	 */
	PCPU_SET(midr, midr);

	/* Resolve the implementer; the impl_id == 0 sentinel matches anything. */
	impl_id = CPU_IMPL(midr);
	for (i = 0; i < nitems(cpu_implementers); i++) {
		if (impl_id == cpu_implementers[i].impl_id ||
		    cpu_implementers[i].impl_id == 0) {
			cpu_desc[cpu].cpu_impl = impl_id;
			cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
			cpu_partsp = cpu_implementers[i].cpu_parts;

	part_id = CPU_PART(midr);
	/*
	 * NOTE(review): "&cpu_partsp[i] != NULL" is always true (the address
	 * of an array element never compares equal to NULL), so this loop
	 * really terminates via the part_id == 0 sentinel entry matching.
	 * The condition should probably just be "1" or a sentinel test.
	 */
	for (i = 0; &cpu_partsp[i] != NULL; i++) {
		if (part_id == cpu_partsp[i].part_id ||
		    cpu_partsp[i].part_id == 0) {
			cpu_desc[cpu].cpu_part_num = part_id;
			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;

	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);

	/* Save affinity for current CPU */
	cpu_desc[cpu].mpidr = get_mpidr();
	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;

	/* Snapshot the AArch64 ID registers for this CPU. */
	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1);
	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1);
	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1);
	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1);
	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1);
	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
	cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1);
	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1);

	/*
	 * This code must run on one cpu at a time, but we are
	 * not scheduling on the current core so implement a
	 * simple spinlock: wfe parks the core until a sev/event.
	 */
	while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
		__asm __volatile("wfe" ::: "memory");

	/*
	 * Track the deepest affinity level at which this CPU differs from
	 * CPU0, so print_cpu_features() knows how many fields to show.
	 * (Case labels/fall-through not visible in this extract.)
	 */
	switch (cpu_aff_levels) {
		if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
		    CPU_AFF0(cpu_desc[0].mpidr))
		if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
		    CPU_AFF1(cpu_desc[0].mpidr))
		if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
		    CPU_AFF2(cpu_desc[0].mpidr))
		if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
		    CPU_AFF3(cpu_desc[0].mpidr))

	/* Flag every ID register that differs from CPU0's for printing. */
	if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
		cpu_print_regs |= PRINT_ID_AA64_AFR0;
	if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
		cpu_print_regs |= PRINT_ID_AA64_AFR1;

	if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
		cpu_print_regs |= PRINT_ID_AA64_DFR0;
	if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
		cpu_print_regs |= PRINT_ID_AA64_DFR1;

	if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
		cpu_print_regs |= PRINT_ID_AA64_ISAR0;
	if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
		cpu_print_regs |= PRINT_ID_AA64_ISAR1;

	if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
		cpu_print_regs |= PRINT_ID_AA64_MMFR0;
	if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
		cpu_print_regs |= PRINT_ID_AA64_MMFR1;
	if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
		cpu_print_regs |= PRINT_ID_AA64_MMFR2;

	if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
		cpu_print_regs |= PRINT_ID_AA64_PFR0;
	if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
		cpu_print_regs |= PRINT_ID_AA64_PFR1;

	/* Wake up the other CPUs */
	atomic_store_rel_int(&ident_lock, 0);
	__asm __volatile("sev" ::: "memory");