1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * Copyright (c) 2014 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Semihalf
7  * under sponsorship of the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/pcpu.h>
38 #include <sys/sbuf.h>
39 #include <sys/smp.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
42
43 #include <machine/atomic.h>
44 #include <machine/cpu.h>
45 #include <machine/cpufunc.h>
46 #include <machine/undefined.h>
47
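/*
 * Serializes identify_cpu() on the secondary CPUs so that cpu_aff_levels
 * and cpu_print_regs are updated by one CPU at a time.
 */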
48 static int ident_lock;
49
50 char machine[] = "arm64";
51
52 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
53     "Machine class");
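
/*
 * The string above is exported as the hw.machine sysctl; e.g.
 * "sysctl hw.machine" reports "hw.machine: arm64".
 */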
54
55 /*
56  * Per-CPU affinity as provided in MPIDR_EL1
57  * Indexed by CPU number in logical order selected by the system.
58  * Relevant fields can be extracted using the CPU_AFFn macros;
59  * Aff3.Aff2.Aff1.Aff0 together form a unique CPU address in the system.
60  *
61  * Fields used by us:
62  * Aff1 - Cluster number
63  * Aff0 - CPU number in Aff1 cluster
64  */
65 uint64_t __cpu_affinity[MAXCPU];
66 static u_int cpu_aff_levels;
67
68 struct cpu_desc {
69         u_int           cpu_impl;
70         u_int           cpu_part_num;
71         u_int           cpu_variant;
72         u_int           cpu_revision;
73         const char      *cpu_impl_name;
74         const char      *cpu_part_name;
75
76         uint64_t        mpidr;
77         uint64_t        id_aa64afr0;
78         uint64_t        id_aa64afr1;
79         uint64_t        id_aa64dfr0;
80         uint64_t        id_aa64dfr1;
81         uint64_t        id_aa64isar0;
82         uint64_t        id_aa64isar1;
83         uint64_t        id_aa64mmfr0;
84         uint64_t        id_aa64mmfr1;
85         uint64_t        id_aa64mmfr2;
86         uint64_t        id_aa64pfr0;
87         uint64_t        id_aa64pfr1;
88 };
89
90 struct cpu_desc cpu_desc[MAXCPU];
91 struct cpu_desc user_cpu_desc;
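/*
 * Bitmask of the PRINT_ID_AA64_* flags below; a flag is set when a
 * secondary CPU's copy of that ID register differs from CPU 0's, so
 * print_cpu_features() only reprints the registers that differ.
 */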
92 static u_int cpu_print_regs;
93 #define PRINT_ID_AA64_AFR0      0x00000001
94 #define PRINT_ID_AA64_AFR1      0x00000002
95 #define PRINT_ID_AA64_DFR0      0x00000010
96 #define PRINT_ID_AA64_DFR1      0x00000020
97 #define PRINT_ID_AA64_ISAR0     0x00000100
98 #define PRINT_ID_AA64_ISAR1     0x00000200
99 #define PRINT_ID_AA64_MMFR0     0x00001000
100 #define PRINT_ID_AA64_MMFR1     0x00002000
101 #define PRINT_ID_AA64_MMFR2     0x00004000
102 #define PRINT_ID_AA64_PFR0      0x00010000
103 #define PRINT_ID_AA64_PFR1      0x00020000
104
105 struct cpu_parts {
106         u_int           part_id;
107         const char      *part_name;
108 };
109 #define CPU_PART_NONE   { 0, "Unknown Processor" }
110
111 struct cpu_implementers {
112         u_int                   impl_id;
113         const char              *impl_name;
114         /*
115          * Part number is implementation defined
116          * so each vendor will have its own set of values and names.
117          */
118         const struct cpu_parts  *cpu_parts;
119 };
120 #define CPU_IMPLEMENTER_NONE    { 0, "Unknown Implementer", cpu_parts_none }
121
122 /*
123  * Per-implementer table of (PartNum, CPU Name) pairs.
124  */
125 /* ARM Ltd. */
126 static const struct cpu_parts cpu_parts_arm[] = {
127         { CPU_PART_FOUNDATION, "Foundation-Model" },
128         { CPU_PART_CORTEX_A35, "Cortex-A35" },
129         { CPU_PART_CORTEX_A53, "Cortex-A53" },
130         { CPU_PART_CORTEX_A55, "Cortex-A55" },
131         { CPU_PART_CORTEX_A57, "Cortex-A57" },
132         { CPU_PART_CORTEX_A72, "Cortex-A72" },
133         { CPU_PART_CORTEX_A73, "Cortex-A73" },
134         { CPU_PART_CORTEX_A75, "Cortex-A75" },
135         CPU_PART_NONE,
136 };
137 /* Cavium */
138 static const struct cpu_parts cpu_parts_cavium[] = {
139         { CPU_PART_THUNDERX, "ThunderX" },
140         { CPU_PART_THUNDERX2, "ThunderX2" },
141         CPU_PART_NONE,
142 };
143
144 /* Unknown */
145 static const struct cpu_parts cpu_parts_none[] = {
146         CPU_PART_NONE,
147 };
148
149 /*
150  * Implementers table.
151  */
152 const struct cpu_implementers cpu_implementers[] = {
153         { CPU_IMPL_ARM,         "ARM",          cpu_parts_arm },
154         { CPU_IMPL_BROADCOM,    "Broadcom",     cpu_parts_none },
155         { CPU_IMPL_CAVIUM,      "Cavium",       cpu_parts_cavium },
156         { CPU_IMPL_DEC,         "DEC",          cpu_parts_none },
157         { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
158         { CPU_IMPL_FREESCALE,   "Freescale",    cpu_parts_none },
159         { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
160         { CPU_IMPL_APM,         "APM",          cpu_parts_none },
161         { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_none },
162         { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
163         { CPU_IMPL_INTEL,       "Intel",        cpu_parts_none },
164         CPU_IMPLEMENTER_NONE,
165 };
166
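/*
 * How a field of an ID register is merged into the user-visible value:
 * MRS_EXACT forces the field to the value encoded by MRS_EXACT_VAL(),
 * while MRS_LOWER keeps the lowest value seen across all CPUs (compared
 * as signed or unsigned according to the field's sign flag).
 */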
167 #define MRS_TYPE_MASK           0xf
168 #define MRS_INVALID             0
169 #define MRS_EXACT               1
170 #define MRS_EXACT_VAL(x)        (MRS_EXACT | ((x) << 4))
171 #define MRS_EXACT_FIELD(x)      ((x) >> 4)
172 #define MRS_LOWER               2
173
174 struct mrs_field {
175         bool            sign;
176         u_int           type;
177         u_int           shift;
178 };
179
180 #define MRS_FIELD(_sign, _type, _shift)                                 \
181         {                                                               \
182                 .sign = (_sign),                                        \
183                 .type = (_type),                                        \
184                 .shift = (_shift),                                      \
185         }
186
187 #define MRS_FIELD_END   { .type = MRS_INVALID, }
188
189 static struct mrs_field id_aa64pfr0_fields[] = {
190         MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_SVE_SHIFT),
191         MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_RAS_SHIFT),
192         MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_GIC_SHIFT),
193         MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_ADV_SIMD_SHIFT),
194         MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_FP_SHIFT),
195         MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL3_SHIFT),
196         MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL2_SHIFT),
197         MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL1_SHIFT),
198         MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL0_SHIFT),
199         MRS_FIELD_END,
200 };
201
202 static struct mrs_field id_aa64dfr0_fields[] = {
203         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMS_VER_SHIFT),
204         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_CTX_CMPS_SHIFT),
205         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_WRPS_SHIFT),
206         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_BRPS_SHIFT),
207         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMU_VER_SHIFT),
208         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_TRACE_VER_SHIFT),
209         MRS_FIELD(false, MRS_EXACT_VAL(0x6), ID_AA64DFR0_DEBUG_VER_SHIFT),
210         MRS_FIELD_END,
211 };
212
213 struct mrs_user_reg {
214         u_int           CRm;
215         u_int           Op2;
216         size_t          offset;
217         struct mrs_field *fields;
218 };
219
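/*
 * ID registers emulated for userland. CRm and Op2 identify the encoding
 * within the Op0 == 3, Op1 == 0, CRn == 0 space; offset locates the
 * cached value within struct cpu_desc.
 */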
220 static struct mrs_user_reg user_regs[] = {
221         {       /* id_aa64pfr0_el1 */
222                 .CRm = 4,
223                 .Op2 = 0,
224                 .offset = __offsetof(struct cpu_desc, id_aa64pfr0),
225                 .fields = id_aa64pfr0_fields,
226         },
227         {       /* id_aa64dfr0_el1 */
228                 .CRm = 5,
229                 .Op2 = 0,
230                 .offset = __offsetof(struct cpu_desc, id_aa64dfr0),
231                 .fields = id_aa64dfr0_fields,
232         },
233 };
234
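/* Access the 64-bit value for user_regs[idx] within a struct cpu_desc. */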
235 #define CPU_DESC_FIELD(desc, idx)                                       \
236     *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)
237
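/*
 * Undefined-instruction handler for MRS reads of the ID registers from
 * userland. These reads are not permitted at EL0 and fault into the
 * kernel, which emulates them here: CRm == {4-7} returns the sanitized
 * user_cpu_desc value, CRm == 0 returns the live MIDR/MPIDR/REVIDR, and
 * the faulting instruction is then stepped over.
 */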
238 static int
239 user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
240     uint32_t esr)
241 {
242         uint64_t value;
243         int CRm, Op2, i, reg;
244
245         if ((insn & MRS_MASK) != MRS_VALUE)
246                 return (0);
247
248         /*
249          * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
250          * These are in the EL1 CPU identification space.
251          * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1.
252          * CRm == {4-7} holds the ID_AA64 registers.
253          *
254          * For full details see the ARMv8 ARM (ARM DDI 0487C.a)
255          * Table D9-2 System instruction encodings for non-Debug System
256          * register accesses.
257          */
258         if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
259                 return (0);
260
261         CRm = mrs_CRm(insn);
262         if (CRm > 7 || (CRm < 4 && CRm != 0))
263                 return (0);
264
265         Op2 = mrs_Op2(insn);
266         value = 0;
267
268         for (i = 0; i < nitems(user_regs); i++) {
269                 if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
270                         value = CPU_DESC_FIELD(user_cpu_desc, i);
271                         break;
272                 }
273         }
274
275         if (CRm == 0) {
276                 switch (Op2) {
277                 case 0:
278                         value = READ_SPECIALREG(midr_el1);
279                         break;
280                 case 5:
281                         value = READ_SPECIALREG(mpidr_el1);
282                         break;
283                 case 6:
284                         value = READ_SPECIALREG(revidr_el1);
285                         break;
286                 default:
287                         return (0);
288                 }
289         }
290
291         /*
292          * We will handle this instruction; move to the next so we
293          * don't trap here again.
294          */
295         frame->tf_elr += INSN_SIZE;
296
297         reg = MRS_REGISTER(insn);
298         /* If reg is 31 then write to xzr, i.e. do nothing */
299         if (reg == 31)
300                 return (1);
301
302         if (reg < nitems(frame->tf_x))
303                 frame->tf_x[reg] = value;
304         else if (reg == 30)
305                 frame->tf_lr = value;
306
307         return (1);
308 }
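
/*
 * Illustrative userland sketch (not part of this file, and assuming
 * <machine/armreg.h> is available for the field macros): once the
 * handler is installed a process can issue the mrs directly and the
 * trap is transparent, e.g.
 *
 *	uint64_t pfr0;
 *	__asm __volatile("mrs %0, id_aa64pfr0_el1" : "=r" (pfr0));
 *	if (ID_AA64PFR0_ADV_SIMD(pfr0) != ID_AA64PFR0_ADV_SIMD_NONE)
 *		printf("AdvSIMD supported\n");
 */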
309
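/*
 * Merge this CPU's ID register values into user_cpu_desc. MRS_EXACT
 * fields are pinned to a fixed value; MRS_LOWER fields keep the lowest
 * value seen so far, so userland only sees features common to every CPU
 * (e.g. if one CPU reports FP_HP and another only FP_IMPL, the
 * user-visible register advertises FP_IMPL).
 */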
310 static void
311 update_user_regs(u_int cpu)
312 {
313         struct mrs_field *fields;
314         uint64_t cur, value;
315         int i, j, cur_field, new_field;
316
317         for (i = 0; i < nitems(user_regs); i++) {
318                 value = CPU_DESC_FIELD(cpu_desc[cpu], i);
319                 if (cpu == 0)
320                         cur = value;
321                 else
322                         cur = CPU_DESC_FIELD(user_cpu_desc, i);
323
324                 fields = user_regs[i].fields;
325                 for (j = 0; fields[j].type != 0; j++) {
326                         switch (fields[j].type & MRS_TYPE_MASK) {
327                         case MRS_EXACT:
328                                 cur &= ~(0xful << fields[j].shift);
329                                 cur |=
330                                     (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
331                                     fields[j].shift;
332                                 break;
333                         case MRS_LOWER:
334                                 new_field = (value >> fields[j].shift) & 0xf;
335                                 cur_field = (cur >> fields[j].shift) & 0xf;
336                                 if ((fields[j].sign &&
337                                      (int)new_field < (int)cur_field) ||
338                                     (!fields[j].sign &&
339                                      (u_int)new_field < (u_int)cur_field)) {
340                                         cur &= ~(0xful << fields[j].shift);
341                                         cur |= (uint64_t)new_field << fields[j].shift;
342                                 }
343                                 break;
344                         default:
345                                 panic("Invalid field type: %d", fields[j].type);
346                         }
347                 }
348
349                 CPU_DESC_FIELD(user_cpu_desc, i) = cur;
350         }
351 }
352
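/*
 * Runs once all CPUs have been identified (SI_SUB_SMP): seed the
 * user-visible description with safe defaults, print each CPU's features
 * and fold its registers into the user-visible view, then install the
 * MRS emulation handler.
 */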
353 static void
354 identify_cpu_sysinit(void *dummy __unused)
355 {
356         int cpu;
357
358         /* Create a user-visible CPU description with safe values */
359         memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
360         /* Safe values for these registers */
361         user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_ADV_SIMD_NONE |
362             ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64;
363         user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DEBUG_VER_8;
364
365
366         CPU_FOREACH(cpu) {
367                 print_cpu_features(cpu);
368                 update_user_regs(cpu);
369         }
370
371         install_undef_handler(true, user_mrs_handler);
372 }
373 SYSINIT(identify_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
374
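/*
 * Print the decoded CPU features. CPU 0 prints every register; other
 * CPUs only print the registers flagged in cpu_print_regs as differing
 * from CPU 0.
 */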
375 void
376 print_cpu_features(u_int cpu)
377 {
378         struct sbuf *sb;
379         int printed;
380
381         sb = sbuf_new_auto();
382         sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
383             cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
384             cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
385
386         sbuf_cat(sb, " affinity:");
387         switch(cpu_aff_levels) {
388         default:
389         case 4:
390                 sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
391                 /* FALLTHROUGH */
392         case 3:
393                 sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
394                 /* FALLTHROUGH */
395         case 2:
396                 sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
397                 /* FALLTHROUGH */
398         case 1:
399         case 0: /* On UP this will be zero */
400                 sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
401                 break;
402         }
403         sbuf_finish(sb);
404         printf("%s\n", sbuf_data(sb));
405         sbuf_clear(sb);
406
407         /*
408          * There is a hardware erratum where, if one CPU is performing a TLB
409          * invalidation while another is performing a store-exclusive, the
410          * store-exclusive may return the wrong status. A workaround seems
411          * to be to use an IPI to invalidate on each CPU; however, given the
412          * limited number of affected units (pass 1.1 is the evaluation
413          * hardware revision) and the lack of information from Cavium,
414          * this has not been implemented.
415          *
416          * At the time of writing this the only information is from:
417          * https://lkml.org/lkml/2016/8/4/722
418          */
419         /*
420          * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
421          * triggers on pass 2.0+.
422          */
423         if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
424             CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
425                 printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
426                     "hardware bugs that may cause atomic operations to "
427                     "behave incorrectly.\n");
428
429         if (cpu != 0 && cpu_print_regs == 0)
430                 return;
431
432 #define SEP_STR (((printed++) == 0) ? "" : ",")
433
434         /* AArch64 Instruction Set Attribute Register 0 */
435         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
436                 printed = 0;
437                 sbuf_printf(sb, " Instruction Set Attributes 0 = <");
438
439                 switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
440                 case ID_AA64ISAR0_RDM_NONE:
441                         break;
442                 case ID_AA64ISAR0_RDM_IMPL:
443                         sbuf_printf(sb, "%sRDM", SEP_STR);
444                         break;
445                 default:
446                         sbuf_printf(sb, "%sUnknown RDM", SEP_STR);
447                 }
448
449                 switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
450                 case ID_AA64ISAR0_ATOMIC_NONE:
451                         break;
452                 case ID_AA64ISAR0_ATOMIC_IMPL:
453                         sbuf_printf(sb, "%sAtomic", SEP_STR);
454                         break;
455                 default:
456                         sbuf_printf(sb, "%sUnknown Atomic", SEP_STR);
457                 }
458
459                 switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
460                 case ID_AA64ISAR0_AES_NONE:
461                         break;
462                 case ID_AA64ISAR0_AES_BASE:
463                         sbuf_printf(sb, "%sAES", SEP_STR);
464                         break;
465                 case ID_AA64ISAR0_AES_PMULL:
466                         sbuf_printf(sb, "%sAES+PMULL", SEP_STR);
467                         break;
468                 default:
469                         sbuf_printf(sb, "%sUnknown AES", SEP_STR);
470                         break;
471                 }
472
473                 switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
474                 case ID_AA64ISAR0_SHA1_NONE:
475                         break;
476                 case ID_AA64ISAR0_SHA1_BASE:
477                         sbuf_printf(sb, "%sSHA1", SEP_STR);
478                         break;
479                 default:
480                         sbuf_printf(sb, "%sUnknown SHA1", SEP_STR);
481                         break;
482                 }
483
484                 switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
485                 case ID_AA64ISAR0_SHA2_NONE:
486                         break;
487                 case ID_AA64ISAR0_SHA2_BASE:
488                         sbuf_printf(sb, "%sSHA2", SEP_STR);
489                         break;
490                 case ID_AA64ISAR0_SHA2_512:
491                         sbuf_printf(sb, "%sSHA2+SHA512", SEP_STR);
492                         break;
493                 default:
494                         sbuf_printf(sb, "%sUnknown SHA2", SEP_STR);
495                         break;
496                 }
497
498                 switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
499                 case ID_AA64ISAR0_CRC32_NONE:
500                         break;
501                 case ID_AA64ISAR0_CRC32_BASE:
502                         sbuf_printf(sb, "%sCRC32", SEP_STR);
503                         break;
504                 default:
505                         sbuf_printf(sb, "%sUnknown CRC32", SEP_STR);
506                         break;
507                 }
508
509                 switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) {
510                 case ID_AA64ISAR0_SHA3_NONE:
511                         break;
512                 case ID_AA64ISAR0_SHA3_IMPL:
513                         sbuf_printf(sb, "%sSHA3", SEP_STR);
514                         break;
515                 default:
516                         sbuf_printf(sb, "%sUnknown SHA3", SEP_STR);
517                         break;
518                 }
519
520                 switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) {
521                 case ID_AA64ISAR0_SM3_NONE:
522                         break;
523                 case ID_AA64ISAR0_SM3_IMPL:
524                         sbuf_printf(sb, "%sSM3", SEP_STR);
525                         break;
526                 default:
527                         sbuf_printf(sb, "%sUnknown SM3", SEP_STR);
528                         break;
529                 }
530
531                 switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) {
532                 case ID_AA64ISAR0_SM4_NONE:
533                         break;
534                 case ID_AA64ISAR0_SM4_IMPL:
535                         sbuf_printf(sb, "%sSM4", SEP_STR);
536                         break;
537                 default:
538                         sbuf_printf(sb, "%sUnknown SM4", SEP_STR);
539                         break;
540                 }
541
542                 switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) {
543                 case ID_AA64ISAR0_DP_NONE:
544                         break;
545                 case ID_AA64ISAR0_DP_IMPL:
546                         sbuf_printf(sb, "%sDotProd", SEP_STR);
547                         break;
548                 default:
549                         sbuf_printf(sb, "%sUnknown DP", SEP_STR);
550                         break;
551                 }
552
553                 if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
554                         sbuf_printf(sb, "%s%#lx", SEP_STR,
555                             cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);
556
557                 sbuf_finish(sb);
558                 printf("%s>\n", sbuf_data(sb));
559                 sbuf_clear(sb);
560         }
561
562         /* AArch64 Instruction Set Attribute Register 1 */
563         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
564                 printed = 0;
565                 sbuf_printf(sb, " Instruction Set Attributes 1 = <");
566
567                 switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) {
568                 case ID_AA64ISAR1_GPI_NONE:
569                         break;
570                 case ID_AA64ISAR1_GPI_IMPL:
571                         sbuf_printf(sb, "%sImpl GenericAuth", SEP_STR);
572                         break;
573                 default:
574                         sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
575                         break;
576                 }
577
578                 switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) {
579                 case ID_AA64ISAR1_GPA_NONE:
580                         break;
581                 case ID_AA64ISAR1_GPA_IMPL:
582                         sbuf_printf(sb, "%sPrince GenericAuth", SEP_STR);
583                         break;
584                 default:
585                         sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
586                         break;
587                 }
588
589                 switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) {
590                 case ID_AA64ISAR1_LRCPC_NONE:
591                         break;
592                 case ID_AA64ISAR1_LRCPC_IMPL:
593                         sbuf_printf(sb, "%sRCpc", SEP_STR);
594                         break;
595                 default:
596                         sbuf_printf(sb, "%sUnknown RCpc", SEP_STR);
597                         break;
598                 }
599
600                 switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) {
601                 case ID_AA64ISAR1_FCMA_NONE:
602                         break;
603                 case ID_AA64ISAR1_FCMA_IMPL:
604                         sbuf_printf(sb, "%sFCMA", SEP_STR);
605                         break;
606                 default:
607                         sbuf_printf(sb, "%sUnknown FCMA", SEP_STR);
608                         break;
609                 }
610
611                 switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) {
612                 case ID_AA64ISAR1_JSCVT_NONE:
613                         break;
614                 case ID_AA64ISAR1_JSCVT_IMPL:
615                         sbuf_printf(sb, "%sJS Conv", SEP_STR);
616                         break;
617                 default:
618                         sbuf_printf(sb, "%sUnknown JS Conv", SEP_STR);
619                         break;
620                 }
621
622                 switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) {
623                 case ID_AA64ISAR1_API_NONE:
624                         break;
625                 case ID_AA64ISAR1_API_IMPL:
626                         sbuf_printf(sb, "%sImpl AddrAuth", SEP_STR);
627                         break;
628                 default:
629                         sbuf_printf(sb, "%sUnknown Impl AddrAuth", SEP_STR);
630                         break;
631                 }
632
633                 switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) {
634                 case ID_AA64ISAR1_APA_NONE:
635                         break;
636                 case ID_AA64ISAR1_APA_IMPL:
637                         sbuf_printf(sb, "%sPrince AddrAuth", SEP_STR);
638                         break;
639                 default:
640                         sbuf_printf(sb, "%sUnknown Prince AddrAuth", SEP_STR);
641                         break;
642                 }
643
644                 switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) {
645                 case ID_AA64ISAR1_DPB_NONE:
646                         break;
647                 case ID_AA64ISAR1_DPB_IMPL:
648                         sbuf_printf(sb, "%sDC CVAP", SEP_STR);
649                         break;
650                 default:
651                         sbuf_printf(sb, "%sUnknown DC CVAP", SEP_STR);
652                         break;
653                 }
654
655                 if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0)
656                         sbuf_printf(sb, "%s%#lx", SEP_STR,
657                             cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK);
658                 sbuf_finish(sb);
659                 printf("%s>\n", sbuf_data(sb));
660                 sbuf_clear(sb);
661         }
662
663         /* AArch64 Processor Feature Register 0 */
664         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
665                 printed = 0;
666                 sbuf_printf(sb, "         Processor Features 0 = <");
667
668                 switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) {
669                 case ID_AA64PFR0_SVE_NONE:
670                         break;
671                 case ID_AA64PFR0_SVE_IMPL:
672                         sbuf_printf(sb, "%sSVE", SEP_STR);
673                         break;
674                 default:
675                         sbuf_printf(sb, "%sUnknown SVE", SEP_STR);
676                         break;
677                 }
678
679                 switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) {
680                 case ID_AA64PFR0_RAS_NONE:
681                         break;
682                 case ID_AA64PFR0_RAS_V1:
683                         sbuf_printf(sb, "%sRASv1", SEP_STR);
684                         break;
685                 default:
686                         sbuf_printf(sb, "%sUnknown RAS", SEP_STR);
687                         break;
688                 }
689
690                 switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
691                 case ID_AA64PFR0_GIC_CPUIF_NONE:
692                         break;
693                 case ID_AA64PFR0_GIC_CPUIF_EN:
694                         sbuf_printf(sb, "%sGIC", SEP_STR);
695                         break;
696                 default:
697                         sbuf_printf(sb, "%sUnknown GIC interface", SEP_STR);
698                         break;
699                 }
700
701                 switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
702                 case ID_AA64PFR0_ADV_SIMD_NONE:
703                         break;
704                 case ID_AA64PFR0_ADV_SIMD_IMPL:
705                         sbuf_printf(sb, "%sAdvSIMD", SEP_STR);
706                         break;
707                 case ID_AA64PFR0_ADV_SIMD_HP:
708                         sbuf_printf(sb, "%sAdvSIMD+HP", SEP_STR);
709                         break;
710                 default:
711                         sbuf_printf(sb, "%sUnknown AdvSIMD", SEP_STR);
712                         break;
713                 }
714
715                 switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
716                 case ID_AA64PFR0_FP_NONE:
717                         break;
718                 case ID_AA64PFR0_FP_IMPL:
719                         sbuf_printf(sb, "%sFloat", SEP_STR);
720                         break;
721                 case ID_AA64PFR0_FP_HP:
722                         sbuf_printf(sb, "%sFloat+HP", SEP_STR);
723                         break;
724                 default:
725                         sbuf_printf(sb, "%sUnknown Float", SEP_STR);
726                         break;
727                 }
728
729                 switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
730                 case ID_AA64PFR0_EL3_NONE:
731                         sbuf_printf(sb, "%sNo EL3", SEP_STR);
732                         break;
733                 case ID_AA64PFR0_EL3_64:
734                         sbuf_printf(sb, "%sEL3", SEP_STR);
735                         break;
736                 case ID_AA64PFR0_EL3_64_32:
737                         sbuf_printf(sb, "%sEL3 32", SEP_STR);
738                         break;
739                 default:
740                         sbuf_printf(sb, "%sUnknown EL3", SEP_STR);
741                         break;
742                 }
743
744                 switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
745                 case ID_AA64PFR0_EL2_NONE:
746                         sbuf_printf(sb, "%sNo EL2", SEP_STR);
747                         break;
748                 case ID_AA64PFR0_EL2_64:
749                         sbuf_printf(sb, "%sEL2", SEP_STR);
750                         break;
751                 case ID_AA64PFR0_EL2_64_32:
752                         sbuf_printf(sb, "%sEL2 32", SEP_STR);
753                         break;
754                 default:
755                         sbuf_printf(sb, "%sUnknown EL2", SEP_STR);
756                         break;
757                 }
758
759                 switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
760                 case ID_AA64PFR0_EL1_64:
761                         sbuf_printf(sb, "%sEL1", SEP_STR);
762                         break;
763                 case ID_AA64PFR0_EL1_64_32:
764                         sbuf_printf(sb, "%sEL1 32", SEP_STR);
765                         break;
766                 default:
767                         sbuf_printf(sb, "%sUnknown EL1", SEP_STR);
768                         break;
769                 }
770
771                 switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
772                 case ID_AA64PFR0_EL0_64:
773                         sbuf_printf(sb, "%sEL0", SEP_STR);
774                         break;
775                 case ID_AA64PFR0_EL0_64_32:
776                         sbuf_printf(sb, "%sEL0 32", SEP_STR);
777                         break;
778                 default:
779                         sbuf_printf(sb, "%sUnknown EL0", SEP_STR);
780                         break;
781                 }
782
783                 if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
784                         sbuf_printf(sb, "%s%#lx", SEP_STR,
785                             cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);
786
787                 sbuf_finish(sb);
788                 printf("%s>\n", sbuf_data(sb));
789                 sbuf_clear(sb);
790         }
791
792         /* AArch64 Processor Feature Register 1 */
793         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
794                 printf("         Processor Features 1 = <%#lx>\n",
795                     cpu_desc[cpu].id_aa64pfr1);
796         }
797
798         /* AArch64 Memory Model Feature Register 0 */
799         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
800                 printed = 0;
801                 sbuf_printf(sb, "      Memory Model Features 0 = <");
802                 switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
803                 case ID_AA64MMFR0_TGRAN4_NONE:
804                         break;
805                 case ID_AA64MMFR0_TGRAN4_IMPL:
806                         sbuf_printf(sb, "%s4k Granule", SEP_STR);
807                         break;
808                 default:
809                         sbuf_printf(sb, "%sUnknown 4k Granule", SEP_STR);
810                         break;
811                 }
812
813                 switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
814                 case ID_AA64MMFR0_TGRAN16_NONE:
815                         break;
816                 case ID_AA64MMFR0_TGRAN16_IMPL:
817                         sbuf_printf(sb, "%s16k Granule", SEP_STR);
818                         break;
819                 default:
820                         sbuf_printf(sb, "%sUnknown 16k Granule", SEP_STR);
821                         break;
822                 }
823
824                 switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
825                 case ID_AA64MMFR0_TGRAN64_NONE:
826                         break;
827                 case ID_AA64MMFR0_TGRAN64_IMPL:
828                         sbuf_printf(sb, "%s64k Granule", SEP_STR);
829                         break;
830                 default:
831                         sbuf_printf(sb, "%sUnknown 64k Granule", SEP_STR);
832                         break;
833                 }
834
835                 switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
836                 case ID_AA64MMFR0_BIGEND_FIXED:
837                         break;
838                 case ID_AA64MMFR0_BIGEND_MIXED:
839                         sbuf_printf(sb, "%sMixedEndian", SEP_STR);
840                         break;
841                 default:
842                         sbuf_printf(sb, "%sUnknown Endian switching", SEP_STR);
843                         break;
844                 }
845
846                 switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
847                 case ID_AA64MMFR0_BIGEND_EL0_FIXED:
848                         break;
849                 case ID_AA64MMFR0_BIGEND_EL0_MIXED:
850                         sbuf_printf(sb, "%sEL0 MixEndian", SEP_STR);
851                         break;
852                 default:
853                         sbuf_printf(sb, "%sUnknown EL0 Endian switching", SEP_STR);
854                         break;
855                 }
856
857                 switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
858                 case ID_AA64MMFR0_S_NS_MEM_NONE:
859                         break;
860                 case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
861                         sbuf_printf(sb, "%sS/NS Mem", SEP_STR);
862                         break;
863                 default:
864                         sbuf_printf(sb, "%sUnknown S/NS Mem", SEP_STR);
865                         break;
866                 }
867
868                 switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
869                 case ID_AA64MMFR0_ASID_BITS_8:
870                         sbuf_printf(sb, "%s8bit ASID", SEP_STR);
871                         break;
872                 case ID_AA64MMFR0_ASID_BITS_16:
873                         sbuf_printf(sb, "%s16bit ASID", SEP_STR);
874                         break;
875                 default:
876                         sbuf_printf(sb, "%sUnknown ASID", SEP_STR);
877                         break;
878                 }
879
880                 switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
881                 case ID_AA64MMFR0_PA_RANGE_4G:
882                         sbuf_printf(sb, "%s4GB PA", SEP_STR);
883                         break;
884                 case ID_AA64MMFR0_PA_RANGE_64G:
885                         sbuf_printf(sb, "%s64GB PA", SEP_STR);
886                         break;
887                 case ID_AA64MMFR0_PA_RANGE_1T:
888                         sbuf_printf(sb, "%s1TB PA", SEP_STR);
889                         break;
890                 case ID_AA64MMFR0_PA_RANGE_4T:
891                         sbuf_printf(sb, "%s4TB PA", SEP_STR);
892                         break;
893                 case ID_AA64MMFR0_PA_RANGE_16T:
894                         sbuf_printf(sb, "%s16TB PA", SEP_STR);
895                         break;
896                 case ID_AA64MMFR0_PA_RANGE_256T:
897                         sbuf_printf(sb, "%s256TB PA", SEP_STR);
898                         break;
899                 case ID_AA64MMFR0_PA_RANGE_4P:
900                         sbuf_printf(sb, "%s4PB PA", SEP_STR);
901                         break;
902                 default:
903                         sbuf_printf(sb, "%sUnknown PA Range", SEP_STR);
904                         break;
905                 }
906
907                 if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
908                         sbuf_printf(sb, "%s%#lx", SEP_STR,
909                             cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
910                 sbuf_finish(sb);
911                 printf("%s>\n", sbuf_data(sb));
912                 sbuf_clear(sb);
913         }
914
915         /* AArch64 Memory Model Feature Register 1 */
916         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
917                 printed = 0;
918                 sbuf_printf(sb, "      Memory Model Features 1 = <");
919
920                 switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) {
921                 case ID_AA64MMFR1_XNX_NONE:
922                         break;
923                 case ID_AA64MMFR1_XNX_IMPL:
924                         sbuf_printf(sb, "%sEL2 XN", SEP_STR);
925                         break;
926                 default:
927                         sbuf_printf(sb, "%sUnknown XNX", SEP_STR);
928                         break;
929                 }
930
931                 switch (ID_AA64MMFR1_SPEC_SEI(cpu_desc[cpu].id_aa64mmfr1)) {
932                 case ID_AA64MMFR1_SPEC_SEI_NONE:
933                         break;
934                 case ID_AA64MMFR1_SPEC_SEI_IMPL:
935                         sbuf_printf(sb, "%sSpecSEI", SEP_STR);
936                         break;
937                 default:
938                         sbuf_printf(sb, "%sUnknown SpecSEI", SEP_STR);
939                         break;
940                 }
941
942                 switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
943                 case ID_AA64MMFR1_PAN_NONE:
944                         break;
945                 case ID_AA64MMFR1_PAN_IMPL:
946                         sbuf_printf(sb, "%sPAN", SEP_STR);
947                         break;
948                 case ID_AA64MMFR1_PAN_ATS1E1:
949                         sbuf_printf(sb, "%sPAN+AT", SEP_STR);
950                         break;
951                 default:
952                         sbuf_printf(sb, "%sUnknown PAN", SEP_STR);
953                         break;
954                 }
955
956                 switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
957                 case ID_AA64MMFR1_LO_NONE:
958                         break;
959                 case ID_AA64MMFR1_LO_IMPL:
960                         sbuf_printf(sb, "%sLO", SEP_STR);
961                         break;
962                 default:
963                         sbuf_printf(sb, "%sUnknown LO", SEP_STR);
964                         break;
965                 }
966
967                 switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
968                 case ID_AA64MMFR1_HPDS_NONE:
969                         break;
970                 case ID_AA64MMFR1_HPDS_HPD:
971                         sbuf_printf(sb, "%sHPDS", SEP_STR);
972                         break;
973                 case ID_AA64MMFR1_HPDS_TTPBHA:
974                         sbuf_printf(sb, "%sTTPBHA", SEP_STR);
975                         break;
976                 default:
977                         sbuf_printf(sb, "%sUnknown HPDS", SEP_STR);
978                         break;
979                 }
980
981                 switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
982                 case ID_AA64MMFR1_VH_NONE:
983                         break;
984                 case ID_AA64MMFR1_VH_IMPL:
985                         sbuf_printf(sb, "%sVHE", SEP_STR);
986                         break;
987                 default:
988                         sbuf_printf(sb, "%sUnknown VHE", SEP_STR);
989                         break;
990                 }
991
992                 switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
993                 case ID_AA64MMFR1_VMIDBITS_8:
994                         break;
995                 case ID_AA64MMFR1_VMIDBITS_16:
996                         sbuf_printf(sb, "%s16 VMID bits", SEP_STR);
997                         break;
998                 default:
999                         sbuf_printf(sb, "%sUnknown VMID bits", SEP_STR);
1000                         break;
1001                 }
1002
1003                 switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
1004                 case ID_AA64MMFR1_HAFDBS_NONE:
1005                         break;
1006                 case ID_AA64MMFR1_HAFDBS_AF:
1007                         sbuf_printf(sb, "%sAF", SEP_STR);
1008                         break;
1009                 case ID_AA64MMFR1_HAFDBS_AF_DBS:
1010                         sbuf_printf(sb, "%sAF+DBS", SEP_STR);
1011                         break;
1012                 default:
1013                         sbuf_printf(sb, "%sUnknown Hardware update AF/DBS", SEP_STR);
1014                         break;
1015                 }
1016
1017                 if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
1018                         sbuf_printf(sb, "%s%#lx", SEP_STR,
1019                             cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
1020                 sbuf_finish(sb);
1021                 printf("%s>\n", sbuf_data(sb));
1022                 sbuf_clear(sb);
1023         }
1024
1025         /* AArch64 Memory Model Feature Register 2 */
1026         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) {
1027                 printed = 0;
1028                 sbuf_printf(sb, "      Memory Model Features 2 = <");
1029
1030                 switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) {
1031                 case ID_AA64MMFR2_NV_NONE:
1032                         break;
1033                 case ID_AA64MMFR2_NV_IMPL:
1034                         sbuf_printf(sb, "%sNestedVirt", SEP_STR);
1035                         break;
1036                 default:
1037                         sbuf_printf(sb, "%sUnknown NestedVirt", SEP_STR);
1038                         break;
1039                 }
1040
1041                 switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) {
1042                 case ID_AA64MMFR2_CCIDX_32:
1043                         sbuf_printf(sb, "%s32b CCIDX", SEP_STR);
1044                         break;
1045                 case ID_AA64MMFR2_CCIDX_64:
1046                         sbuf_printf(sb, "%s64b CCIDX", SEP_STR);
1047                         break;
1048                 default:
1049                         sbuf_printf(sb, "%sUnknown CCIDX", SEP_STR);
1050                         break;
1051                 }
1052
1053                 switch (ID_AA64MMFR2_VA_RANGE(cpu_desc[cpu].id_aa64mmfr2)) {
1054                 case ID_AA64MMFR2_VA_RANGE_48:
1055                         sbuf_printf(sb, "%s48b VA", SEP_STR);
1056                         break;
1057                 case ID_AA64MMFR2_VA_RANGE_52:
1058                         sbuf_printf(sb, "%s52b VA", SEP_STR);
1059                         break;
1060                 default:
1061                         sbuf_printf(sb, "%sUnknown VA Range", SEP_STR);
1062                         break;
1063                 }
1064
1065                 switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) {
1066                 case ID_AA64MMFR2_IESB_NONE:
1067                         break;
1068                 case ID_AA64MMFR2_IESB_IMPL:
1069                         sbuf_printf(sb, "%sIESB", SEP_STR);
1070                         break;
1071                 default:
1072                         sbuf_printf(sb, "%sUnknown IESB", SEP_STR);
1073                         break;
1074                 }
1075
1076                 switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) {
1077                 case ID_AA64MMFR2_LSM_NONE:
1078                         break;
1079                 case ID_AA64MMFR2_LSM_IMPL:
1080                         sbuf_printf(sb, "%sLSM", SEP_STR);
1081                         break;
1082                 default:
1083                         sbuf_printf(sb, "%sUnknown LSM", SEP_STR);
1084                         break;
1085                 }
1086
1087                 switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) {
1088                 case ID_AA64MMFR2_UAO_NONE:
1089                         break;
1090                 case ID_AA64MMFR2_UAO_IMPL:
1091                         sbuf_printf(sb, "%sUAO", SEP_STR);
1092                         break;
1093                 default:
1094                         sbuf_printf(sb, "%sUnknown UAO", SEP_STR);
1095                         break;
1096                 }
1097
1098                 switch (ID_AA64MMFR2_CNP(cpu_desc[cpu].id_aa64mmfr2)) {
1099                 case ID_AA64MMFR2_CNP_NONE:
1100                         break;
1101                 case ID_AA64MMFR2_CNP_IMPL:
1102                         sbuf_printf(sb, "%sCnP", SEP_STR);
1103                         break;
1104                 default:
1105                         sbuf_printf(sb, "%sUnknown CnP", SEP_STR);
1106                         break;
1107                 }
1108
1109                 if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0)
1110                         sbuf_printf(sb, "%s%#lx", SEP_STR,
1111                             cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK);
1112                 sbuf_finish(sb);
1113                 printf("%s>\n", sbuf_data(sb));
1114                 sbuf_clear(sb);
1115         }
1116
1117         /* AArch64 Debug Feature Register 0 */
1118         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
1119                 printed = 0;
1120                 sbuf_printf(sb, "             Debug Features 0 = <");
1121                 switch(ID_AA64DFR0_PMS_VER(cpu_desc[cpu].id_aa64dfr0)) {
1122                 case ID_AA64DFR0_PMS_VER_NONE:
1123                         break;
1124                 case ID_AA64DFR0_PMS_VER_V1:
1125                         sbuf_printf(sb, "%sSPE v1", SEP_STR);
1126                         break;
1127                 default:
1128                         sbuf_printf(sb, "%sUnknown SPE", SEP_STR);
1129                         break;
1130                 }
1131
1132                 sbuf_printf(sb, "%s%lu CTX Breakpoints", SEP_STR,
1133                     ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));
1134
1135                 sbuf_printf(sb, "%s%lu Watchpoints", SEP_STR,
1136                     ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));
1137
1138                 sbuf_printf(sb, "%s%lu Breakpoints", SEP_STR,
1139                     ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));
1140
1141                 switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
1142                 case ID_AA64DFR0_PMU_VER_NONE:
1143                         break;
1144                 case ID_AA64DFR0_PMU_VER_3:
1145                         sbuf_printf(sb, "%sPMUv3", SEP_STR);
1146                         break;
1147                 case ID_AA64DFR0_PMU_VER_3_1:
1148                         sbuf_printf(sb, "%sPMUv3+16 bit evtCount", SEP_STR);
1149                         break;
1150                 case ID_AA64DFR0_PMU_VER_IMPL:
1151                         sbuf_printf(sb, "%sImplementation defined PMU", SEP_STR);
1152                         break;
1153                 default:
1154                         sbuf_printf(sb, "%sUnknown PMU", SEP_STR);
1155                         break;
1156                 }
1157
1158                 switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
1159                 case ID_AA64DFR0_TRACE_VER_NONE:
1160                         break;
1161                 case ID_AA64DFR0_TRACE_VER_IMPL:
1162                         sbuf_printf(sb, "%sTrace", SEP_STR);
1163                         break;
1164                 default:
1165                         sbuf_printf(sb, "%sUnknown Trace", SEP_STR);
1166                         break;
1167                 }
1168
1169                 switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
1170                 case ID_AA64DFR0_DEBUG_VER_8:
1171                         sbuf_printf(sb, "%sDebug v8", SEP_STR);
1172                         break;
1173                 case ID_AA64DFR0_DEBUG_VER_8_VHE:
1174                         sbuf_printf(sb, "%sDebug v8+VHE", SEP_STR);
1175                         break;
1176                 case ID_AA64DFR0_DEBUG_VER_8_2:
1177                         sbuf_printf(sb, "%sDebug v8.2", SEP_STR);
1178                         break;
1179                 default:
1180                         sbuf_printf(sb, "%sUnknown Debug", SEP_STR);
1181                         break;
1182                 }
1183
1184                 if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
1185                         sbuf_printf(sb, "%s%#lx", SEP_STR,
1186                             cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
1187                 sbuf_finish(sb);
1188                 printf("%s>\n", sbuf_data(sb));
1189                 sbuf_clear(sb);
1190         }
1191
1192         /* AArch64 Debug Feature Register 1 */
1193         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
1194                 printf("             Debug Features 1 = <%#lx>\n",
1195                     cpu_desc[cpu].id_aa64dfr1);
1196         }
1197
1198         /* AArch64 Auxiliary Feature Register 0 */
1199         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
1200                 printf("         Auxiliary Features 0 = <%#lx>\n",
1201                     cpu_desc[cpu].id_aa64afr0);
1202         }
1203
1204         /* AArch64 Auxiliary Feature Register 1 */
1205         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
1206                 printf("         Auxiliary Features 1 = <%#lx>\n",
1207                     cpu_desc[cpu].id_aa64afr1);
1208         }
1209
1210         sbuf_delete(sb);
1211         sb = NULL;
1212 #undef SEP_STR
1213 }
1214
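/*
 * Runs early on every CPU: cache the MIDR/MPIDR and the ID_AA64*
 * registers into cpu_desc[], and on secondary CPUs record the affinity
 * levels and note which registers differ from CPU 0 (that comparison is
 * serialized by ident_lock).
 */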
1215 void
1216 identify_cpu(void)
1217 {
1218         u_int midr;
1219         u_int impl_id;
1220         u_int part_id;
1221         u_int cpu;
1222         size_t i;
1223         const struct cpu_parts *cpu_partsp = NULL;
1224
1225         cpu = PCPU_GET(cpuid);
1226         midr = get_midr();
1227
1228         /*
1229          * Store midr to pcpu to allow fast reading
1230          * from EL0, EL1 and assembly code.
1231          */
1232         PCPU_SET(midr, midr);
1233
1234         impl_id = CPU_IMPL(midr);
1235         for (i = 0; i < nitems(cpu_implementers); i++) {
1236                 if (impl_id == cpu_implementers[i].impl_id ||
1237                     cpu_implementers[i].impl_id == 0) {
1238                         cpu_desc[cpu].cpu_impl = impl_id;
1239                         cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
1240                         cpu_partsp = cpu_implementers[i].cpu_parts;
1241                         break;
1242                 }
1243         }
1244
1245         part_id = CPU_PART(midr);
1246         for (i = 0; ; i++) {
1247                 if (part_id == cpu_partsp[i].part_id ||
1248                     cpu_partsp[i].part_id == 0) {
1249                         cpu_desc[cpu].cpu_part_num = part_id;
1250                         cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
1251                         break;
1252                 }
1253         }
1254
1255         cpu_desc[cpu].cpu_revision = CPU_REV(midr);
1256         cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
1257
1258         /* Save affinity for current CPU */
1259         cpu_desc[cpu].mpidr = get_mpidr();
1260         CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
1261
1262         cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1);
1263         cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1);
1264         cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1);
1265         cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1);
1266         cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1);
1267         cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
1268         cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
1269         cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1);
1270         cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1);
1271
1272         if (cpu != 0) {
1273                 /*
1274                  * This code must run on one cpu at a time, but we are
1275                  * not scheduling on the current core so implement a
1276                  * simple spinlock.
1277                  */
1278                 while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
1279                         __asm __volatile("wfe" ::: "memory");
1280
1281                 switch (cpu_aff_levels) {
1282                 case 0:
1283                         if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
1284                             CPU_AFF0(cpu_desc[0].mpidr))
1285                                 cpu_aff_levels = 1;
1286                         /* FALLTHROUGH */
1287                 case 1:
1288                         if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
1289                             CPU_AFF1(cpu_desc[0].mpidr))
1290                                 cpu_aff_levels = 2;
1291                         /* FALLTHROUGH */
1292                 case 2:
1293                         if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
1294                             CPU_AFF2(cpu_desc[0].mpidr))
1295                                 cpu_aff_levels = 3;
1296                         /* FALLTHROUGH */
1297                 case 3:
1298                         if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
1299                             CPU_AFF3(cpu_desc[0].mpidr))
1300                                 cpu_aff_levels = 4;
1301                         break;
1302                 }
1303
1304                 if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
1305                         cpu_print_regs |= PRINT_ID_AA64_AFR0;
1306                 if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
1307                         cpu_print_regs |= PRINT_ID_AA64_AFR1;
1308
1309                 if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
1310                         cpu_print_regs |= PRINT_ID_AA64_DFR0;
1311                 if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
1312                         cpu_print_regs |= PRINT_ID_AA64_DFR1;
1313
1314                 if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
1315                         cpu_print_regs |= PRINT_ID_AA64_ISAR0;
1316                 if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
1317                         cpu_print_regs |= PRINT_ID_AA64_ISAR1;
1318
1319                 if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
1320                         cpu_print_regs |= PRINT_ID_AA64_MMFR0;
1321                 if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
1322                         cpu_print_regs |= PRINT_ID_AA64_MMFR1;
1323                 if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
1324                         cpu_print_regs |= PRINT_ID_AA64_MMFR2;
1325
1326                 if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
1327                         cpu_print_regs |= PRINT_ID_AA64_PFR0;
1328                 if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
1329                         cpu_print_regs |= PRINT_ID_AA64_PFR1;
1330
1331                 /* Wake up the other CPUs */
1332                 atomic_store_rel_int(&ident_lock, 0);
1333                 __asm __volatile("sev" ::: "memory");
1334         }
1335 }