sys/arm64/arm64/identcpu.c
1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * Copyright (c) 2014 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Semihalf
7  * under sponsorship of the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/pcpu.h>
38 #include <sys/sbuf.h>
39 #include <sys/smp.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
42
43 #include <machine/atomic.h>
44 #include <machine/cpu.h>
45 #include <machine/cpufunc.h>
46 #include <machine/undefined.h>
47
48 static int ident_lock;
49
50 char machine[] = "arm64";
51
52 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
53     "Machine class");
54
55 /*
56  * Per-CPU affinity as provided in MPIDR_EL1
57  * Indexed by CPU number in logical order selected by the system.
58  * Relevant fields can be extracted using the CPU_AFFn macros;
59  * together, Aff3.Aff2.Aff1.Aff0 forms a unique CPU address in the system.
60  *
61  * Fields used by us:
62  * Aff1 - Cluster number
63  * Aff0 - CPU number in Aff1 cluster
64  */
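/*
 * Illustrative example (hypothetical value): an MPIDR_EL1 of
 * 0x0000000000000101 decodes to Aff1 = 1 and Aff0 = 1, i.e. the second
 * CPU in the second cluster; the CPU_AFF1()/CPU_AFF0() macros extract
 * these fields.
 */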
65 uint64_t __cpu_affinity[MAXCPU];
66 static u_int cpu_aff_levels;
67
68 struct cpu_desc {
69         u_int           cpu_impl;
70         u_int           cpu_part_num;
71         u_int           cpu_variant;
72         u_int           cpu_revision;
73         const char      *cpu_impl_name;
74         const char      *cpu_part_name;
75
76         uint64_t        mpidr;
77         uint64_t        id_aa64afr0;
78         uint64_t        id_aa64afr1;
79         uint64_t        id_aa64dfr0;
80         uint64_t        id_aa64dfr1;
81         uint64_t        id_aa64isar0;
82         uint64_t        id_aa64isar1;
83         uint64_t        id_aa64mmfr0;
84         uint64_t        id_aa64mmfr1;
85         uint64_t        id_aa64mmfr2;
86         uint64_t        id_aa64pfr0;
87         uint64_t        id_aa64pfr1;
88 };
89
90 struct cpu_desc cpu_desc[MAXCPU];
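/*
 * identify_cpu() sets a PRINT_ID_AA64_* bit below when an AP's copy of
 * the corresponding ID register differs from the boot CPU's, so that
 * print_cpu_features() decodes the register for every CPU rather than
 * only for CPU 0.
 */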
91 static u_int cpu_print_regs;
92 #define PRINT_ID_AA64_AFR0      0x00000001
93 #define PRINT_ID_AA64_AFR1      0x00000002
94 #define PRINT_ID_AA64_DFR0      0x00000010
95 #define PRINT_ID_AA64_DFR1      0x00000020
96 #define PRINT_ID_AA64_ISAR0     0x00000100
97 #define PRINT_ID_AA64_ISAR1     0x00000200
98 #define PRINT_ID_AA64_MMFR0     0x00001000
99 #define PRINT_ID_AA64_MMFR1     0x00002000
100 #define PRINT_ID_AA64_MMFR2     0x00004000
101 #define PRINT_ID_AA64_PFR0      0x00010000
102 #define PRINT_ID_AA64_PFR1      0x00020000
103
104 struct cpu_parts {
105         u_int           part_id;
106         const char      *part_name;
107 };
108 #define CPU_PART_NONE   { 0, "Unknown Processor" }
109
110 struct cpu_implementers {
111         u_int                   impl_id;
112         const char              *impl_name;
113         /*
114          * Part number is implementation defined
115          * so each vendor will have its own set of values and names.
116          */
117         const struct cpu_parts  *cpu_parts;
118 };
119 #define CPU_IMPLEMENTER_NONE    { 0, "Unknown Implementer", cpu_parts_none }
120
121 /*
122  * Per-implementer table of (PartNum, CPU Name) pairs.
123  */
124 /* ARM Ltd. */
125 static const struct cpu_parts cpu_parts_arm[] = {
126         { CPU_PART_FOUNDATION, "Foundation-Model" },
127         { CPU_PART_CORTEX_A35, "Cortex-A35" },
128         { CPU_PART_CORTEX_A53, "Cortex-A53" },
129         { CPU_PART_CORTEX_A55, "Cortex-A55" },
130         { CPU_PART_CORTEX_A57, "Cortex-A57" },
131         { CPU_PART_CORTEX_A72, "Cortex-A72" },
132         { CPU_PART_CORTEX_A73, "Cortex-A73" },
133         { CPU_PART_CORTEX_A75, "Cortex-A75" },
134         CPU_PART_NONE,
135 };
136 /* Cavium */
137 static const struct cpu_parts cpu_parts_cavium[] = {
138         { CPU_PART_THUNDERX, "ThunderX" },
139         { CPU_PART_THUNDERX2, "ThunderX2" },
140         CPU_PART_NONE,
141 };
142
143 /* Unknown */
144 static const struct cpu_parts cpu_parts_none[] = {
145         CPU_PART_NONE,
146 };
147
148 /*
149  * Implementers table.
150  */
151 const struct cpu_implementers cpu_implementers[] = {
152         { CPU_IMPL_ARM,         "ARM",          cpu_parts_arm },
153         { CPU_IMPL_BROADCOM,    "Broadcom",     cpu_parts_none },
154         { CPU_IMPL_CAVIUM,      "Cavium",       cpu_parts_cavium },
155         { CPU_IMPL_DEC,         "DEC",          cpu_parts_none },
156         { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
157         { CPU_IMPL_FREESCALE,   "Freescale",    cpu_parts_none },
158         { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
159         { CPU_IMPL_APM,         "APM",          cpu_parts_none },
160         { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_none },
161         { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
162         { CPU_IMPL_INTEL,       "Intel",        cpu_parts_none },
163         CPU_IMPLEMENTER_NONE,
164 };
165
166 struct mrs_safe_value {
167         u_int           CRm;
168         u_int           Op2;
169         uint64_t        value;
170 };
171
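/*
 * Conservative values returned to userland for the ID_AA64* registers
 * emulated in user_mrs_handler() below: no FP/AdvSIMD and baseline ARMv8
 * debug.  Registers in the emulated range without an entry here read as
 * zero.
 */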
172 static struct mrs_safe_value safe_values[] = {
173         {       /* id_aa64pfr0_el1 */
174                 .CRm = 4,
175                 .Op2 = 0,
176                 .value = ID_AA64PFR0_ADV_SIMD_NONE | ID_AA64PFR0_FP_NONE |
177                     ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64,
178         },
179         {       /* id_aa64dfr0_el1 */
180                 .CRm = 5,
181                 .Op2 = 0,
182                 .value = ID_AA64DFR0_DEBUG_VER_8,
183         },
184 };
185
186 static int
187 user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
188     uint32_t esr)
189 {
190         uint64_t value;
191         int CRm, Op2, i, reg;
192
193         if ((insn & MRS_MASK) != MRS_VALUE)
194                 return (0);
195
196         /*
197          * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
198          * These are in the EL1 CPU identification space.
199          * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1.
200          * CRm == {4-7} holds the ID_AA64 registers.
201          *
202          * For full details see the ARMv8 ARM (ARM DDI 0487C.a)
203          * Table D9-2 System instruction encodings for non-Debug System
204          * register accesses.
205          */
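        /*
         * Worked example (hypothetical userland access, not from the
         * original file): "mrs x1, id_aa64pfr0_el1" encodes Op0 = 3,
         * Op1 = 0, CRn = 0, CRm = 4, Op2 = 0 and Rt = 1, so it passes
         * the checks below and x1 receives the safe ID_AA64PFR0 value
         * from safe_values[] rather than the raw hardware register.
         */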
206         if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
207                 return (0);
208
209         CRm = mrs_CRm(insn);
210         if (CRm > 7 || (CRm < 4 && CRm != 0))
211                 return (0);
212
213         Op2 = mrs_Op2(insn);
214         value = 0;
215
216         for (i = 0; i < nitems(safe_values); i++) {
217                 if (safe_values[i].CRm == CRm && safe_values[i].Op2 == Op2) {
218                         value = safe_values[i].value;
219                         break;
220                 }
221         }
222
223         if (CRm == 0) {
224                 switch (Op2) {
225                 case 0:
226                         value = READ_SPECIALREG(midr_el1);
227                         break;
228                 case 5:
229                         value = READ_SPECIALREG(mpidr_el1);
230                         break;
231                 case 6:
232                         value = READ_SPECIALREG(revidr_el1);
233                         break;
234                 default:
235                         return (0);
236                 }
237         }
238
239         /*
240          * We will handle this instruction, move to the next so we
241          * don't trap here again.
242          */
243         frame->tf_elr += INSN_SIZE;
244
245         reg = MRS_REGISTER(insn);
246         /* If reg is 31 then write to xzr, i.e. do nothing */
247         if (reg == 31)
248                 return (1);
249
250         if (reg < nitems(frame->tf_x))
251                 frame->tf_x[reg] = value;
252         else if (reg == 30)
253                 frame->tf_lr = value;
254
255         return (1);
256 }
257
258 static void
259 identify_cpu_sysinit(void *dummy __unused)
260 {
261         int cpu;
262
263         CPU_FOREACH(cpu) {
264                 print_cpu_features(cpu);
265         }
266
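        /*
         * Register user_mrs_handler(): MRS reads of the CPU ID registers
         * from EL0 trap as undefined instructions and are emulated there.
         */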
267         install_undef_handler(true, user_mrs_handler);
268 }
269 SYSINIT(identify_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
270
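/*
 * Print a one-line summary for the given CPU and, for CPU 0 or for any
 * register flagged as differing between CPUs, decode the ID registers
 * gathered by identify_cpu().  Illustrative summary line (hypothetical
 * values): "CPU  0: ARM Cortex-A57 r1p2 affinity:  0  0".
 */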
271 void
272 print_cpu_features(u_int cpu)
273 {
274         struct sbuf *sb;
275         int printed;
276
277         sb = sbuf_new_auto();
278         sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
279             cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
280             cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
281
282         sbuf_cat(sb, " affinity:");
283         switch(cpu_aff_levels) {
284         default:
285         case 4:
286                 sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
287                 /* FALLTHROUGH */
288         case 3:
289                 sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
290                 /* FALLTHROUGH */
291         case 2:
292                 sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
293                 /* FALLTHROUGH */
294         case 1:
295         case 0: /* On UP this will be zero */
296                 sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
297                 break;
298         }
299         sbuf_finish(sb);
300         printf("%s\n", sbuf_data(sb));
301         sbuf_clear(sb);
302
303         /*
304          * There is a hardware erratum where, if one CPU is performing a TLB
305          * invalidation while another is performing a store-exclusive, the
306          * store-exclusive may return the wrong status. A workaround would be
307          * to use an IPI to invalidate on each CPU; however, given the
308          * limited number of affected units (pass 1.1 is the evaluation
309          * hardware revision) and the lack of information from Cavium,
310          * this has not been implemented.
311          *
312          * At the time of writing, the only information is from:
313          * https://lkml.org/lkml/2016/8/4/722
314          */
315         /*
316          * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
317          * triggers on pass 2.0+.
318          */
319         if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
320             CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
321                 printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
322                     "hardware bugs that may cause the incorrect operation of "
323                     "atomic operations.\n");
324
325         if (cpu != 0 && cpu_print_regs == 0)
326                 return;
327
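/* SEP_STR yields "" for the first feature printed in a block and "," after. */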
328 #define SEP_STR ((printed++) == 0) ? "" : ","
329
330         /* AArch64 Instruction Set Attribute Register 0 */
331         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
332                 printed = 0;
333                 sbuf_printf(sb, " Instruction Set Attributes 0 = <");
334
335                 switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
336                 case ID_AA64ISAR0_RDM_NONE:
337                         break;
338                 case ID_AA64ISAR0_RDM_IMPL:
339                         sbuf_printf(sb, "%sRDM", SEP_STR);
340                         break;
341                 default:
342                         sbuf_printf(sb, "%sUnknown RDM", SEP_STR);
343                 }
344
345                 switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
346                 case ID_AA64ISAR0_ATOMIC_NONE:
347                         break;
348                 case ID_AA64ISAR0_ATOMIC_IMPL:
349                         sbuf_printf(sb, "%sAtomic", SEP_STR);
350                         break;
351                 default:
352                         sbuf_printf(sb, "%sUnknown Atomic", SEP_STR);
353                 }
354
355                 switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
356                 case ID_AA64ISAR0_AES_NONE:
357                         break;
358                 case ID_AA64ISAR0_AES_BASE:
359                         sbuf_printf(sb, "%sAES", SEP_STR);
360                         break;
361                 case ID_AA64ISAR0_AES_PMULL:
362                         sbuf_printf(sb, "%sAES+PMULL", SEP_STR);
363                         break;
364                 default:
365                         sbuf_printf(sb, "%sUnknown AES", SEP_STR);
366                         break;
367                 }
368
369                 switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
370                 case ID_AA64ISAR0_SHA1_NONE:
371                         break;
372                 case ID_AA64ISAR0_SHA1_BASE:
373                         sbuf_printf(sb, "%sSHA1", SEP_STR);
374                         break;
375                 default:
376                         sbuf_printf(sb, "%sUnknown SHA1", SEP_STR);
377                         break;
378                 }
379
380                 switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
381                 case ID_AA64ISAR0_SHA2_NONE:
382                         break;
383                 case ID_AA64ISAR0_SHA2_BASE:
384                         sbuf_printf(sb, "%sSHA2", SEP_STR);
385                         break;
386                 case ID_AA64ISAR0_SHA2_512:
387                         sbuf_printf(sb, "%sSHA2+SHA512", SEP_STR);
388                         break;
389                 default:
390                         sbuf_printf(sb, "%sUnknown SHA2", SEP_STR);
391                         break;
392                 }
393
394                 switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
395                 case ID_AA64ISAR0_CRC32_NONE:
396                         break;
397                 case ID_AA64ISAR0_CRC32_BASE:
398                         sbuf_printf(sb, "%sCRC32", SEP_STR);
399                         break;
400                 default:
401                         sbuf_printf(sb, "%sUnknown CRC32", SEP_STR);
402                         break;
403                 }
404
405                 switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) {
406                 case ID_AA64ISAR0_SHA3_NONE:
407                         break;
408                 case ID_AA64ISAR0_SHA3_IMPL:
409                         sbuf_printf(sb, "%sSHA3", SEP_STR);
410                         break;
411                 default:
412                         sbuf_printf(sb, "%sUnknown SHA3", SEP_STR);
413                         break;
414                 }
415
416                 switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) {
417                 case ID_AA64ISAR0_SM3_NONE:
418                         break;
419                 case ID_AA64ISAR0_SM3_IMPL:
420                         sbuf_printf(sb, "%sSM3", SEP_STR);
421                         break;
422                 default:
423                         sbuf_printf(sb, "%sUnknown SM3", SEP_STR);
424                         break;
425                 }
426
427                 switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) {
428                 case ID_AA64ISAR0_SM4_NONE:
429                         break;
430                 case ID_AA64ISAR0_SM4_IMPL:
431                         sbuf_printf(sb, "%sSM4", SEP_STR);
432                         break;
433                 default:
434                         sbuf_printf(sb, "%sUnknown SM4", SEP_STR);
435                         break;
436                 }
437
438                 switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) {
439                 case ID_AA64ISAR0_DP_NONE:
440                         break;
441                 case ID_AA64ISAR0_DP_IMPL:
442                         sbuf_printf(sb, "%sDotProd", SEP_STR);
443                         break;
444                 default:
445                         sbuf_printf(sb, "%sUnknown DP", SEP_STR);
446                         break;
447                 }
448
449                 if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
450                         sbuf_printf(sb, "%s%#lx", SEP_STR,
451                             cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);
452
453                 sbuf_finish(sb);
454                 printf("%s>\n", sbuf_data(sb));
455                 sbuf_clear(sb);
456         }
457
458         /* AArch64 Instruction Set Attribute Register 1 */
459         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
460                 printed = 0;
461                 sbuf_printf(sb, " Instruction Set Attributes 1 = <");
462
463                 switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) {
464                 case ID_AA64ISAR1_GPI_NONE:
465                         break;
466                 case ID_AA64ISAR1_GPI_IMPL:
467                         sbuf_printf(sb, "%sImpl GenericAuth", SEP_STR);
468                         break;
469                 default:
470                         sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
471                         break;
472                 }
473
474                 switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) {
475                 case ID_AA64ISAR1_GPA_NONE:
476                         break;
477                 case ID_AA64ISAR1_GPA_IMPL:
478                         sbuf_printf(sb, "%sPrince GenericAuth", SEP_STR);
479                         break;
480                 default:
481                         sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
482                         break;
483                 }
484
485                 switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) {
486                 case ID_AA64ISAR1_LRCPC_NONE:
487                         break;
488                 case ID_AA64ISAR1_LRCPC_IMPL:
489                         sbuf_printf(sb, "%sRCpc", SEP_STR);
490                         break;
491                 default:
492                         sbuf_printf(sb, "%sUnknown RCpc", SEP_STR);
493                         break;
494                 }
495
496                 switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) {
497                 case ID_AA64ISAR1_FCMA_NONE:
498                         break;
499                 case ID_AA64ISAR1_FCMA_IMPL:
500                         sbuf_printf(sb, "%sFCMA", SEP_STR);
501                         break;
502                 default:
503                         sbuf_printf(sb, "%sUnknown FCMA", SEP_STR);
504                         break;
505                 }
506
507                 switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) {
508                 case ID_AA64ISAR1_JSCVT_NONE:
509                         break;
510                 case ID_AA64ISAR1_JSCVT_IMPL:
511                         sbuf_printf(sb, "%sJS Conv", SEP_STR);
512                         break;
513                 default:
514                         sbuf_printf(sb, "%sUnknown JS Conv", SEP_STR);
515                         break;
516                 }
517
518                 switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) {
519                 case ID_AA64ISAR1_API_NONE:
520                         break;
521                 case ID_AA64ISAR1_API_IMPL:
522                         sbuf_printf(sb, "%sImpl AddrAuth", SEP_STR);
523                         break;
524                 default:
525                         sbuf_printf(sb, "%sUnknown Impl AddrAuth", SEP_STR);
526                         break;
527                 }
528
529                 switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) {
530                 case ID_AA64ISAR1_APA_NONE:
531                         break;
532                 case ID_AA64ISAR1_APA_IMPL:
533                         sbuf_printf(sb, "%sPrince AddrAuth", SEP_STR);
534                         break;
535                 default:
536                         sbuf_printf(sb, "%sUnknown Prince AddrAuth", SEP_STR);
537                         break;
538                 }
539
540                 switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) {
541                 case ID_AA64ISAR1_DPB_NONE:
542                         break;
543                 case ID_AA64ISAR1_DPB_IMPL:
544                         sbuf_printf(sb, "%sDC CVAP", SEP_STR);
545                         break;
546                 default:
547                         sbuf_printf(sb, "%sUnknown DC CVAP", SEP_STR);
548                         break;
549                 }
550
551                 if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0)
552                         sbuf_printf(sb, "%s%#lx", SEP_STR,
553                             cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK);
554                 sbuf_finish(sb);
555                 printf("%s>\n", sbuf_data(sb));
556                 sbuf_clear(sb);
557         }
558
559         /* AArch64 Processor Feature Register 0 */
560         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
561                 printed = 0;
562                 sbuf_printf(sb, "         Processor Features 0 = <");
563
564                 switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) {
565                 case ID_AA64PFR0_SVE_NONE:
566                         break;
567                 case ID_AA64PFR0_SVE_IMPL:
568                         sbuf_printf(sb, "%sSVE", SEP_STR);
569                         break;
570                 default:
571                         sbuf_printf(sb, "%sUnknown SVE", SEP_STR);
572                         break;
573                 }
574
575                 switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) {
576                 case ID_AA64PFR0_RAS_NONE:
577                         break;
578                 case ID_AA64PFR0_RAS_V1:
579                         sbuf_printf(sb, "%sRASv1", SEP_STR);
580                         break;
581                 default:
582                         sbuf_printf(sb, "%sUnknown RAS", SEP_STR);
583                         break;
584                 }
585
586                 switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
587                 case ID_AA64PFR0_GIC_CPUIF_NONE:
588                         break;
589                 case ID_AA64PFR0_GIC_CPUIF_EN:
590                         sbuf_printf(sb, "%sGIC", SEP_STR);
591                         break;
592                 default:
593                         sbuf_printf(sb, "%sUnknown GIC interface", SEP_STR);
594                         break;
595                 }
596
597                 switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
598                 case ID_AA64PFR0_ADV_SIMD_NONE:
599                         break;
600                 case ID_AA64PFR0_ADV_SIMD_IMPL:
601                         sbuf_printf(sb, "%sAdvSIMD", SEP_STR);
602                         break;
603                 case ID_AA64PFR0_ADV_SIMD_HP:
604                         sbuf_printf(sb, "%sAdvSIMD+HP", SEP_STR);
605                         break;
606                 default:
607                         sbuf_printf(sb, "%sUnknown AdvSIMD", SEP_STR);
608                         break;
609                 }
610
611                 switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
612                 case ID_AA64PFR0_FP_NONE:
613                         break;
614                 case ID_AA64PFR0_FP_IMPL:
615                         sbuf_printf(sb, "%sFloat", SEP_STR);
616                         break;
617                 case ID_AA64PFR0_FP_HP:
618                         sbuf_printf(sb, "%sFloat+HP", SEP_STR);
619                         break;
620                 default:
621                         sbuf_printf(sb, "%sUnknown Float", SEP_STR);
622                         break;
623                 }
624
625                 switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
626                 case ID_AA64PFR0_EL3_NONE:
627                         sbuf_printf(sb, "%sNo EL3", SEP_STR);
628                         break;
629                 case ID_AA64PFR0_EL3_64:
630                         sbuf_printf(sb, "%sEL3", SEP_STR);
631                         break;
632                 case ID_AA64PFR0_EL3_64_32:
633                         sbuf_printf(sb, "%sEL3 32", SEP_STR);
634                         break;
635                 default:
636                         sbuf_printf(sb, "%sUnknown EL3", SEP_STR);
637                         break;
638                 }
639
640                 switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
641                 case ID_AA64PFR0_EL2_NONE:
642                         sbuf_printf(sb, "%sNo EL2", SEP_STR);
643                         break;
644                 case ID_AA64PFR0_EL2_64:
645                         sbuf_printf(sb, "%sEL2", SEP_STR);
646                         break;
647                 case ID_AA64PFR0_EL2_64_32:
648                         sbuf_printf(sb, "%sEL2 32", SEP_STR);
649                         break;
650                 default:
651                         sbuf_printf(sb, "%sUnknown EL2", SEP_STR);
652                         break;
653                 }
654
655                 switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
656                 case ID_AA64PFR0_EL1_64:
657                         sbuf_printf(sb, "%sEL1", SEP_STR);
658                         break;
659                 case ID_AA64PFR0_EL1_64_32:
660                         sbuf_printf(sb, "%sEL1 32", SEP_STR);
661                         break;
662                 default:
663                         sbuf_printf(sb, "%sUnknown EL1", SEP_STR);
664                         break;
665                 }
666
667                 switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
668                 case ID_AA64PFR0_EL0_64:
669                         sbuf_printf(sb, "%sEL0", SEP_STR);
670                         break;
671                 case ID_AA64PFR0_EL0_64_32:
672                         sbuf_printf(sb, "%sEL0 32", SEP_STR);
673                         break;
674                 default:
675                         sbuf_printf(sb, "%sUnknown EL0", SEP_STR);
676                         break;
677                 }
678
679                 if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
680                         sbuf_printf(sb, "%s%#lx", SEP_STR,
681                             cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);
682
683                 sbuf_finish(sb);
684                 printf("%s>\n", sbuf_data(sb));
685                 sbuf_clear(sb);
686         }
687
688         /* AArch64 Processor Feature Register 1 */
689         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
690                 printf("         Processor Features 1 = <%#lx>\n",
691                     cpu_desc[cpu].id_aa64pfr1);
692         }
693
694         /* AArch64 Memory Model Feature Register 0 */
695         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
696                 printed = 0;
697                 sbuf_printf(sb, "      Memory Model Features 0 = <");
698                 switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
699                 case ID_AA64MMFR0_TGRAN4_NONE:
700                         break;
701                 case ID_AA64MMFR0_TGRAN4_IMPL:
702                         sbuf_printf(sb, "%s4k Granule", SEP_STR);
703                         break;
704                 default:
705                         sbuf_printf(sb, "%sUnknown 4k Granule", SEP_STR);
706                         break;
707                 }
708
709                 switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
710                 case ID_AA64MMFR0_TGRAN16_NONE:
711                         break;
712                 case ID_AA64MMFR0_TGRAN16_IMPL:
713                         sbuf_printf(sb, "%s16k Granule", SEP_STR);
714                         break;
715                 default:
716                         sbuf_printf(sb, "%sUnknown 16k Granule", SEP_STR);
717                         break;
718                 }
719
720                 switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
721                 case ID_AA64MMFR0_TGRAN64_NONE:
722                         break;
723                 case ID_AA64MMFR0_TGRAN64_IMPL:
724                         sbuf_printf(sb, "%s64k Granule", SEP_STR);
725                         break;
726                 default:
727                         sbuf_printf(sb, "%sUnknown 64k Granule", SEP_STR);
728                         break;
729                 }
730
731                 switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
732                 case ID_AA64MMFR0_BIGEND_FIXED:
733                         break;
734                 case ID_AA64MMFR0_BIGEND_MIXED:
735                         sbuf_printf(sb, "%sMixedEndian", SEP_STR);
736                         break;
737                 default:
738                         sbuf_printf(sb, "%sUnknown Endian switching", SEP_STR);
739                         break;
740                 }
741
742                 switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
743                 case ID_AA64MMFR0_BIGEND_EL0_FIXED:
744                         break;
745                 case ID_AA64MMFR0_BIGEND_EL0_MIXED:
746                         sbuf_printf(sb, "%sEL0 MixEndian", SEP_STR);
747                         break;
748                 default:
749                         sbuf_printf(sb, "%sUnknown EL0 Endian switching", SEP_STR);
750                         break;
751                 }
752
753                 switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
754                 case ID_AA64MMFR0_S_NS_MEM_NONE:
755                         break;
756                 case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
757                         sbuf_printf(sb, "%sS/NS Mem", SEP_STR);
758                         break;
759                 default:
760                         sbuf_printf(sb, "%sUnknown S/NS Mem", SEP_STR);
761                         break;
762                 }
763
764                 switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
765                 case ID_AA64MMFR0_ASID_BITS_8:
766                         sbuf_printf(sb, "%s8bit ASID", SEP_STR);
767                         break;
768                 case ID_AA64MMFR0_ASID_BITS_16:
769                         sbuf_printf(sb, "%s16bit ASID", SEP_STR);
770                         break;
771                 default:
772                         sbuf_printf(sb, "%sUnknown ASID", SEP_STR);
773                         break;
774                 }
775
776                 switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
777                 case ID_AA64MMFR0_PA_RANGE_4G:
778                         sbuf_printf(sb, "%s4GB PA", SEP_STR);
779                         break;
780                 case ID_AA64MMFR0_PA_RANGE_64G:
781                         sbuf_printf(sb, "%s64GB PA", SEP_STR);
782                         break;
783                 case ID_AA64MMFR0_PA_RANGE_1T:
784                         sbuf_printf(sb, "%s1TB PA", SEP_STR);
785                         break;
786                 case ID_AA64MMFR0_PA_RANGE_4T:
787                         sbuf_printf(sb, "%s4TB PA", SEP_STR);
788                         break;
789                 case ID_AA64MMFR0_PA_RANGE_16T:
790                         sbuf_printf(sb, "%s16TB PA", SEP_STR);
791                         break;
792                 case ID_AA64MMFR0_PA_RANGE_256T:
793                         sbuf_printf(sb, "%s256TB PA", SEP_STR);
794                         break;
795                 case ID_AA64MMFR0_PA_RANGE_4P:
796                         sbuf_printf(sb, "%s4PB PA", SEP_STR);
797                         break;
798                 default:
799                         sbuf_printf(sb, "%sUnknown PA Range", SEP_STR);
800                         break;
801                 }
802
803                 if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
804                         sbuf_printf(sb, "%s%#lx", SEP_STR,
805                             cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
806                 sbuf_finish(sb);
807                 printf("%s>\n", sbuf_data(sb));
808                 sbuf_clear(sb);
809         }
810
811         /* AArch64 Memory Model Feature Register 1 */
812         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
813                 printed = 0;
814                 sbuf_printf(sb, "      Memory Model Features 1 = <");
815
816                 switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) {
817                 case ID_AA64MMFR1_XNX_NONE:
818                         break;
819                 case ID_AA64MMFR1_XNX_IMPL:
820                         sbuf_printf(sb, "%sEL2 XN", SEP_STR);
821                         break;
822                 default:
823                         sbuf_printf(sb, "%sUnknown XNX", SEP_STR);
824                         break;
825                 }
826
827                 switch (ID_AA64MMFR1_SPEC_SEI(cpu_desc[cpu].id_aa64mmfr1)) {
828                 case ID_AA64MMFR1_SPEC_SEI_NONE:
829                         break;
830                 case ID_AA64MMFR1_SPEC_SEI_IMPL:
831                         sbuf_printf(sb, "%sSpecSEI", SEP_STR);
832                         break;
833                 default:
834                         sbuf_printf(sb, "%sUnknown SpecSEI", SEP_STR);
835                         break;
836                 }
837
838                 switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
839                 case ID_AA64MMFR1_PAN_NONE:
840                         break;
841                 case ID_AA64MMFR1_PAN_IMPL:
842                         sbuf_printf(sb, "%sPAN", SEP_STR);
843                         break;
844                 case ID_AA64MMFR1_PAN_ATS1E1:
845                         sbuf_printf(sb, "%sPAN+AT", SEP_STR);
846                         break;
847                 default:
848                         sbuf_printf(sb, "%sUnknown PAN", SEP_STR);
849                         break;
850                 }
851
852                 switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
853                 case ID_AA64MMFR1_LO_NONE:
854                         break;
855                 case ID_AA64MMFR1_LO_IMPL:
856                         sbuf_printf(sb, "%sLO", SEP_STR);
857                         break;
858                 default:
859                         sbuf_printf(sb, "%sUnknown LO", SEP_STR);
860                         break;
861                 }
862
863                 switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
864                 case ID_AA64MMFR1_HPDS_NONE:
865                         break;
866                 case ID_AA64MMFR1_HPDS_HPD:
867                         sbuf_printf(sb, "%sHPDS", SEP_STR);
868                         break;
869                 case ID_AA64MMFR1_HPDS_TTPBHA:
870                         sbuf_printf(sb, "%sTTPBHA", SEP_STR);
871                         break;
872                 default:
873                         sbuf_printf(sb, "%sUnknown HPDS", SEP_STR);
874                         break;
875                 }
876
877                 switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
878                 case ID_AA64MMFR1_VH_NONE:
879                         break;
880                 case ID_AA64MMFR1_VH_IMPL:
881                         sbuf_printf(sb, "%sVHE", SEP_STR);
882                         break;
883                 default:
884                         sbuf_printf(sb, "%sUnknown VHE", SEP_STR);
885                         break;
886                 }
887
888                 switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
889                 case ID_AA64MMFR1_VMIDBITS_8:
890                         break;
891                 case ID_AA64MMFR1_VMIDBITS_16:
892                         sbuf_printf(sb, "%s16 VMID bits", SEP_STR);
893                         break;
894                 default:
895                         sbuf_printf(sb, "%sUnknown VMID bits", SEP_STR);
896                         break;
897                 }
898
899                 switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
900                 case ID_AA64MMFR1_HAFDBS_NONE:
901                         break;
902                 case ID_AA64MMFR1_HAFDBS_AF:
903                         sbuf_printf(sb, "%sAF", SEP_STR);
904                         break;
905                 case ID_AA64MMFR1_HAFDBS_AF_DBS:
906                         sbuf_printf(sb, "%sAF+DBS", SEP_STR);
907                         break;
908                 default:
909                         sbuf_printf(sb, "%sUnknown Hardware update AF/DBS", SEP_STR);
910                         break;
911                 }
912
913                 if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
914                         sbuf_printf(sb, "%s%#lx", SEP_STR,
915                             cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
916                 sbuf_finish(sb);
917                 printf("%s>\n", sbuf_data(sb));
918                 sbuf_clear(sb);
919         }
920
921         /* AArch64 Memory Model Feature Register 2 */
922         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) {
923                 printed = 0;
924                 sbuf_printf(sb, "      Memory Model Features 2 = <");
925
926                 switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) {
927                 case ID_AA64MMFR2_NV_NONE:
928                         break;
929                 case ID_AA64MMFR2_NV_IMPL:
930                         sbuf_printf(sb, "%sNestedVirt", SEP_STR);
931                         break;
932                 default:
933                         sbuf_printf(sb, "%sUnknown NestedVirt", SEP_STR);
934                         break;
935                 }
936
937                 switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) {
938                 case ID_AA64MMFR2_CCIDX_32:
939                         sbuf_printf(sb, "%s32b CCIDX", SEP_STR);
940                         break;
941                 case ID_AA64MMFR2_CCIDX_64:
942                         sbuf_printf(sb, "%s64b CCIDX", SEP_STR);
943                         break;
944                 default:
945                         sbuf_printf(sb, "%sUnknown CCIDX", SEP_STR);
946                         break;
947                 }
948
949                 switch (ID_AA64MMFR2_VA_RANGE(cpu_desc[cpu].id_aa64mmfr2)) {
950                 case ID_AA64MMFR2_VA_RANGE_48:
951                         sbuf_printf(sb, "%s48b VA", SEP_STR);
952                         break;
953                 case ID_AA64MMFR2_VA_RANGE_52:
954                         sbuf_printf(sb, "%s52b VA", SEP_STR);
955                         break;
956                 default:
957                         sbuf_printf(sb, "%sUnknown VA Range", SEP_STR);
958                         break;
959                 }
960
961                 switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) {
962                 case ID_AA64MMFR2_IESB_NONE:
963                         break;
964                 case ID_AA64MMFR2_IESB_IMPL:
965                         sbuf_printf(sb, "%sIESB", SEP_STR);
966                         break;
967                 default:
968                         sbuf_printf(sb, "%sUnknown IESB", SEP_STR);
969                         break;
970                 }
971
972                 switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) {
973                 case ID_AA64MMFR2_LSM_NONE:
974                         break;
975                 case ID_AA64MMFR2_LSM_IMPL:
976                         sbuf_printf(sb, "%sLSM", SEP_STR);
977                         break;
978                 default:
979                         sbuf_printf(sb, "%sUnknown LSM", SEP_STR);
980                         break;
981                 }
982
983                 switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) {
984                 case ID_AA64MMFR2_UAO_NONE:
985                         break;
986                 case ID_AA64MMFR2_UAO_IMPL:
987                         sbuf_printf(sb, "%sUAO", SEP_STR);
988                         break;
989                 default:
990                         sbuf_printf(sb, "%sUnknown UAO", SEP_STR);
991                         break;
992                 }
993
994                 switch (ID_AA64MMFR2_CNP(cpu_desc[cpu].id_aa64mmfr2)) {
995                 case ID_AA64MMFR2_CNP_NONE:
996                         break;
997                 case ID_AA64MMFR2_CNP_IMPL:
998                         sbuf_printf(sb, "%sCnP", SEP_STR);
999                         break;
1000                 default:
1001                         sbuf_printf(sb, "%sUnknown CnP", SEP_STR);
1002                         break;
1003                 }
1004
1005                 if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0)
1006                         sbuf_printf(sb, "%s%#lx", SEP_STR,
1007                             cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK);
1008                 sbuf_finish(sb);
1009                 printf("%s>\n", sbuf_data(sb));
1010                 sbuf_clear(sb);
1011         }
1012
1013         /* AArch64 Debug Feature Register 0 */
1014         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
1015                 printed = 0;
1016                 sbuf_printf(sb, "             Debug Features 0 = <");
1017                 switch(ID_AA64DFR0_PMS_VER(cpu_desc[cpu].id_aa64dfr0)) {
1018                 case ID_AA64DFR0_PMS_VER_NONE:
1019                         break;
1020                 case ID_AA64DFR0_PMS_VER_V1:
1021                         sbuf_printf(sb, "%sSPE v1", SEP_STR);
1022                         break;
1023                 default:
1024                         sbuf_printf(sb, "%sUnknown SPE", SEP_STR);
1025                         break;
1026                 }
1027
1028                 sbuf_printf(sb, "%s%lu CTX Breakpoints", SEP_STR,
1029                     ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));
1030
1031                 sbuf_printf(sb, "%s%lu Watchpoints", SEP_STR,
1032                     ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));
1033
1034                 sbuf_printf(sb, "%s%lu Breakpoints", SEP_STR,
1035                     ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));
1036
1037                 switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
1038                 case ID_AA64DFR0_PMU_VER_NONE:
1039                         break;
1040                 case ID_AA64DFR0_PMU_VER_3:
1041                         sbuf_printf(sb, "%sPMUv3", SEP_STR);
1042                         break;
1043                 case ID_AA64DFR0_PMU_VER_3_1:
1044                         sbuf_printf(sb, "%sPMUv3+16 bit evtCount", SEP_STR);
1045                         break;
1046                 case ID_AA64DFR0_PMU_VER_IMPL:
1047                         sbuf_printf(sb, "%sImplementation defined PMU", SEP_STR);
1048                         break;
1049                 default:
1050                         sbuf_printf(sb, "%sUnknown PMU", SEP_STR);
1051                         break;
1052                 }
1053
1054                 switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
1055                 case ID_AA64DFR0_TRACE_VER_NONE:
1056                         break;
1057                 case ID_AA64DFR0_TRACE_VER_IMPL:
1058                         sbuf_printf(sb, "%sTrace", SEP_STR);
1059                         break;
1060                 default:
1061                         sbuf_printf(sb, "%sUnknown Trace", SEP_STR);
1062                         break;
1063                 }
1064
1065                 switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
1066                 case ID_AA64DFR0_DEBUG_VER_8:
1067                         sbuf_printf(sb, "%sDebug v8", SEP_STR);
1068                         break;
1069                 case ID_AA64DFR0_DEBUG_VER_8_VHE:
1070                         sbuf_printf(sb, "%sDebug v8+VHE", SEP_STR);
1071                         break;
1072                 case ID_AA64DFR0_DEBUG_VER_8_2:
1073                         sbuf_printf(sb, "%sDebug v8.2", SEP_STR);
1074                         break;
1075                 default:
1076                         sbuf_printf(sb, "%sUnknown Debug", SEP_STR);
1077                         break;
1078                 }
1079
1080                 if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
1081                         sbuf_printf(sb, "%s%#lx", SEP_STR,
1082                             cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
1083                 sbuf_finish(sb);
1084                 printf("%s>\n", sbuf_data(sb));
1085                 sbuf_clear(sb);
1086         }
1087
1088         /* AArch64 Debug Feature Register 1 */
1089         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
1090                 printf("             Debug Features 1 = <%#lx>\n",
1091                     cpu_desc[cpu].id_aa64dfr1);
1092         }
1093
1094         /* AArch64 Auxiliary Feature Register 0 */
1095         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
1096                 printf("         Auxiliary Features 0 = <%#lx>\n",
1097                     cpu_desc[cpu].id_aa64afr0);
1098         }
1099
1100         /* AArch64 Auxiliary Feature Register 1 */
1101         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
1102                 printf("         Auxiliary Features 1 = <%#lx>\n",
1103                     cpu_desc[cpu].id_aa64afr1);
1104         }
1105
1106         sbuf_delete(sb);
1107         sb = NULL;
1108 #undef SEP_STR
1109 }
1110
1111 void
1112 identify_cpu(void)
1113 {
1114         u_int midr;
1115         u_int impl_id;
1116         u_int part_id;
1117         u_int cpu;
1118         size_t i;
1119         const struct cpu_parts *cpu_partsp = NULL;
1120
1121         cpu = PCPU_GET(cpuid);
1122         midr = get_midr();
1123
1124         /*
1125          * Store midr to pcpu to allow fast reading
1126          * from EL0, EL1 and assembly code.
1127          */
1128         PCPU_SET(midr, midr);
1129
1130         impl_id = CPU_IMPL(midr);
1131         for (i = 0; i < nitems(cpu_implementers); i++) {
1132                 if (impl_id == cpu_implementers[i].impl_id ||
1133                     cpu_implementers[i].impl_id == 0) {
1134                         cpu_desc[cpu].cpu_impl = impl_id;
1135                         cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
1136                         cpu_partsp = cpu_implementers[i].cpu_parts;
1137                         break;
1138                 }
1139         }
1140
1141         part_id = CPU_PART(midr);
1142         for (i = 0; ; i++) {    /* CPU_PART_NONE sentinel ends the search */
1143                 if (part_id == cpu_partsp[i].part_id ||
1144                     cpu_partsp[i].part_id == 0) {
1145                         cpu_desc[cpu].cpu_part_num = part_id;
1146                         cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
1147                         break;
1148                 }
1149         }
1150
1151         cpu_desc[cpu].cpu_revision = CPU_REV(midr);
1152         cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
1153
1154         /* Save affinity for current CPU */
1155         cpu_desc[cpu].mpidr = get_mpidr();
1156         CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
1157
1158         cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1);
1159         cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1);
1160         cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1);
1161         cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1);
1162         cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1);
1163         cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
1164         cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
1165         cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1);
1166         cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1);
1167
1168         if (cpu != 0) {
1169                 /*
1170                  * This code must run on one CPU at a time, but the
1171                  * scheduler is not yet running on this core, so
1172                  * implement a simple WFE/SEV-based spinlock.
1173                  */
1174                 while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
1175                         __asm __volatile("wfe" ::: "memory");
1176
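                /*
                 * Widen cpu_aff_levels to cover every affinity field in
                 * which this CPU differs from the boot CPU; it controls
                 * how many affinity columns print_cpu_features() prints.
                 */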
1177                 switch (cpu_aff_levels) {
1178                 case 0:
1179                         if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
1180                             CPU_AFF0(cpu_desc[0].mpidr))
1181                                 cpu_aff_levels = 1;
1182                         /* FALLTHROUGH */
1183                 case 1:
1184                         if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
1185                             CPU_AFF1(cpu_desc[0].mpidr))
1186                                 cpu_aff_levels = 2;
1187                         /* FALLTHROUGH */
1188                 case 2:
1189                         if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
1190                             CPU_AFF2(cpu_desc[0].mpidr))
1191                                 cpu_aff_levels = 3;
1192                         /* FALLTHROUGH */
1193                 case 3:
1194                         if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
1195                             CPU_AFF3(cpu_desc[0].mpidr))
1196                                 cpu_aff_levels = 4;
1197                         break;
1198                 }
1199
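                /*
                 * Flag any ID register that differs from the boot CPU so
                 * print_cpu_features() decodes it for this CPU as well.
                 */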
1200                 if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
1201                         cpu_print_regs |= PRINT_ID_AA64_AFR0;
1202                 if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
1203                         cpu_print_regs |= PRINT_ID_AA64_AFR1;
1204
1205                 if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
1206                         cpu_print_regs |= PRINT_ID_AA64_DFR0;
1207                 if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
1208                         cpu_print_regs |= PRINT_ID_AA64_DFR1;
1209
1210                 if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
1211                         cpu_print_regs |= PRINT_ID_AA64_ISAR0;
1212                 if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
1213                         cpu_print_regs |= PRINT_ID_AA64_ISAR1;
1214
1215                 if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
1216                         cpu_print_regs |= PRINT_ID_AA64_MMFR0;
1217                 if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
1218                         cpu_print_regs |= PRINT_ID_AA64_MMFR1;
1219                 if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
1220                         cpu_print_regs |= PRINT_ID_AA64_MMFR2;
1221
1222                 if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
1223                         cpu_print_regs |= PRINT_ID_AA64_PFR0;
1224                 if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
1225                         cpu_print_regs |= PRINT_ID_AA64_PFR1;
1226
1227                 /* Wake up the other CPUs */
1228                 atomic_store_rel_int(&ident_lock, 0);
1229                 __asm __volatile("sev" ::: "memory");
1230         }
1231 }