1 /*-
2  * Copyright (c) 2014 Andrew Turner
3  * Copyright (c) 2014 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Semihalf
7  * under sponsorship of the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/pcpu.h>
38 #include <sys/sbuf.h>
39 #include <sys/smp.h>
40 #include <sys/sysctl.h>
41 #include <sys/systm.h>
42
43 #include <machine/atomic.h>
44 #include <machine/cpu.h>
45 #include <machine/cpufunc.h>
46 #include <machine/undefined.h>
47 #include <machine/elf.h>
48
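/* Simple spinlock serialising identify_cpu() on the non-boot CPUs. */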
49 static int ident_lock;
50 static void print_cpu_features(u_int cpu);
51 static u_long parse_cpu_features_hwcap(u_int cpu);
52
53 char machine[] = "arm64";
54
55 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
56     "Machine class");
57
58 static char cpu_model[64];
59 SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD,
60         cpu_model, sizeof(cpu_model), "Machine model");
61
/*
 * Per-CPU affinity as provided in MPIDR_EL1.
 * Indexed by CPU number in logical order selected by the system.
 * Relevant fields can be extracted using the CPU_AFFn macros;
 * Aff3.Aff2.Aff1.Aff0 constructs a unique CPU address in the system.
 *
 * Fields used by us:
 * Aff1 - Cluster number
 * Aff0 - CPU number in Aff1 cluster
 */
72 uint64_t __cpu_affinity[MAXCPU];
73 static u_int cpu_aff_levels;
74
75 struct cpu_desc {
76         u_int           cpu_impl;
77         u_int           cpu_part_num;
78         u_int           cpu_variant;
79         u_int           cpu_revision;
80         const char      *cpu_impl_name;
81         const char      *cpu_part_name;
82
83         uint64_t        mpidr;
84         uint64_t        id_aa64afr0;
85         uint64_t        id_aa64afr1;
86         uint64_t        id_aa64dfr0;
87         uint64_t        id_aa64dfr1;
88         uint64_t        id_aa64isar0;
89         uint64_t        id_aa64isar1;
90         uint64_t        id_aa64mmfr0;
91         uint64_t        id_aa64mmfr1;
92         uint64_t        id_aa64mmfr2;
93         uint64_t        id_aa64pfr0;
94         uint64_t        id_aa64pfr1;
95 };
96
97 struct cpu_desc cpu_desc[MAXCPU];
98 struct cpu_desc user_cpu_desc;
99 static u_int cpu_print_regs;
100 #define PRINT_ID_AA64_AFR0      0x00000001
101 #define PRINT_ID_AA64_AFR1      0x00000002
102 #define PRINT_ID_AA64_DFR0      0x00000010
103 #define PRINT_ID_AA64_DFR1      0x00000020
104 #define PRINT_ID_AA64_ISAR0     0x00000100
105 #define PRINT_ID_AA64_ISAR1     0x00000200
106 #define PRINT_ID_AA64_MMFR0     0x00001000
107 #define PRINT_ID_AA64_MMFR1     0x00002000
108 #define PRINT_ID_AA64_MMFR2     0x00004000
109 #define PRINT_ID_AA64_PFR0      0x00010000
110 #define PRINT_ID_AA64_PFR1      0x00020000
111
112 struct cpu_parts {
113         u_int           part_id;
114         const char      *part_name;
115 };
116 #define CPU_PART_NONE   { 0, "Unknown Processor" }
117
118 struct cpu_implementers {
119         u_int                   impl_id;
120         const char              *impl_name;
121         /*
122          * Part number is implementation defined
123          * so each vendor will have its own set of values and names.
124          */
125         const struct cpu_parts  *cpu_parts;
126 };
127 #define CPU_IMPLEMENTER_NONE    { 0, "Unknown Implementer", cpu_parts_none }
128
129 /*
130  * Per-implementer table of (PartNum, CPU Name) pairs.
131  */
132 /* ARM Ltd. */
133 static const struct cpu_parts cpu_parts_arm[] = {
134         { CPU_PART_FOUNDATION, "Foundation-Model" },
135         { CPU_PART_CORTEX_A35, "Cortex-A35" },
136         { CPU_PART_CORTEX_A53, "Cortex-A53" },
137         { CPU_PART_CORTEX_A55, "Cortex-A55" },
138         { CPU_PART_CORTEX_A57, "Cortex-A57" },
139         { CPU_PART_CORTEX_A72, "Cortex-A72" },
140         { CPU_PART_CORTEX_A73, "Cortex-A73" },
141         { CPU_PART_CORTEX_A75, "Cortex-A75" },
142         CPU_PART_NONE,
143 };
144 /* Cavium */
145 static const struct cpu_parts cpu_parts_cavium[] = {
146         { CPU_PART_THUNDERX, "ThunderX" },
147         { CPU_PART_THUNDERX2, "ThunderX2" },
148         CPU_PART_NONE,
149 };
150
151 /* APM / Ampere */
152 static const struct cpu_parts cpu_parts_apm[] = {
153         { CPU_PART_EMAG8180, "eMAG 8180" },
154         CPU_PART_NONE,
155 };
156
157 /* Unknown */
158 static const struct cpu_parts cpu_parts_none[] = {
159         CPU_PART_NONE,
160 };
161
162 /*
163  * Implementers table.
164  */
165 const struct cpu_implementers cpu_implementers[] = {
166         { CPU_IMPL_ARM,         "ARM",          cpu_parts_arm },
167         { CPU_IMPL_BROADCOM,    "Broadcom",     cpu_parts_none },
168         { CPU_IMPL_CAVIUM,      "Cavium",       cpu_parts_cavium },
169         { CPU_IMPL_DEC,         "DEC",          cpu_parts_none },
170         { CPU_IMPL_INFINEON,    "IFX",          cpu_parts_none },
171         { CPU_IMPL_FREESCALE,   "Freescale",    cpu_parts_none },
172         { CPU_IMPL_NVIDIA,      "NVIDIA",       cpu_parts_none },
173         { CPU_IMPL_APM,         "APM",          cpu_parts_apm },
174         { CPU_IMPL_QUALCOMM,    "Qualcomm",     cpu_parts_none },
175         { CPU_IMPL_MARVELL,     "Marvell",      cpu_parts_none },
176         { CPU_IMPL_INTEL,       "Intel",        cpu_parts_none },
177         CPU_IMPLEMENTER_NONE,
178 };
179
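/*
 * How each ID register field is presented to userland:
 * MRS_EXACT forces the field to a fixed value (0 unless set with
 * MRS_EXACT_VAL), while MRS_LOWER keeps the lowest value seen across
 * all CPUs so only features common to every CPU are advertised.
 */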
180 #define MRS_TYPE_MASK           0xf
181 #define MRS_INVALID             0
182 #define MRS_EXACT               1
183 #define MRS_EXACT_VAL(x)        (MRS_EXACT | ((x) << 4))
184 #define MRS_EXACT_FIELD(x)      ((x) >> 4)
185 #define MRS_LOWER               2
186
187 struct mrs_field {
188         bool            sign;
189         u_int           type;
190         u_int           shift;
191 };
192
193 #define MRS_FIELD(_sign, _type, _shift)                                 \
194         {                                                               \
195                 .sign = (_sign),                                        \
196                 .type = (_type),                                        \
197                 .shift = (_shift),                                      \
198         }
199
200 #define MRS_FIELD_END   { .type = MRS_INVALID, }
201
202 static struct mrs_field id_aa64isar0_fields[] = {
203         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_DP_SHIFT),
204         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM4_SHIFT),
205         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SM3_SHIFT),
206         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA3_SHIFT),
207         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_RDM_SHIFT),
208         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_Atomic_SHIFT),
209         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_CRC32_SHIFT),
210         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA2_SHIFT),
211         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_SHA1_SHIFT),
212         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR0_AES_SHIFT),
213         MRS_FIELD_END,
214 };
215
216 static struct mrs_field id_aa64isar1_fields[] = {
217         MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPI_SHIFT),
218         MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_GPA_SHIFT),
219         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_LRCPC_SHIFT),
220         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_FCMA_SHIFT),
221         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_JSCVT_SHIFT),
222         MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_API_SHIFT),
223         MRS_FIELD(false, MRS_EXACT, ID_AA64ISAR1_APA_SHIFT),
224         MRS_FIELD(false, MRS_LOWER, ID_AA64ISAR1_DPB_SHIFT),
225         MRS_FIELD_END,
226 };
227
228 static struct mrs_field id_aa64pfr0_fields[] = {
229         MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_SVE_SHIFT),
230         MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_RAS_SHIFT),
231         MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_GIC_SHIFT),
232         MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_AdvSIMD_SHIFT),
233         MRS_FIELD(true,  MRS_LOWER, ID_AA64PFR0_FP_SHIFT),
234         MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL3_SHIFT),
235         MRS_FIELD(false, MRS_EXACT, ID_AA64PFR0_EL2_SHIFT),
236         MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL1_SHIFT),
237         MRS_FIELD(false, MRS_LOWER, ID_AA64PFR0_EL0_SHIFT),
238         MRS_FIELD_END,
239 };
240
241 static struct mrs_field id_aa64dfr0_fields[] = {
242         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMSVer_SHIFT),
243         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_CTX_CMPs_SHIFT),
244         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_WRPs_SHIFT),
245         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_BRPs_SHIFT),
246         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_PMUVer_SHIFT),
247         MRS_FIELD(false, MRS_EXACT, ID_AA64DFR0_TraceVer_SHIFT),
248         MRS_FIELD(false, MRS_EXACT_VAL(0x6), ID_AA64DFR0_DebugVer_SHIFT),
249         MRS_FIELD_END,
250 };
251
252 struct mrs_user_reg {
253         u_int           CRm;
254         u_int           Op2;
255         size_t          offset;
256         struct mrs_field *fields;
257 };
258
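/*
 * ID registers emulated for userland by user_mrs_handler(), keyed by
 * their CRm/Op2 encoding (Op0 == 3, Op1 == 0, CRn == 0).
 */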
259 static struct mrs_user_reg user_regs[] = {
260         {       /* id_aa64isar0_el1 */
261                 .CRm = 6,
262                 .Op2 = 0,
263                 .offset = __offsetof(struct cpu_desc, id_aa64isar0),
264                 .fields = id_aa64isar0_fields,
265         },
266         {       /* id_aa64isar1_el1 */
267                 .CRm = 6,
268                 .Op2 = 1,
269                 .offset = __offsetof(struct cpu_desc, id_aa64isar1),
270                 .fields = id_aa64isar1_fields,
271         },
272         {       /* id_aa64pfr0_el1 */
273                 .CRm = 4,
274                 .Op2 = 0,
275                 .offset = __offsetof(struct cpu_desc, id_aa64pfr0),
276                 .fields = id_aa64pfr0_fields,
277         },
278         {       /* id_aa64dfr0_el1 */
279                 .CRm = 5,
280                 .Op2 = 0,
281                 .offset = __offsetof(struct cpu_desc, id_aa64dfr0),
282                 .fields = id_aa64dfr0_fields,
283         },
284 };
285
286 #define CPU_DESC_FIELD(desc, idx)                                       \
287     *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset)
288
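/*
 * Undefined instruction handler used to emulate userland MRS reads of
 * the CPU ID registers. The sanitised values from user_cpu_desc are
 * returned, except for CRm == 0 where the real MIDR, MPIDR and REVIDR
 * are read.
 */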
289 static int
290 user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
291     uint32_t esr)
292 {
293         uint64_t value;
294         int CRm, Op2, i, reg;
295
296         if ((insn & MRS_MASK) != MRS_VALUE)
297                 return (0);
298
        /*
         * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
         * These are in the EL1 CPU identification space.
         * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1.
         * CRm == {4-7} holds the ID_AA64 registers.
         *
         * For full details see the ARMv8 ARM (ARM DDI 0487C.a),
         * Table D9-2, System instruction encodings for non-Debug System
         * register accesses.
         */
309         if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
310                 return (0);
311
312         CRm = mrs_CRm(insn);
313         if (CRm > 7 || (CRm < 4 && CRm != 0))
314                 return (0);
315
316         Op2 = mrs_Op2(insn);
317         value = 0;
318
319         for (i = 0; i < nitems(user_regs); i++) {
320                 if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
321                         value = CPU_DESC_FIELD(user_cpu_desc, i);
322                         break;
323                 }
324         }
325
326         if (CRm == 0) {
327                 switch (Op2) {
328                 case 0:
329                         value = READ_SPECIALREG(midr_el1);
330                         break;
331                 case 5:
332                         value = READ_SPECIALREG(mpidr_el1);
333                         break;
334                 case 6:
335                         value = READ_SPECIALREG(revidr_el1);
336                         break;
337                 default:
338                         return (0);
339                 }
340         }
341
        /*
         * We will handle this instruction; move to the next one so we
         * don't trap here again.
         */
346         frame->tf_elr += INSN_SIZE;
347
348         reg = MRS_REGISTER(insn);
349         /* If reg is 31 then write to xzr, i.e. do nothing */
350         if (reg == 31)
351                 return (1);
352
353         if (reg < nitems(frame->tf_x))
354                 frame->tf_x[reg] = value;
355         else if (reg == 30)
356                 frame->tf_lr = value;
357
358         return (1);
359 }
360
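/*
 * Merge this CPU's ID register values into the user-visible copy in
 * user_cpu_desc, applying the MRS_EXACT/MRS_LOWER rules from each
 * register's field table.
 */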
361 static void
362 update_user_regs(u_int cpu)
363 {
364         struct mrs_field *fields;
365         uint64_t cur, value;
366         int i, j, cur_field, new_field;
367
368         for (i = 0; i < nitems(user_regs); i++) {
369                 value = CPU_DESC_FIELD(cpu_desc[cpu], i);
370                 if (cpu == 0)
371                         cur = value;
372                 else
373                         cur = CPU_DESC_FIELD(user_cpu_desc, i);
374
375                 fields = user_regs[i].fields;
376                 for (j = 0; fields[j].type != 0; j++) {
377                         switch (fields[j].type & MRS_TYPE_MASK) {
378                         case MRS_EXACT:
379                                 cur &= ~(0xfu << fields[j].shift);
380                                 cur |=
381                                     (uint64_t)MRS_EXACT_FIELD(fields[j].type) <<
382                                     fields[j].shift;
383                                 break;
384                         case MRS_LOWER:
385                                 new_field = (value >> fields[j].shift) & 0xf;
386                                 cur_field = (cur >> fields[j].shift) & 0xf;
387                                 if ((fields[j].sign &&
388                                      (int)new_field < (int)cur_field) ||
389                                     (!fields[j].sign &&
390                                      (u_int)new_field < (u_int)cur_field)) {
391                                         cur &= ~(0xfu << fields[j].shift);
392                                         cur |= new_field << fields[j].shift;
393                                 }
394                                 break;
395                         default:
396                                 panic("Invalid field type: %d", fields[j].type);
397                         }
398                 }
399
400                 CPU_DESC_FIELD(user_cpu_desc, i) = cur;
401         }
402 }
403
404 /* HWCAP */
405 extern u_long elf_hwcap;
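/*
 * The bits gathered below are exported to userland in the AT_HWCAP ELF
 * auxiliary vector. An illustrative userland consumer (the helper name
 * is a placeholder) might look like:
 *
 *	unsigned long hwcap;
 *
 *	if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) == 0 &&
 *	    (hwcap & HWCAP_AES) != 0)
 *		use_aes_instructions();
 */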
406
407 static void
408 identify_cpu_sysinit(void *dummy __unused)
409 {
410         int cpu;
411         u_long hwcap;
412
        /* Create a user-visible CPU description with safe values */
414         memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
415         /* Safe values for these registers */
416         user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE |
417             ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64;
418         user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8;
419
420
421         CPU_FOREACH(cpu) {
422                 print_cpu_features(cpu);
423                 hwcap = parse_cpu_features_hwcap(cpu);
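                /* Only advertise features present on every CPU. */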
424                 if (elf_hwcap == 0)
425                         elf_hwcap = hwcap;
426                 else
427                         elf_hwcap &= hwcap;
428                 update_user_regs(cpu);
429         }
430
431         install_undef_handler(true, user_mrs_handler);
432 }
SYSINIT(identify_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);
434
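/*
 * Build the HWCAP flags advertised to userland from this CPU's
 * ID_AA64ISAR0, ID_AA64ISAR1 and ID_AA64PFR0 values.
 */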
435 static u_long
436 parse_cpu_features_hwcap(u_int cpu)
437 {
438         u_long hwcap = 0;
439
440         if (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_DP_IMPL)
441                 hwcap |= HWCAP_ASIMDDP;
442
443         if (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM4_IMPL)
444                 hwcap |= HWCAP_SM4;
445
446         if (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM3_IMPL)
447                 hwcap |= HWCAP_SM3;
448
449         if (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_RDM_IMPL)
450                 hwcap |= HWCAP_ASIMDRDM;
451
452         if (ID_AA64ISAR0_Atomic(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_Atomic_IMPL)
453                 hwcap |= HWCAP_ATOMICS;
454
455         if (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE)
456                 hwcap |= HWCAP_CRC32;
457
458         switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
459                 case ID_AA64ISAR0_SHA2_BASE:
460                         hwcap |= HWCAP_SHA2;
461                         break;
462                 case ID_AA64ISAR0_SHA2_512:
463                         hwcap |= HWCAP_SHA2 | HWCAP_SHA512;
464                         break;
465         default:
466                 break;
467         }
468
469         if (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0))
470                 hwcap |= HWCAP_SHA1;
471
472         switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
473         case ID_AA64ISAR0_AES_BASE:
474                 hwcap |= HWCAP_AES;
475                 break;
476         case ID_AA64ISAR0_AES_PMULL:
477                 hwcap |= HWCAP_PMULL | HWCAP_AES;
478                 break;
479         default:
480                 break;
481         }
482
483         if (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_LRCPC_IMPL)
484                 hwcap |= HWCAP_LRCPC;
485
486         if (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_FCMA_IMPL)
487                 hwcap |= HWCAP_FCMA;
488
489         if (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_JSCVT_IMPL)
490                 hwcap |= HWCAP_JSCVT;
491
492         if (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_DPB_IMPL)
493                 hwcap |= HWCAP_DCPOP;
494
495         if (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0) == ID_AA64PFR0_SVE_IMPL)
496                 hwcap |= HWCAP_SVE;
497
498         switch (ID_AA64PFR0_AdvSIMD(cpu_desc[cpu].id_aa64pfr0)) {
499         case ID_AA64PFR0_AdvSIMD_IMPL:
500                 hwcap |= HWCAP_ASIMD;
501                 break;
502         case ID_AA64PFR0_AdvSIMD_HP:
503                 hwcap |= HWCAP_ASIMD | HWCAP_ASIMDHP;
504                 break;
505         default:
506                 break;
507         }
508
509         switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
510         case ID_AA64PFR0_FP_IMPL:
511                 hwcap |= HWCAP_FP;
512                 break;
513         case ID_AA64PFR0_FP_HP:
514                 hwcap |= HWCAP_FP | HWCAP_FPHP;
515                 break;
516         default:
517                 break;
518         }
519
520         return (hwcap);
521 }
522
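/*
 * Pretty-print the decoded ID registers for a CPU. The summary line is
 * always printed; for CPUs other than CPU 0 the individual registers
 * are only printed when flagged in cpu_print_regs, i.e. when they were
 * seen to differ from CPU 0.
 */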
523 static void
524 print_cpu_features(u_int cpu)
525 {
526         struct sbuf *sb;
527         int printed;
528
529         sb = sbuf_new_auto();
530         sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu,
531             cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
532             cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
533
534         sbuf_cat(sb, " affinity:");
535         switch(cpu_aff_levels) {
536         default:
537         case 4:
538                 sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
539                 /* FALLTHROUGH */
540         case 3:
541                 sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
542                 /* FALLTHROUGH */
543         case 2:
544                 sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
545                 /* FALLTHROUGH */
546         case 1:
547         case 0: /* On UP this will be zero */
548                 sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
549                 break;
550         }
551         sbuf_finish(sb);
552         printf("%s\n", sbuf_data(sb));
553         sbuf_clear(sb);
554
        /*
         * There is a hardware erratum where, if one CPU is performing a TLB
         * invalidation while another is performing a store-exclusive, the
         * store-exclusive may return the wrong status. A workaround seems
         * to be to use an IPI to invalidate on each CPU; however, given the
         * limited number of affected units (pass 1.1 is the evaluation
         * hardware revision) and the lack of information from Cavium, this
         * has not been implemented.
         *
         * At the time of writing, the only information is from:
         * https://lkml.org/lkml/2016/8/4/722
         */
567         /*
568          * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also
569          * triggers on pass 2.0+.
570          */
571         if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 &&
572             CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
                printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known "
                    "hardware bugs that may cause incorrect operation of "
                    "atomic operations.\n");
576
577 #define SEP_STR ((printed++) == 0) ? "" : ","
578
579         /* AArch64 Instruction Set Attribute Register 0 */
580         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
581                 printed = 0;
582                 sbuf_printf(sb, " Instruction Set Attributes 0 = <");
583
584                 switch (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0)) {
585                 case ID_AA64ISAR0_DP_NONE:
586                         break;
587                 case ID_AA64ISAR0_DP_IMPL:
588                         sbuf_printf(sb, "%sDotProd", SEP_STR);
589                         break;
590                 default:
591                         sbuf_printf(sb, "%sUnknown DP", SEP_STR);
592                         break;
593                 }
594
595                 switch (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0)) {
596                 case ID_AA64ISAR0_SM4_NONE:
597                         break;
598                 case ID_AA64ISAR0_SM4_IMPL:
599                         sbuf_printf(sb, "%sSM4", SEP_STR);
600                         break;
601                 default:
602                         sbuf_printf(sb, "%sUnknown SM4", SEP_STR);
603                         break;
604                 }
605
606                 switch (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0)) {
607                 case ID_AA64ISAR0_SM3_NONE:
608                         break;
609                 case ID_AA64ISAR0_SM3_IMPL:
610                         sbuf_printf(sb, "%sSM3", SEP_STR);
611                         break;
612                 default:
613                         sbuf_printf(sb, "%sUnknown SM3", SEP_STR);
614                         break;
615                 }
616
617                 switch (ID_AA64ISAR0_SHA3(cpu_desc[cpu].id_aa64isar0)) {
618                 case ID_AA64ISAR0_SHA3_NONE:
619                         break;
620                 case ID_AA64ISAR0_SHA3_IMPL:
621                         sbuf_printf(sb, "%sSHA3", SEP_STR);
622                         break;
623                 default:
624                         sbuf_printf(sb, "%sUnknown SHA3", SEP_STR);
625                         break;
626                 }
627
628                 switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
629                 case ID_AA64ISAR0_RDM_NONE:
630                         break;
631                 case ID_AA64ISAR0_RDM_IMPL:
632                         sbuf_printf(sb, "%sRDM", SEP_STR);
633                         break;
634                 default:
635                         sbuf_printf(sb, "%sUnknown RDM", SEP_STR);
636                 }
637
638                 switch (ID_AA64ISAR0_Atomic(cpu_desc[cpu].id_aa64isar0)) {
639                 case ID_AA64ISAR0_Atomic_NONE:
640                         break;
641                 case ID_AA64ISAR0_Atomic_IMPL:
642                         sbuf_printf(sb, "%sAtomic", SEP_STR);
643                         break;
644                 default:
645                         sbuf_printf(sb, "%sUnknown Atomic", SEP_STR);
646                 }
647
648                 switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
649                 case ID_AA64ISAR0_CRC32_NONE:
650                         break;
651                 case ID_AA64ISAR0_CRC32_BASE:
652                         sbuf_printf(sb, "%sCRC32", SEP_STR);
653                         break;
654                 default:
655                         sbuf_printf(sb, "%sUnknown CRC32", SEP_STR);
656                         break;
657                 }
658
659                 switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
660                 case ID_AA64ISAR0_SHA2_NONE:
661                         break;
662                 case ID_AA64ISAR0_SHA2_BASE:
663                         sbuf_printf(sb, "%sSHA2", SEP_STR);
664                         break;
665                 case ID_AA64ISAR0_SHA2_512:
666                         sbuf_printf(sb, "%sSHA2+SHA512", SEP_STR);
667                         break;
668                 default:
669                         sbuf_printf(sb, "%sUnknown SHA2", SEP_STR);
670                         break;
671                 }
672
673                 switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
674                 case ID_AA64ISAR0_SHA1_NONE:
675                         break;
676                 case ID_AA64ISAR0_SHA1_BASE:
677                         sbuf_printf(sb, "%sSHA1", SEP_STR);
678                         break;
679                 default:
680                         sbuf_printf(sb, "%sUnknown SHA1", SEP_STR);
681                         break;
682                 }
683
684                 switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
685                 case ID_AA64ISAR0_AES_NONE:
686                         break;
687                 case ID_AA64ISAR0_AES_BASE:
688                         sbuf_printf(sb, "%sAES", SEP_STR);
689                         break;
690                 case ID_AA64ISAR0_AES_PMULL:
691                         sbuf_printf(sb, "%sAES+PMULL", SEP_STR);
692                         break;
693                 default:
694                         sbuf_printf(sb, "%sUnknown AES", SEP_STR);
695                         break;
696                 }
697
698                 if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
699                         sbuf_printf(sb, "%s%#lx", SEP_STR,
700                             cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);
701
702                 sbuf_finish(sb);
703                 printf("%s>\n", sbuf_data(sb));
704                 sbuf_clear(sb);
705         }
706
707         /* AArch64 Instruction Set Attribute Register 1 */
708         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
709                 printed = 0;
710                 sbuf_printf(sb, " Instruction Set Attributes 1 = <");
711
712                 switch (ID_AA64ISAR1_GPI(cpu_desc[cpu].id_aa64isar1)) {
713                 case ID_AA64ISAR1_GPI_NONE:
714                         break;
715                 case ID_AA64ISAR1_GPI_IMPL:
716                         sbuf_printf(sb, "%sImpl GenericAuth", SEP_STR);
717                         break;
718                 default:
719                         sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
720                         break;
721                 }
722
723                 switch (ID_AA64ISAR1_GPA(cpu_desc[cpu].id_aa64isar1)) {
724                 case ID_AA64ISAR1_GPA_NONE:
725                         break;
726                 case ID_AA64ISAR1_GPA_IMPL:
727                         sbuf_printf(sb, "%sPrince GenericAuth", SEP_STR);
728                         break;
729                 default:
730                         sbuf_printf(sb, "%sUnknown GenericAuth", SEP_STR);
731                         break;
732                 }
733
734                 switch (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1)) {
735                 case ID_AA64ISAR1_LRCPC_NONE:
736                         break;
737                 case ID_AA64ISAR1_LRCPC_IMPL:
738                         sbuf_printf(sb, "%sRCpc", SEP_STR);
739                         break;
740                 default:
741                         sbuf_printf(sb, "%sUnknown RCpc", SEP_STR);
742                         break;
743                 }
744
745                 switch (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1)) {
746                 case ID_AA64ISAR1_FCMA_NONE:
747                         break;
748                 case ID_AA64ISAR1_FCMA_IMPL:
749                         sbuf_printf(sb, "%sFCMA", SEP_STR);
750                         break;
751                 default:
752                         sbuf_printf(sb, "%sUnknown FCMA", SEP_STR);
753                         break;
754                 }
755
756                 switch (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1)) {
757                 case ID_AA64ISAR1_JSCVT_NONE:
758                         break;
759                 case ID_AA64ISAR1_JSCVT_IMPL:
760                         sbuf_printf(sb, "%sJS Conv", SEP_STR);
761                         break;
762                 default:
763                         sbuf_printf(sb, "%sUnknown JS Conv", SEP_STR);
764                         break;
765                 }
766
767                 switch (ID_AA64ISAR1_API(cpu_desc[cpu].id_aa64isar1)) {
768                 case ID_AA64ISAR1_API_NONE:
769                         break;
770                 case ID_AA64ISAR1_API_IMPL:
771                         sbuf_printf(sb, "%sImpl AddrAuth", SEP_STR);
772                         break;
773                 default:
774                         sbuf_printf(sb, "%sUnknown Impl AddrAuth", SEP_STR);
775                         break;
776                 }
777
778                 switch (ID_AA64ISAR1_APA(cpu_desc[cpu].id_aa64isar1)) {
779                 case ID_AA64ISAR1_APA_NONE:
780                         break;
781                 case ID_AA64ISAR1_APA_IMPL:
782                         sbuf_printf(sb, "%sPrince AddrAuth", SEP_STR);
783                         break;
784                 default:
785                         sbuf_printf(sb, "%sUnknown Prince AddrAuth", SEP_STR);
786                         break;
787                 }
788
789                 switch (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1)) {
790                 case ID_AA64ISAR1_DPB_NONE:
791                         break;
792                 case ID_AA64ISAR1_DPB_IMPL:
793                         sbuf_printf(sb, "%sDC CVAP", SEP_STR);
794                         break;
795                 default:
796                         sbuf_printf(sb, "%sUnknown DC CVAP", SEP_STR);
797                         break;
798                 }
799
800                 if ((cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK) != 0)
801                         sbuf_printf(sb, "%s%#lx", SEP_STR,
802                             cpu_desc[cpu].id_aa64isar1 & ~ID_AA64ISAR1_MASK);
803                 sbuf_finish(sb);
804                 printf("%s>\n", sbuf_data(sb));
805                 sbuf_clear(sb);
806         }
807
808         /* AArch64 Processor Feature Register 0 */
809         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
810                 printed = 0;
811                 sbuf_printf(sb, "         Processor Features 0 = <");
812
813                 switch (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0)) {
814                 case ID_AA64PFR0_SVE_NONE:
815                         break;
816                 case ID_AA64PFR0_SVE_IMPL:
817                         sbuf_printf(sb, "%sSVE", SEP_STR);
818                         break;
819                 default:
820                         sbuf_printf(sb, "%sUnknown SVE", SEP_STR);
821                         break;
822                 }
823
824                 switch (ID_AA64PFR0_RAS(cpu_desc[cpu].id_aa64pfr0)) {
825                 case ID_AA64PFR0_RAS_NONE:
826                         break;
827                 case ID_AA64PFR0_RAS_V1:
828                         sbuf_printf(sb, "%sRASv1", SEP_STR);
829                         break;
830                 default:
831                         sbuf_printf(sb, "%sUnknown RAS", SEP_STR);
832                         break;
833                 }
834
835                 switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
836                 case ID_AA64PFR0_GIC_CPUIF_NONE:
837                         break;
838                 case ID_AA64PFR0_GIC_CPUIF_EN:
839                         sbuf_printf(sb, "%sGIC", SEP_STR);
840                         break;
841                 default:
842                         sbuf_printf(sb, "%sUnknown GIC interface", SEP_STR);
843                         break;
844                 }
845
846                 switch (ID_AA64PFR0_AdvSIMD(cpu_desc[cpu].id_aa64pfr0)) {
847                 case ID_AA64PFR0_AdvSIMD_NONE:
848                         break;
849                 case ID_AA64PFR0_AdvSIMD_IMPL:
850                         sbuf_printf(sb, "%sAdvSIMD", SEP_STR);
851                         break;
852                 case ID_AA64PFR0_AdvSIMD_HP:
853                         sbuf_printf(sb, "%sAdvSIMD+HP", SEP_STR);
854                         break;
855                 default:
856                         sbuf_printf(sb, "%sUnknown AdvSIMD", SEP_STR);
857                         break;
858                 }
859
860                 switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
861                 case ID_AA64PFR0_FP_NONE:
862                         break;
863                 case ID_AA64PFR0_FP_IMPL:
864                         sbuf_printf(sb, "%sFloat", SEP_STR);
865                         break;
866                 case ID_AA64PFR0_FP_HP:
867                         sbuf_printf(sb, "%sFloat+HP", SEP_STR);
868                         break;
869                 default:
870                         sbuf_printf(sb, "%sUnknown Float", SEP_STR);
871                         break;
872                 }
873
874                 switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
875                 case ID_AA64PFR0_EL3_NONE:
876                         sbuf_printf(sb, "%sNo EL3", SEP_STR);
877                         break;
878                 case ID_AA64PFR0_EL3_64:
879                         sbuf_printf(sb, "%sEL3", SEP_STR);
880                         break;
881                 case ID_AA64PFR0_EL3_64_32:
882                         sbuf_printf(sb, "%sEL3 32", SEP_STR);
883                         break;
884                 default:
885                         sbuf_printf(sb, "%sUnknown EL3", SEP_STR);
886                         break;
887                 }
888
889                 switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
890                 case ID_AA64PFR0_EL2_NONE:
891                         sbuf_printf(sb, "%sNo EL2", SEP_STR);
892                         break;
893                 case ID_AA64PFR0_EL2_64:
894                         sbuf_printf(sb, "%sEL2", SEP_STR);
895                         break;
896                 case ID_AA64PFR0_EL2_64_32:
897                         sbuf_printf(sb, "%sEL2 32", SEP_STR);
898                         break;
899                 default:
900                         sbuf_printf(sb, "%sUnknown EL2", SEP_STR);
901                         break;
902                 }
903
904                 switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
905                 case ID_AA64PFR0_EL1_64:
906                         sbuf_printf(sb, "%sEL1", SEP_STR);
907                         break;
908                 case ID_AA64PFR0_EL1_64_32:
909                         sbuf_printf(sb, "%sEL1 32", SEP_STR);
910                         break;
911                 default:
912                         sbuf_printf(sb, "%sUnknown EL1", SEP_STR);
913                         break;
914                 }
915
916                 switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
917                 case ID_AA64PFR0_EL0_64:
918                         sbuf_printf(sb, "%sEL0", SEP_STR);
919                         break;
920                 case ID_AA64PFR0_EL0_64_32:
921                         sbuf_printf(sb, "%sEL0 32", SEP_STR);
922                         break;
923                 default:
924                         sbuf_printf(sb, "%sUnknown EL0", SEP_STR);
925                         break;
926                 }
927
928                 if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
929                         sbuf_printf(sb, "%s%#lx", SEP_STR,
930                             cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);
931
932                 sbuf_finish(sb);
933                 printf("%s>\n", sbuf_data(sb));
934                 sbuf_clear(sb);
935         }
936
937         /* AArch64 Processor Feature Register 1 */
938         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
939                 printf("         Processor Features 1 = <%#lx>\n",
940                     cpu_desc[cpu].id_aa64pfr1);
941         }
942
943         /* AArch64 Memory Model Feature Register 0 */
944         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
945                 printed = 0;
946                 sbuf_printf(sb, "      Memory Model Features 0 = <");
947                 switch (ID_AA64MMFR0_TGran4(cpu_desc[cpu].id_aa64mmfr0)) {
948                 case ID_AA64MMFR0_TGran4_NONE:
949                         break;
950                 case ID_AA64MMFR0_TGran4_IMPL:
951                         sbuf_printf(sb, "%s4k Granule", SEP_STR);
952                         break;
953                 default:
954                         sbuf_printf(sb, "%sUnknown 4k Granule", SEP_STR);
955                         break;
956                 }
957
958                 switch (ID_AA64MMFR0_TGran64(cpu_desc[cpu].id_aa64mmfr0)) {
959                 case ID_AA64MMFR0_TGran64_NONE:
960                         break;
961                 case ID_AA64MMFR0_TGran64_IMPL:
962                         sbuf_printf(sb, "%s64k Granule", SEP_STR);
963                         break;
964                 default:
965                         sbuf_printf(sb, "%sUnknown 64k Granule", SEP_STR);
966                         break;
967                 }
968
969                 switch (ID_AA64MMFR0_TGran16(cpu_desc[cpu].id_aa64mmfr0)) {
970                 case ID_AA64MMFR0_TGran16_NONE:
971                         break;
972                 case ID_AA64MMFR0_TGran16_IMPL:
973                         sbuf_printf(sb, "%s16k Granule", SEP_STR);
974                         break;
975                 default:
976                         sbuf_printf(sb, "%sUnknown 16k Granule", SEP_STR);
977                         break;
978                 }
979
980                 switch (ID_AA64MMFR0_BigEndEL0(cpu_desc[cpu].id_aa64mmfr0)) {
981                 case ID_AA64MMFR0_BigEndEL0_FIXED:
982                         break;
983                 case ID_AA64MMFR0_BigEndEL0_MIXED:
984                         sbuf_printf(sb, "%sEL0 MixEndian", SEP_STR);
985                         break;
986                 default:
987                         sbuf_printf(sb, "%sUnknown EL0 Endian switching", SEP_STR);
988                         break;
989                 }
990
991                 switch (ID_AA64MMFR0_SNSMem(cpu_desc[cpu].id_aa64mmfr0)) {
992                 case ID_AA64MMFR0_SNSMem_NONE:
993                         break;
994                 case ID_AA64MMFR0_SNSMem_DISTINCT:
995                         sbuf_printf(sb, "%sS/NS Mem", SEP_STR);
996                         break;
997                 default:
998                         sbuf_printf(sb, "%sUnknown S/NS Mem", SEP_STR);
999                         break;
1000                 }
1001
1002                 switch (ID_AA64MMFR0_BigEnd(cpu_desc[cpu].id_aa64mmfr0)) {
1003                 case ID_AA64MMFR0_BigEnd_FIXED:
1004                         break;
1005                 case ID_AA64MMFR0_BigEnd_MIXED:
1006                         sbuf_printf(sb, "%sMixedEndian", SEP_STR);
1007                         break;
1008                 default:
1009                         sbuf_printf(sb, "%sUnknown Endian switching", SEP_STR);
1010                         break;
1011                 }
1012
1013                 switch (ID_AA64MMFR0_ASIDBits(cpu_desc[cpu].id_aa64mmfr0)) {
1014                 case ID_AA64MMFR0_ASIDBits_8:
1015                         sbuf_printf(sb, "%s8bit ASID", SEP_STR);
1016                         break;
1017                 case ID_AA64MMFR0_ASIDBits_16:
1018                         sbuf_printf(sb, "%s16bit ASID", SEP_STR);
1019                         break;
1020                 default:
1021                         sbuf_printf(sb, "%sUnknown ASID", SEP_STR);
1022                         break;
1023                 }
1024
1025                 switch (ID_AA64MMFR0_PARange(cpu_desc[cpu].id_aa64mmfr0)) {
1026                 case ID_AA64MMFR0_PARange_4G:
1027                         sbuf_printf(sb, "%s4GB PA", SEP_STR);
1028                         break;
1029                 case ID_AA64MMFR0_PARange_64G:
1030                         sbuf_printf(sb, "%s64GB PA", SEP_STR);
1031                         break;
1032                 case ID_AA64MMFR0_PARange_1T:
1033                         sbuf_printf(sb, "%s1TB PA", SEP_STR);
1034                         break;
1035                 case ID_AA64MMFR0_PARange_4T:
1036                         sbuf_printf(sb, "%s4TB PA", SEP_STR);
1037                         break;
1038                 case ID_AA64MMFR0_PARange_16T:
1039                         sbuf_printf(sb, "%s16TB PA", SEP_STR);
1040                         break;
1041                 case ID_AA64MMFR0_PARange_256T:
1042                         sbuf_printf(sb, "%s256TB PA", SEP_STR);
1043                         break;
1044                 case ID_AA64MMFR0_PARange_4P:
1045                         sbuf_printf(sb, "%s4PB PA", SEP_STR);
1046                         break;
1047                 default:
1048                         sbuf_printf(sb, "%sUnknown PA Range", SEP_STR);
1049                         break;
1050                 }
1051
1052                 if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
1053                         sbuf_printf(sb, "%s%#lx", SEP_STR,
1054                             cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
1055                 sbuf_finish(sb);
1056                 printf("%s>\n", sbuf_data(sb));
1057                 sbuf_clear(sb);
1058         }
1059
1060         /* AArch64 Memory Model Feature Register 1 */
1061         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
1062                 printed = 0;
1063                 sbuf_printf(sb, "      Memory Model Features 1 = <");
1064
1065                 switch (ID_AA64MMFR1_XNX(cpu_desc[cpu].id_aa64mmfr1)) {
1066                 case ID_AA64MMFR1_XNX_NONE:
1067                         break;
1068                 case ID_AA64MMFR1_XNX_IMPL:
1069                         sbuf_printf(sb, "%sEL2 XN", SEP_STR);
1070                         break;
1071                 default:
1072                         sbuf_printf(sb, "%sUnknown XNX", SEP_STR);
1073                         break;
1074                 }
1075
1076                 switch (ID_AA64MMFR1_SpecSEI(cpu_desc[cpu].id_aa64mmfr1)) {
1077                 case ID_AA64MMFR1_SpecSEI_NONE:
1078                         break;
1079                 case ID_AA64MMFR1_SpecSEI_IMPL:
1080                         sbuf_printf(sb, "%sSpecSEI", SEP_STR);
1081                         break;
1082                 default:
1083                         sbuf_printf(sb, "%sUnknown SpecSEI", SEP_STR);
1084                         break;
1085                 }
1086
1087                 switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
1088                 case ID_AA64MMFR1_PAN_NONE:
1089                         break;
1090                 case ID_AA64MMFR1_PAN_IMPL:
1091                         sbuf_printf(sb, "%sPAN", SEP_STR);
1092                         break;
1093                 case ID_AA64MMFR1_PAN_ATS1E1:
1094                         sbuf_printf(sb, "%sPAN+AT", SEP_STR);
1095                         break;
1096                 default:
1097                         sbuf_printf(sb, "%sUnknown PAN", SEP_STR);
1098                         break;
1099                 }
1100
1101                 switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
1102                 case ID_AA64MMFR1_LO_NONE:
1103                         break;
1104                 case ID_AA64MMFR1_LO_IMPL:
1105                         sbuf_printf(sb, "%sLO", SEP_STR);
1106                         break;
1107                 default:
1108                         sbuf_printf(sb, "%sUnknown LO", SEP_STR);
1109                         break;
1110                 }
1111
1112                 switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
1113                 case ID_AA64MMFR1_HPDS_NONE:
1114                         break;
1115                 case ID_AA64MMFR1_HPDS_HPD:
1116                         sbuf_printf(sb, "%sHPDS", SEP_STR);
1117                         break;
1118                 case ID_AA64MMFR1_HPDS_TTPBHA:
1119                         sbuf_printf(sb, "%sTTPBHA", SEP_STR);
1120                         break;
1121                 default:
1122                         sbuf_printf(sb, "%sUnknown HPDS", SEP_STR);
1123                         break;
1124                 }
1125
1126                 switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
1127                 case ID_AA64MMFR1_VH_NONE:
1128                         break;
1129                 case ID_AA64MMFR1_VH_IMPL:
1130                         sbuf_printf(sb, "%sVHE", SEP_STR);
1131                         break;
1132                 default:
1133                         sbuf_printf(sb, "%sUnknown VHE", SEP_STR);
1134                         break;
1135                 }
1136
1137                 switch (ID_AA64MMFR1_VMIDBits(cpu_desc[cpu].id_aa64mmfr1)) {
1138                 case ID_AA64MMFR1_VMIDBits_8:
1139                         break;
1140                 case ID_AA64MMFR1_VMIDBits_16:
1141                         sbuf_printf(sb, "%s16 VMID bits", SEP_STR);
1142                         break;
1143                 default:
1144                         sbuf_printf(sb, "%sUnknown VMID bits", SEP_STR);
1145                         break;
1146                 }
1147
1148                 switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
1149                 case ID_AA64MMFR1_HAFDBS_NONE:
1150                         break;
1151                 case ID_AA64MMFR1_HAFDBS_AF:
1152                         sbuf_printf(sb, "%sAF", SEP_STR);
1153                         break;
1154                 case ID_AA64MMFR1_HAFDBS_AF_DBS:
1155                         sbuf_printf(sb, "%sAF+DBS", SEP_STR);
1156                         break;
1157                 default:
1158                         sbuf_printf(sb, "%sUnknown Hardware update AF/DBS", SEP_STR);
1159                         break;
1160                 }
1161
1162                 if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
1163                         sbuf_printf(sb, "%s%#lx", SEP_STR,
1164                             cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
1165                 sbuf_finish(sb);
1166                 printf("%s>\n", sbuf_data(sb));
1167                 sbuf_clear(sb);
1168         }
1169
1170         /* AArch64 Memory Model Feature Register 2 */
1171         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) {
1172                 printed = 0;
1173                 sbuf_printf(sb, "      Memory Model Features 2 = <");
1174
1175                 switch (ID_AA64MMFR2_NV(cpu_desc[cpu].id_aa64mmfr2)) {
1176                 case ID_AA64MMFR2_NV_NONE:
1177                         break;
1178                 case ID_AA64MMFR2_NV_IMPL:
1179                         sbuf_printf(sb, "%sNestedVirt", SEP_STR);
1180                         break;
1181                 default:
1182                         sbuf_printf(sb, "%sUnknown NestedVirt", SEP_STR);
1183                         break;
1184                 }
1185
1186                 switch (ID_AA64MMFR2_CCIDX(cpu_desc[cpu].id_aa64mmfr2)) {
1187                 case ID_AA64MMFR2_CCIDX_32:
1188                         sbuf_printf(sb, "%s32b CCIDX", SEP_STR);
1189                         break;
1190                 case ID_AA64MMFR2_CCIDX_64:
1191                         sbuf_printf(sb, "%s64b CCIDX", SEP_STR);
1192                         break;
1193                 default:
1194                         sbuf_printf(sb, "%sUnknown CCIDX", SEP_STR);
1195                         break;
1196                 }
1197
1198                 switch (ID_AA64MMFR2_VARange(cpu_desc[cpu].id_aa64mmfr2)) {
1199                 case ID_AA64MMFR2_VARange_48:
1200                         sbuf_printf(sb, "%s48b VA", SEP_STR);
1201                         break;
1202                 case ID_AA64MMFR2_VARange_52:
1203                         sbuf_printf(sb, "%s52b VA", SEP_STR);
1204                         break;
1205                 default:
1206                         sbuf_printf(sb, "%sUnknown VA Range", SEP_STR);
1207                         break;
1208                 }
1209
1210                 switch (ID_AA64MMFR2_IESB(cpu_desc[cpu].id_aa64mmfr2)) {
1211                 case ID_AA64MMFR2_IESB_NONE:
1212                         break;
1213                 case ID_AA64MMFR2_IESB_IMPL:
1214                         sbuf_printf(sb, "%sIESB", SEP_STR);
1215                         break;
1216                 default:
1217                         sbuf_printf(sb, "%sUnknown IESB", SEP_STR);
1218                         break;
1219                 }
1220
1221                 switch (ID_AA64MMFR2_LSM(cpu_desc[cpu].id_aa64mmfr2)) {
1222                 case ID_AA64MMFR2_LSM_NONE:
1223                         break;
1224                 case ID_AA64MMFR2_LSM_IMPL:
1225                         sbuf_printf(sb, "%sLSM", SEP_STR);
1226                         break;
1227                 default:
1228                         sbuf_printf(sb, "%sUnknown LSM", SEP_STR);
1229                         break;
1230                 }
1231
1232                 switch (ID_AA64MMFR2_UAO(cpu_desc[cpu].id_aa64mmfr2)) {
1233                 case ID_AA64MMFR2_UAO_NONE:
1234                         break;
1235                 case ID_AA64MMFR2_UAO_IMPL:
1236                         sbuf_printf(sb, "%sUAO", SEP_STR);
1237                         break;
1238                 default:
1239                         sbuf_printf(sb, "%sUnknown UAO", SEP_STR);
1240                         break;
1241                 }
1242
1243                 switch (ID_AA64MMFR2_CnP(cpu_desc[cpu].id_aa64mmfr2)) {
1244                 case ID_AA64MMFR2_CnP_NONE:
1245                         break;
1246                 case ID_AA64MMFR2_CnP_IMPL:
1247                         sbuf_printf(sb, "%sCnP", SEP_STR);
1248                         break;
1249                 default:
1250                         sbuf_printf(sb, "%sUnknown CnP", SEP_STR);
1251                         break;
1252                 }
1253
1254                 if ((cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK) != 0)
1255                         sbuf_printf(sb, "%s%#lx", SEP_STR,
1256                             cpu_desc[cpu].id_aa64mmfr2 & ~ID_AA64MMFR2_MASK);
1257                 sbuf_finish(sb);
1258                 printf("%s>\n", sbuf_data(sb));
1259                 sbuf_clear(sb);
1260         }
1261
1262         /* AArch64 Debug Feature Register 0 */
1263         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
1264                 printed = 0;
1265                 sbuf_printf(sb, "             Debug Features 0 = <");
1266                 switch(ID_AA64DFR0_PMSVer(cpu_desc[cpu].id_aa64dfr0)) {
1267                 case ID_AA64DFR0_PMSVer_NONE:
1268                         break;
1269                 case ID_AA64DFR0_PMSVer_V1:
1270                         sbuf_printf(sb, "%sSPE v1", SEP_STR);
1271                         break;
1272                 default:
1273                         sbuf_printf(sb, "%sUnknown SPE", SEP_STR);
1274                         break;
1275                 }
1276
1277                 sbuf_printf(sb, "%s%lu CTX Breakpoints", SEP_STR,
1278                     ID_AA64DFR0_CTX_CMPs(cpu_desc[cpu].id_aa64dfr0));
1279
1280                 sbuf_printf(sb, "%s%lu Watchpoints", SEP_STR,
1281                     ID_AA64DFR0_WRPs(cpu_desc[cpu].id_aa64dfr0));
1282
1283                 sbuf_printf(sb, "%s%lu Breakpoints", SEP_STR,
1284                     ID_AA64DFR0_BRPs(cpu_desc[cpu].id_aa64dfr0));
1285
1286                 switch (ID_AA64DFR0_PMUVer(cpu_desc[cpu].id_aa64dfr0)) {
1287                 case ID_AA64DFR0_PMUVer_NONE:
1288                         break;
1289                 case ID_AA64DFR0_PMUVer_3:
1290                         sbuf_printf(sb, "%sPMUv3", SEP_STR);
1291                         break;
1292                 case ID_AA64DFR0_PMUVer_3_1:
1293                         sbuf_printf(sb, "%sPMUv3+16 bit evtCount", SEP_STR);
1294                         break;
1295                 case ID_AA64DFR0_PMUVer_IMPL:
1296                         sbuf_printf(sb, "%sImplementation defined PMU", SEP_STR);
1297                         break;
1298                 default:
1299                         sbuf_printf(sb, "%sUnknown PMU", SEP_STR);
1300                         break;
1301                 }
1302
1303                 switch (ID_AA64DFR0_TraceVer(cpu_desc[cpu].id_aa64dfr0)) {
1304                 case ID_AA64DFR0_TraceVer_NONE:
1305                         break;
1306                 case ID_AA64DFR0_TraceVer_IMPL:
1307                         sbuf_printf(sb, "%sTrace", SEP_STR);
1308                         break;
1309                 default:
1310                         sbuf_printf(sb, "%sUnknown Trace", SEP_STR);
1311                         break;
1312                 }
1313
1314                 switch (ID_AA64DFR0_DebugVer(cpu_desc[cpu].id_aa64dfr0)) {
1315                 case ID_AA64DFR0_DebugVer_8:
1316                         sbuf_printf(sb, "%sDebug v8", SEP_STR);
1317                         break;
1318                 case ID_AA64DFR0_DebugVer_8_VHE:
1319                         sbuf_printf(sb, "%sDebug v8+VHE", SEP_STR);
1320                         break;
1321                 case ID_AA64DFR0_DebugVer_8_2:
1322                         sbuf_printf(sb, "%sDebug v8.2", SEP_STR);
1323                         break;
1324                 default:
1325                         sbuf_printf(sb, "%sUnknown Debug", SEP_STR);
1326                         break;
1327                 }
1328
1329                 if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
1330                         sbuf_printf(sb, "%s%#lx", SEP_STR,
1331                             cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
1332                 sbuf_finish(sb);
1333                 printf("%s>\n", sbuf_data(sb));
1334                 sbuf_clear(sb);
1335         }
1336
        /* AArch64 Debug Feature Register 1 */
1338         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
1339                 printf("             Debug Features 1 = <%#lx>\n",
1340                     cpu_desc[cpu].id_aa64dfr1);
1341         }
1342
1343         /* AArch64 Auxiliary Feature Register 0 */
1344         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
1345                 printf("         Auxiliary Features 0 = <%#lx>\n",
1346                     cpu_desc[cpu].id_aa64afr0);
1347         }
1348
1349         /* AArch64 Auxiliary Feature Register 1 */
1350         if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
1351                 printf("         Auxiliary Features 1 = <%#lx>\n",
1352                     cpu_desc[cpu].id_aa64afr1);
1353         }
1354
1355         sbuf_delete(sb);
1356         sb = NULL;
1357 #undef SEP_STR
1358 }
1359
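/*
 * Read and record the identification registers for the current CPU.
 * Runs on every CPU; CPUs other than CPU 0 also update the affinity
 * level count and note which ID registers differ from CPU 0.
 */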
1360 void
1361 identify_cpu(void)
1362 {
1363         u_int midr;
1364         u_int impl_id;
1365         u_int part_id;
1366         u_int cpu;
1367         size_t i;
1368         const struct cpu_parts *cpu_partsp = NULL;
1369
1370         cpu = PCPU_GET(cpuid);
1371         midr = get_midr();
1372
1373         /*
1374          * Store midr to pcpu to allow fast reading
1375          * from EL0, EL1 and assembly code.
1376          */
1377         PCPU_SET(midr, midr);
1378
1379         impl_id = CPU_IMPL(midr);
1380         for (i = 0; i < nitems(cpu_implementers); i++) {
1381                 if (impl_id == cpu_implementers[i].impl_id ||
1382                     cpu_implementers[i].impl_id == 0) {
1383                         cpu_desc[cpu].cpu_impl = impl_id;
1384                         cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
1385                         cpu_partsp = cpu_implementers[i].cpu_parts;
1386                         break;
1387                 }
1388         }
1389
1390         part_id = CPU_PART(midr);
        /* The table ends with CPU_PART_NONE (part_id 0), which matches. */
        for (i = 0; ; i++) {
1392                 if (part_id == cpu_partsp[i].part_id ||
1393                     cpu_partsp[i].part_id == 0) {
1394                         cpu_desc[cpu].cpu_part_num = part_id;
1395                         cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
1396                         break;
1397                 }
1398         }
1399
1400         cpu_desc[cpu].cpu_revision = CPU_REV(midr);
1401         cpu_desc[cpu].cpu_variant = CPU_VAR(midr);
1402
1403         snprintf(cpu_model, sizeof(cpu_model), "%s %s r%dp%d",
1404             cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name,
1405             cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision);
1406
1407         /* Save affinity for current CPU */
1408         cpu_desc[cpu].mpidr = get_mpidr();
1409         CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;
1410
1411         cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(ID_AA64DFR0_EL1);
1412         cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(ID_AA64DFR1_EL1);
1413         cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(ID_AA64ISAR0_EL1);
1414         cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(ID_AA64ISAR1_EL1);
1415         cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(ID_AA64MMFR0_EL1);
1416         cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(ID_AA64MMFR1_EL1);
1417         cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(ID_AA64MMFR2_EL1);
1418         cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(ID_AA64PFR0_EL1);
1419         cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(ID_AA64PFR1_EL1);
1420
1421         if (cpu != 0) {
                /*
                 * This code must run on one CPU at a time, but the
                 * scheduler is not yet running on this core, so use a
                 * simple spinlock.
                 */
1427                 while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
1428                         __asm __volatile("wfe" ::: "memory");
1429
1430                 switch (cpu_aff_levels) {
1431                 case 0:
1432                         if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
1433                             CPU_AFF0(cpu_desc[0].mpidr))
1434                                 cpu_aff_levels = 1;
1435                         /* FALLTHROUGH */
1436                 case 1:
1437                         if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
1438                             CPU_AFF1(cpu_desc[0].mpidr))
1439                                 cpu_aff_levels = 2;
1440                         /* FALLTHROUGH */
1441                 case 2:
1442                         if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
1443                             CPU_AFF2(cpu_desc[0].mpidr))
1444                                 cpu_aff_levels = 3;
1445                         /* FALLTHROUGH */
1446                 case 3:
1447                         if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
1448                             CPU_AFF3(cpu_desc[0].mpidr))
1449                                 cpu_aff_levels = 4;
1450                         break;
1451                 }
1452
1453                 if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
1454                         cpu_print_regs |= PRINT_ID_AA64_AFR0;
1455                 if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
1456                         cpu_print_regs |= PRINT_ID_AA64_AFR1;
1457
1458                 if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
1459                         cpu_print_regs |= PRINT_ID_AA64_DFR0;
1460                 if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
1461                         cpu_print_regs |= PRINT_ID_AA64_DFR1;
1462
1463                 if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
1464                         cpu_print_regs |= PRINT_ID_AA64_ISAR0;
1465                 if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
1466                         cpu_print_regs |= PRINT_ID_AA64_ISAR1;
1467
1468                 if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
1469                         cpu_print_regs |= PRINT_ID_AA64_MMFR0;
1470                 if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
1471                         cpu_print_regs |= PRINT_ID_AA64_MMFR1;
1472                 if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2)
1473                         cpu_print_regs |= PRINT_ID_AA64_MMFR2;
1474
1475                 if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
1476                         cpu_print_regs |= PRINT_ID_AA64_PFR0;
1477                 if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
1478                         cpu_print_regs |= PRINT_ID_AA64_PFR1;
1479
1480                 /* Wake up the other CPUs */
1481                 atomic_store_rel_int(&ident_lock, 0);
1482                 __asm __volatile("sev" ::: "memory");
1483         }
1484 }